/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/hw.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "exec/gdbstub.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"

/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE (sizeof(struct kvm_s390_irq) * \
                           (max_cpus + NR_LOCAL_IRQS))
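
/*
 * Illustrative sizing: for a machine with max_cpus == 8 the buffer holds
 * 8 + NR_LOCAL_IRQS == 40 entries of sizeof(struct kvm_s390_irq). This is
 * the buffer handed to KVM_S390_GET_IRQ_STATE/KVM_S390_SET_IRQ_STATE for
 * migration, see kvm_s390_vcpu_interrupt_pre_save() and
 * kvm_s390_vcpu_interrupt_post_load() at the bottom of this file.
 */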

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static QemuMutex qemu_sigp_mutex;

static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_s390_irq;
static int cap_ri;

static void *legacy_s390_alloc(size_t size, uint64_t *align);

static int kvm_s390_query_mem_limit(KVMState *s, uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(s, hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
}

static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}

void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!mem_path || !kvm_s390_cmma_available()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_enable_cmma(rc);
}

static void kvm_s390_set_attr(uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = KVM_S390_VM_CRYPTO,
        .attr = attr,
    };

    int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);

    if (ret) {
        error_report("Failed to set crypto device attribute %lu: %s",
                     attr, strerror(-ret));
    }
}

static void kvm_s390_init_aes_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

static void kvm_s390_init_dea_kw(void)
{
    uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;

    if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
                                 NULL)) {
        attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
    }

    if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
        kvm_s390_set_attr(attr);
    }
}

void kvm_s390_crypto_reset(void)
{
    if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
        kvm_s390_init_aes_kw();
        kvm_s390_init_dea_kw();
    }
}
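
/*
 * The memory-control and crypto helpers above all follow the same KVM
 * device-attribute pattern: fill a struct kvm_device_attr with a group and
 * an attribute id (plus an optional payload address), probe support with
 * kvm_vm_check_attr() and apply it with the KVM_SET_DEVICE_ATTR vm ioctl.
 * A minimal sketch, mirroring the helpers above:
 *
 *     struct kvm_device_attr attr = {
 *         .group = KVM_S390_VM_MEM_CTRL,
 *         .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
 *     };
 *     if (kvm_vm_check_attr(kvm_state, attr.group, attr.attr)) {
 *         kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
 *     }
 */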

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    if (ri_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
            cap_ri = 1;
        }
    }

    qemu_mutex_init(&qemu_sigp_mutex);

    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE);
    return 0;
}

void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}

static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0].ll;
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1].ll;
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = get_freg(env, i)->ll;
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = get_freg(env, i)->ll;
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }
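
    /*
     * The pattern above applies to all register groups in this function:
     * if the kernel advertises a register set in kvm_valid_regs, it is
     * written to the shared kvm_run page and flagged in kvm_dirty_regs;
     * otherwise we fall back to the classic KVM_SET_* ioctls (or, below,
     * to per-register ONE_REG accesses).
     */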

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0].ll = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1].ll = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            get_freg(env, i)->ll = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            get_freg(env, i)->ll = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
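
/*
 * The guest TOD clock is exposed as two KVM device attributes: the 64-bit
 * clock value (KVM_S390_VM_TOD_LOW) and a separate high byte
 * (KVM_S390_VM_TOD_HIGH), which is why the accessors above take both a
 * uint8_t * and a uint64_t *. A migration caller would typically pair
 * them, e.g. (sketch):
 *
 *     uint8_t tod_high;
 *     uint64_t tod_low;
 *     if (!kvm_s390_get_clock(&tod_high, &tod_low)) {
 *         // save both halves; restore later via kvm_s390_set_clock()
 *     }
 */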

/**
 * kvm_s390_mem_op:
 * @addr: the logical start address in guest memory
 * @ar: the access register number
 * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
 * @len: length that should be transferred
 * @is_write: true = write, false = read
 * Returns: 0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
    };
    int ret;

    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
    }
    return ret;
}

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system-defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}

static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
    }
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}
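
/*
 * Hardware breakpoints and watchpoints are kept in the flat hw_breakpoints
 * array declared at the top of this file (rather than in a list) because
 * the very same array is later handed to the kernel as dbg->arch.hw_bp in
 * kvm_arch_update_guest_debug().
 */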

static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
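
/*
 * s390_kvm_irq_to_interrupt() below maps the rich struct kvm_s390_irq onto
 * the legacy struct kvm_s390_interrupt, which only carries type/parm/parm64.
 * For I/O interrupts, for instance, the four identifiers are packed as:
 *
 *     parm   = subchannel_id << 16 | subchannel_nr;
 *     parm64 = (uint64_t)io_int_parm << 32 | io_int_word;
 */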

static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}

static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    static bool use_flic = true;
    int r;

    if (use_flic) {
        r = kvm_s390_inject_flic(irq);
        if (r == -ENOSYS) {
            use_flic = false;
        }
        if (!r) {
            return;
        }
    }
    __kvm_s390_floating_interrupt(irq);
}

void kvm_s390_service_interrupt(uint32_t parm)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_SERVICE,
        .u.ext.ext_params = parm,
    };

    kvm_s390_floating_interrupt(&irq);
}

static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}
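
/*
 * Worked example for the 20-bit signed displacement decoding above: the
 * RXY/RSY formats split the displacement into DL (12 bits, extracted as
 * (ipb & 0x0fff0000) >> 16) and DH (8 bits, extracted as
 * (ipb & 0xff00) << 4). For DH = 0xff and DL = 0xffe this gives
 * disp2 = 0xffffe; bit 0x80000 is set, so adding 0xfff00000 yields
 * 0xfffffffe, i.e. -2 after the (long)(int) sign extension.
 */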

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return clp_service_call(cpu, r2);
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcilg_service_call(cpu, r1, r2);
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcistg_service_call(cpu, r1, r2);
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run, &ar);

    return stpcifc_service_call(cpu, r1, fiba, ar);
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    /* NOOP */
    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return rpcit_service_call(cpu, r1, r2);
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;
    uint8_t ar;

    cpu_synchronize_state(CPU(cpu));
    gaddr = get_base_disp_rsy(cpu, run, &ar);

    return pcistb_service_call(cpu, r1, r3, gaddr, ar);
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;
    uint8_t ar;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run, &ar);

    return mpcifc_service_call(cpu, r1, fiba, ar);
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
    }
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}
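
/*
 * DIAGNOSE calls funnel through handle_diag() below: the base/displacement
 * operand of the instruction is resolved and the low 16 bits of the
 * resulting address (DIAG_KVM_CODE_MASK) select the function code, e.g.
 * 0x500 for the KVM virtio hypercall or 0x501 for the legacy software
 * breakpoint handled by handle_sw_breakpoint() above.
 */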

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

typedef struct SigpInfo {
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}

static void sigp_start(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (cs->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
    } else {
        /* execute the stop function */
        cpu->env.sigp_order = SIGP_STOP;
        kvm_s390_vcpu_interrupt(cpu, &irq);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
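
/*
 * Layout of the 512-byte save area written by kvm_s390_store_status()
 * below; the offsets mirror the memcpys in that function:
 *
 *     0:   fprs (16 * 8)    280: prefix/psa (4)    304: ckc (8)
 *     128: gprs (16 * 8)    284: fpc (4)           320: acrs (64)
 *     256: psw (16)         292: todpr (4)         384: crs (128)
 *                           296: cputm (8)
 */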

#define ADTL_SAVE_AREA_SIZE 1024
static int kvm_s390_store_adtl_status(S390CPU *cpu, hwaddr addr)
{
    void *mem;
    hwaddr len = ADTL_SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != ADTL_SAVE_AREA_SIZE) {
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    memcpy(mem, &cpu->env.vregs, 512);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}

#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
#define SAVE_AREA_SIZE 512
static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    uint64_t ckc = cpu->env.ckc >> 8;
    void *mem;
    int i;
    hwaddr len = SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != SAVE_AREA_SIZE) {
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    for (i = 0; i < 16; ++i) {
        *((uint64_t *)mem + i) = get_freg(&cpu->env, i)->ll;
    }
    memcpy(mem + 128, &cpu->env.regs, 128);
    memcpy(mem + 256, &cpu->env.psw, 16);
    memcpy(mem + 280, &cpu->env.psa, 4);
    memcpy(mem + 284, &cpu->env.fpc, 4);
    memcpy(mem + 292, &cpu->env.todpr, 4);
    memcpy(mem + 296, &cpu->env.cputm, 8);
    memcpy(mem + 304, &ckc, 8);
    memcpy(mem + 320, &cpu->env.aregs, 64);
    memcpy(mem + 384, &cpu->env.cregs, 128);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}

static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
    }

    switch (s390_cpu_get_state(cpu)) {
    case CPU_STATE_OPERATING:
        cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        kvm_s390_vcpu_interrupt(cpu, &irq);
        /* store will be performed when handling the stop intercept */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(cs);
        kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(cs);

    if (kvm_s390_store_status(cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (!s390_has_feat(S390_FEAT_VECTOR)) {
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    /* parameter must be aligned to 1024-byte boundary */
    if (si->param & 0x3ff) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    cpu_synchronize_state(cs);

    if (kvm_s390_store_adtl_status(cpu, si->param)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    switch (s390_cpu_get_state(cpu)) {
    case CPU_STATE_STOPPED:
        /* the restart irq has to be delivered prior to any other pending irq */
        cpu_synchronize_state(cs);
        do_restart_interrupt(&cpu->env);
        s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
        break;
    case CPU_STATE_OPERATING:
        kvm_s390_vcpu_interrupt(cpu, &irq);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

int kvm_s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {};

    run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}
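
/*
 * The sigp_*() handlers above and below run on the target vcpu via
 * run_on_cpu() (see handle_sigp_single_dst()): the issuing thread blocks
 * until the handler has executed in the destination vcpu's context, and
 * the handler reports its condition code (and, where applicable, a status
 * word) back through the shared SigpInfo.
 */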

static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->initial_cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(cs);

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu->env.psa = addr;
    cpu_synchronize_post_init(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status,
                   RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address,
                   RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_ADTL_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status,
                   RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset,
                   RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
    }

    return si.cc;
}

static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;

    /* due to the BQL, we are the only active cpu */
    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);
        if (cur_cpu->env.sigp_order != 0) {
            return SIGP_CC_BUSY;
        }
        cpu_synchronize_state(cur_cs);
        /* all but the current one have to be stopped */
        if (cur_cpu != cpu &&
            s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            *status_reg &= 0xffffffff00000000ULL;
            *status_reg |= SIGP_STAT_INCORRECT_STATE;
            return SIGP_CC_STATUS_STORED;
        }
    }

    switch (param & 0xff) {
    case SIGP_MODE_ESA_S390:
        /* not supported */
        return SIGP_CC_NOT_OPERATIONAL;
    case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
    case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
        CPU_FOREACH(cur_cs) {
            cur_cpu = S390_CPU(cur_cs);
            cur_cpu->env.pfault_token = -1UL;
        }
        break;
    default:
        *status_reg &= 0xffffffff00000000ULL;
        *status_reg |= SIGP_STAT_INVALID_PARAMETER;
        return SIGP_CC_STATUS_STORED;
    }

    return SIGP_CC_ORDER_CODE_ACCEPTED;
}

#define SIGP_ORDER_MASK 0x000000ff

static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;
    uint64_t *status_reg;
    uint64_t param;
    S390CPU *dst_cpu = NULL;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
        & SIGP_ORDER_MASK;
    status_reg = &env->regs[r1];
    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

    if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
        ret = SIGP_CC_BUSY;
        goto out;
    }

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
    }
    qemu_mutex_unlock(&qemu_sigp_mutex);

out:
    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);

    if (ret >= 0) {
        setcc(cpu, ret);
        return 0;
    }

    return ret;
}
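
/*
 * Instruction intercepts are dispatched on the opcode: IPA0_* matches the
 * high byte of the ipa field, and the second opcode byte (for the EB/E3
 * formats, the low byte of the ipb) selects the handler. Any handler that
 * returns r < 0 ends up as an operation exception (program interrupt
 * 0x0001) in the guest.
 */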

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}

static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}

static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
    CPUState *cs = CPU(cpu);

    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
    s390_cpu_halt(cpu);
    qemu_system_guest_panicked();
}

static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, "program interrupt",
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        cpu_synchronize_state(cs);
        if (s390_cpu_halt(cpu) == 0) {
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                qemu_system_guest_panicked();
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
            qemu_system_shutdown_request();
        }
        if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
            kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
                                  true);
        }
        cpu->env.sigp_order = 0;
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* currently only instr 0x0000 after enabled via capability */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            enter_pgmcheck(cpu, PGM_OPERATION);
            r = 0;
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
                                  run->s390_tsch.subchannel_nr,
                                  run->s390_tsch.io_int_parm,
                                  run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}
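
/*
 * STSI 3.2.2 post-handling: the kernel has already filled the SYSIB; QEMU
 * merely amends our own entry (vm[0]) with the configured machine name in
 * EBCDIC, a UTF-8 Extended Name and the VM UUID before writing the block
 * back to guest memory.
 */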

static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    struct sysib_322 sysib;
    int del;

    if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /*
     * The first virtualization level that does not provide Extended Names
     * delimits the stack; it is assumed to be incapable of managing
     * Extended Names for lower levels.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }
    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
    /*
     * If the hypervisor specifies a zero Extended Name in the STSI 3.2.2
     * SYSIB, s390 considers it incapable of providing any Extended Name.
     * Therefore, if no name was specified on qemu invocation, we go with
     * the same "KVMguest" default that KVM has filled into the short name
     * field.
     */
    if (qemu_name) {
        strncpy((char *)sysib.ext_names[0], qemu_name,
                sizeof(sysib.ext_names[0]));
    } else {
        strcpy((char *)sysib.ext_names[0], "KVMguest");
    }
    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
}

static int handle_stsi(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    switch (run->s390_stsi.fc) {
    case 3:
        if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
            return 0;
        }
        /* Only sysib 3.2.2 needs post-handling for now. */
        insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
        return 0;
    default:
        return 0;
    }
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    qemu_mutex_lock_iothread();

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    qemu_mutex_unlock_iothread();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    qemu_mutex_lock_iothread();

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    qemu_mutex_unlock_iothread();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}

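/*
 * Inject a floating I/O interrupt. The adapter-interrupt bit in the
 * interruption word selects between an adapter interrupt and a
 * subchannel interrupt qualified by cssid/ssid/subchannel number.
 */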
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    struct kvm_s390_irq irq = {
        .u.io.subchannel_id = subchannel_id,
        .u.io.subchannel_nr = subchannel_nr,
        .u.io.io_int_parm = io_int_parm,
        .u.io.io_int_word = io_int_word,
    };

    if (io_int_word & IO_INT_WORD_AI) {
        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8,
                                   (subchannel_id & 0x0006),
                                   subchannel_nr);
    }
    kvm_s390_floating_interrupt(&irq);
}

static uint64_t build_channel_report_mcic(void)
{
    uint64_t mcic;

    /* subclass: indicate channel report pending */
    mcic = MCIC_SC_CP |
    /* subclass modifiers: none */
    /* storage errors: none */
    /* validity bits: no damage */
        MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
        MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
        MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
    if (s390_has_feat(S390_FEAT_VECTOR)) {
        mcic |= MCIC_VB_VR;
    }
    return mcic;
}

void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = build_channel_report_mcic(),
    };
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_memslot_count(KVMState *s)
{
    return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
}

int kvm_s390_get_ri(void)
{
    return cap_ri;
}

int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}

void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
    struct kvm_s390_irq_state irq_state;
    CPUState *cs = CPU(cpu);
    int32_t bytes;

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return;
    }

    irq_state.buf = (uint64_t) cpu->irqstate;
    irq_state.len = VCPU_IRQ_BUF_SIZE;

    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes < 0) {
        cpu->irqstate_saved_size = 0;
        error_report("Migration of interrupt state failed");
        return;
    }

    cpu->irqstate_saved_size = bytes;
}

int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_irq_state irq_state;
    int r;

    if (cpu->irqstate_saved_size == 0) {
        return 0;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return -ENOSYS;
    }

    irq_state.buf = (uint64_t) cpu->irqstate;
    irq_state.len = cpu->irqstate_saved_size;

    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
    if (r) {
        error_report("Setting interrupt state failed %d", r);
    }
    return r;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t idx = data >> ZPCI_MSI_VEC_BITS;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), idx);
    if (!pbdev) {
        DPRINTF("add_msi_route no dev\n");
        return -ENODEV;
    }

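    /*
     * The MSI data word encodes the zPCI device index and the MSI vector;
     * the vector doubles as the offset into the device's indicator area,
     * and the rest of the route is taken from the device's adapter
     * description.
     */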
    pbdev->routes.adapter.ind_offset = vec;

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}

static inline int test_bit_inv(long nr, const unsigned long *addr)
{
    return test_bit(BE_BIT_NR(nr), addr);
}

static inline void set_bit_inv(long nr, unsigned long *addr)
{
    set_bit(BE_BIT_NR(nr), addr);
}

static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * Add all subfunctions for which the corresponding feature - the one
     * that unlocks the query function - is available.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    return 0;
}

static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing, IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
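        /*
         * Function code 0 of each installed subfunction block is the QUERY
         * function, which is always available; in the MSB-0 bit numbering
         * used here it is the 0x80 bit of the first byte.
         */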
        prop.ptff[0] |= 0x80; /* query is always available */
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        prop.kmac[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        prop.kmc[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        prop.km[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        prop.kimd[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
        prop.klmd[0] |= 0x80; /* query is always available */
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
        prop.pckmo[0] |= 0x80; /* query is always available */
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        prop.kmctr[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        prop.kmf[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        prop.kmo[0] |= 0x80; /* query is always available */
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
        prop.pcc[0] |= 0x80; /* query is always available */
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
        prop.ppno[0] |= 0x80; /* query is always available */
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO, S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI },
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF },
};

static int query_cpu_feat(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
        .addr = (uint64_t) &prop,
    };
    int rc;
    int i;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit_inv(kvm_to_feat[i][0], (unsigned long *)prop.feat)) {
            set_bit(kvm_to_feat[i][1], features);
        }
    }
    return 0;
}

static int configure_cpu_feat(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_feat prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
        .addr = (uint64_t) &prop,
    };
    int i;

    for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
        if (test_bit(kvm_to_feat[i][1], features)) {
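            /*
             * The kernel's feature bitmap uses MSB-0 (big-endian) bit
             * numbering, hence the inverted bit helpers.
             */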
            set_bit_inv(kvm_to_feat[i][0], (unsigned long *)prop.feat);
        }
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

bool kvm_s390_cpu_models_supported(void)
{
    if (!cpu_model_allowed()) {
        /* compatibility machines interfere with the cpu model */
        return false;
    }
    return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
           kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                             KVM_S390_VM_CPU_MACHINE_SUBFUNC);
}

void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return;
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    }

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return;
    }
    /* strip off features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
}
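/*
 * Apply the configured model to the VM: the cpuid, IBC value, and STFL(E)
 * facility list go through the processor attribute; CPU features and
 * subfunctions are configured via separate attribute calls below.
 */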
void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available() && !mem_path) {
            kvm_s390_enable_cmma();
        }
        return;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(E) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return;
    }
    /* enable CMM via CMMA - disabled on hugetlbfs */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        if (mem_path) {
            error_report("Warning: CMM will not be enabled because it is not "
                         "compatible with hugetlbfs.");
        } else {
            kvm_s390_enable_cmma();
        }
    }
}