#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "exec/exec-all.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_nested.h"
#include "mmu-book3s-v3.h"
#include "cpu-models.h"
#include "qemu/log.h"

void spapr_nested_reset(SpaprMachineState *spapr)
{
    if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
        spapr->nested.api = NESTED_API_KVM_HV;
        spapr_unregister_nested_hv();
        spapr_register_nested_hv();
    } else {
        spapr->nested.api = 0;
        spapr->nested.capabilities_set = false;
        spapr_nested_gsb_init();
    }
}

uint8_t spapr_nested_api(SpaprMachineState *spapr)
{
    return spapr->nested.api;
}

#ifdef CONFIG_TCG

bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong lpid, ppc_v3_pate_t *entry)
{
    uint64_t patb, pats;

    assert(lpid != 0);

    patb = spapr->nested.ptcr & PTCR_PATB;
    pats = spapr->nested.ptcr & PTCR_PATS;

    /* Check if partition table is properly aligned */
    if (patb & MAKE_64BIT_MASK(0, pats + 12)) {
        return false;
    }

    /* Calculate number of entries */
    pats = 1ull << (pats + 12 - 4);
    if (pats <= lpid) {
        return false;
    }

    /* Grab entry */
    patb += 16 * lpid;
    entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
    entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
    return true;
}

static
SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr,
                                                     target_ulong guestid)
{
    SpaprMachineStateNestedGuest *guest;

    guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid));
    return guest;
}

bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
                                target_ulong lpid, ppc_v3_pate_t *entry)
{
    SpaprMachineStateNestedGuest *guest;
    assert(lpid != 0);
    guest = spapr_get_nested_guest(spapr, lpid);
    if (!guest) {
        return false;
    }

    entry->dw0 = guest->parttbl[0];
    entry->dw1 = guest->parttbl[1];
    return true;
}

#define PRTS_MASK 0x1f

static target_ulong h_set_ptbl(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    target_ulong ptcr = args[0];

    if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
        return H_FUNCTION;
    }

    if ((ptcr & PRTS_MASK) + 12 - 4 > 12) {
        return H_PARAMETER;
    }

    spapr->nested.ptcr = ptcr; /* Save new partition table */

    return H_SUCCESS;
}

static target_ulong h_tlb_invalidate(PowerPCCPU *cpu,
                                     SpaprMachineState *spapr,
                                     target_ulong opcode,
                                     target_ulong *args)
{
    /*
     * The spapr virtual hypervisor nested HV implementation retains no L2
     * translation state except for TLB. And the TLB is always invalidated
     * across L1<->L2 transitions, so nothing is required here.
     */

    return H_SUCCESS;
}

static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    /*
     * This HCALL is not required, L1 KVM will take a slow path and walk the
     * page tables manually to do the data copy.
     */
    return H_FUNCTION;
}

static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    memcpy(save->gpr, env->gpr, sizeof(save->gpr));

    save->lr = env->lr;
    save->ctr = env->ctr;
    save->cfar = env->cfar;
    save->msr = env->msr;
    save->nip = env->nip;

    save->cr = ppc_get_cr(env);
    save->xer = cpu_read_xer(env);

    save->lpcr = env->spr[SPR_LPCR];
    save->lpidr = env->spr[SPR_LPIDR];
    save->pcr = env->spr[SPR_PCR];
    save->dpdes = env->spr[SPR_DPDES];
    save->hfscr = env->spr[SPR_HFSCR];
    save->srr0 = env->spr[SPR_SRR0];
    save->srr1 = env->spr[SPR_SRR1];
    save->sprg0 = env->spr[SPR_SPRG0];
    save->sprg1 = env->spr[SPR_SPRG1];
    save->sprg2 = env->spr[SPR_SPRG2];
    save->sprg3 = env->spr[SPR_SPRG3];
    save->pidr = env->spr[SPR_BOOKS_PID];
    save->ppr = env->spr[SPR_PPR];

    if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
        save->amor = env->spr[SPR_AMOR];
        save->dawr0 = env->spr[SPR_DAWR0];
        save->dawrx0 = env->spr[SPR_DAWRX0];
        save->ciabr = env->spr[SPR_CIABR];
        save->purr = env->spr[SPR_PURR];
        save->spurr = env->spr[SPR_SPURR];
        save->ic = env->spr[SPR_IC];
        save->vtb = env->spr[SPR_VTB];
        save->hdar = env->spr[SPR_HDAR];
        save->hdsisr = env->spr[SPR_HDSISR];
        save->heir = env->spr[SPR_HEIR];
        save->asdr = env->spr[SPR_ASDR];
        save->dawr1 = env->spr[SPR_DAWR1];
        save->dawrx1 = env->spr[SPR_DAWRX1];
        save->dexcr = env->spr[SPR_DEXCR];
        save->hdexcr = env->spr[SPR_HDEXCR];
        save->hashkeyr = env->spr[SPR_HASHKEYR];
        save->hashpkeyr = env->spr[SPR_HASHPKEYR];
        memcpy(save->vsr, env->vsr, sizeof(save->vsr));
        save->ebbhr = env->spr[SPR_EBBHR];
        save->tar = env->spr[SPR_TAR];
        save->ebbrr = env->spr[SPR_EBBRR];
        save->bescr = env->spr[SPR_BESCR];
        save->iamr = env->spr[SPR_IAMR];
        save->amr = env->spr[SPR_AMR];
        save->uamor = env->spr[SPR_UAMOR];
        save->dscr = env->spr[SPR_DSCR];
        save->fscr = env->spr[SPR_FSCR];
        save->pspb = env->spr[SPR_PSPB];
        save->ctrl = env->spr[SPR_CTRL];
        save->vrsave = env->spr[SPR_VRSAVE];
        save->dar = env->spr[SPR_DAR];
        save->dsisr = env->spr[SPR_DSISR];
        save->pmc1 = env->spr[SPR_POWER_PMC1];
        save->pmc2 = env->spr[SPR_POWER_PMC2];
        save->pmc3 = env->spr[SPR_POWER_PMC3];
        save->pmc4 = env->spr[SPR_POWER_PMC4];
        save->pmc5 = env->spr[SPR_POWER_PMC5];
        save->pmc6 = env->spr[SPR_POWER_PMC6];
        save->mmcr0 = env->spr[SPR_POWER_MMCR0];
        save->mmcr1 = env->spr[SPR_POWER_MMCR1];
        save->mmcr2 = env->spr[SPR_POWER_MMCR2];
        save->mmcra = env->spr[SPR_POWER_MMCRA];
        save->sdar = env->spr[SPR_POWER_SDAR];
        save->siar = env->spr[SPR_POWER_SIAR];
        save->sier = env->spr[SPR_POWER_SIER];
        save->vscr = ppc_get_vscr(env);
        save->fpscr = env->fpscr;
    }

    save->tb_offset = env->tb_env->tb_offset;
}

static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    memcpy(env->gpr, load->gpr, sizeof(env->gpr));

    env->lr = load->lr;
    env->ctr = load->ctr;
    env->cfar = load->cfar;
    env->msr = load->msr;
    env->nip = load->nip;

    ppc_set_cr(env, load->cr);
    cpu_write_xer(env, load->xer);

    env->spr[SPR_LPCR] = load->lpcr;
    env->spr[SPR_LPIDR] = load->lpidr;
    env->spr[SPR_PCR] = load->pcr;
    env->spr[SPR_DPDES] = load->dpdes;
    env->spr[SPR_HFSCR] = load->hfscr;
    env->spr[SPR_SRR0] = load->srr0;
    env->spr[SPR_SRR1] = load->srr1;
    env->spr[SPR_SPRG0] = load->sprg0;
    env->spr[SPR_SPRG1] = load->sprg1;
    env->spr[SPR_SPRG2] = load->sprg2;
    env->spr[SPR_SPRG3] = load->sprg3;
    env->spr[SPR_BOOKS_PID] = load->pidr;
    env->spr[SPR_PPR] = load->ppr;

    if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
        env->spr[SPR_AMOR] = load->amor;
        env->spr[SPR_DAWR0] = load->dawr0;
        env->spr[SPR_DAWRX0] = load->dawrx0;
        env->spr[SPR_CIABR] = load->ciabr;
        env->spr[SPR_PURR] = load->purr;
        env->spr[SPR_SPURR] = load->spurr;
        env->spr[SPR_IC] = load->ic;
        env->spr[SPR_VTB] = load->vtb;
        env->spr[SPR_HDAR] = load->hdar;
        env->spr[SPR_HDSISR] = load->hdsisr;
        env->spr[SPR_HEIR] = load->heir;
        env->spr[SPR_ASDR] = load->asdr;
        env->spr[SPR_DAWR1] = load->dawr1;
        env->spr[SPR_DAWRX1] = load->dawrx1;
        env->spr[SPR_DEXCR] = load->dexcr;
        env->spr[SPR_HDEXCR] = load->hdexcr;
        env->spr[SPR_HASHKEYR] = load->hashkeyr;
        env->spr[SPR_HASHPKEYR] = load->hashpkeyr;
        memcpy(env->vsr, load->vsr, sizeof(env->vsr));
        env->spr[SPR_EBBHR] = load->ebbhr;
        env->spr[SPR_TAR] = load->tar;
        env->spr[SPR_EBBRR] = load->ebbrr;
        env->spr[SPR_BESCR] = load->bescr;
        env->spr[SPR_IAMR] = load->iamr;
        env->spr[SPR_AMR] = load->amr;
        env->spr[SPR_UAMOR] = load->uamor;
        env->spr[SPR_DSCR] = load->dscr;
        env->spr[SPR_FSCR] = load->fscr;
        env->spr[SPR_PSPB] = load->pspb;
        env->spr[SPR_CTRL] = load->ctrl;
        env->spr[SPR_VRSAVE] = load->vrsave;
        env->spr[SPR_DAR] = load->dar;
        env->spr[SPR_DSISR] = load->dsisr;
        env->spr[SPR_POWER_PMC1] = load->pmc1;
        env->spr[SPR_POWER_PMC2] = load->pmc2;
        env->spr[SPR_POWER_PMC3] = load->pmc3;
        env->spr[SPR_POWER_PMC4] = load->pmc4;
        env->spr[SPR_POWER_PMC5] = load->pmc5;
        env->spr[SPR_POWER_PMC6] = load->pmc6;
        env->spr[SPR_POWER_MMCR0] = load->mmcr0;
        env->spr[SPR_POWER_MMCR1] = load->mmcr1;
        env->spr[SPR_POWER_MMCR2] = load->mmcr2;
        env->spr[SPR_POWER_MMCRA] = load->mmcra;
        env->spr[SPR_POWER_SDAR] = load->sdar;
        env->spr[SPR_POWER_SIAR] = load->siar;
        env->spr[SPR_POWER_SIER] = load->sier;
        ppc_store_vscr(env, load->vscr);
        ppc_store_fpscr(env, load->fpscr);
    }

    env->tb_env->tb_offset = load->tb_offset;

    /*
     * MSR updated, compute hflags and possible interrupts.
     */
    hreg_compute_hflags(env);
    ppc_maybe_interrupt(env);

    /*
     * Nested HV does not tag TLB entries between L1 and L2, so must
     * flush on transition.
     */
    tlb_flush(cs);
    env->reserve_addr = -1; /* Reset the reservation */
}

/*
 * When this handler returns, the environment is switched to the L2 guest
 * and TCG begins running that. spapr_exit_nested() performs the switch from
 * L2 back to L1 and returns from the H_ENTER_NESTED hcall.
 */
static target_ulong h_enter_nested(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct nested_ppc_state l2_state;
    target_ulong hv_ptr = args[0];
    target_ulong regs_ptr = args[1];
    target_ulong hdec, now = cpu_ppc_load_tbl(env);
    target_ulong lpcr, lpcr_mask;
    struct kvmppc_hv_guest_state *hvstate;
    struct kvmppc_hv_guest_state hv_state;
    struct kvmppc_pt_regs *regs;
    hwaddr len;

    if (spapr->nested.ptcr == 0) {
        return H_NOT_AVAILABLE;
    }

    len = sizeof(*hvstate);
    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false,
                                MEMTXATTRS_UNSPECIFIED);
    if (len != sizeof(*hvstate)) {
        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false);
        return H_PARAMETER;
    }

    memcpy(&hv_state, hvstate, len);

    address_space_unmap(CPU(cpu)->as, hvstate, len, len, false);

    /*
     * We accept versions 1 and 2. Version 2 fields are unused because TCG
     * does not implement DAWR*.
     */
    if (hv_state.version > HV_GUEST_STATE_VERSION) {
        return H_PARAMETER;
    }

    if (hv_state.lpid == 0) {
        return H_PARAMETER;
    }

    spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1);
    if (!spapr_cpu->nested_host_state) {
        return H_NO_MEM;
    }

    assert(env->spr[SPR_LPIDR] == 0);
    assert(env->spr[SPR_DPDES] == 0);
    nested_save_state(spapr_cpu->nested_host_state, cpu);

    len = sizeof(*regs);
    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false,
                             MEMTXATTRS_UNSPECIFIED);
    if (!regs || len != sizeof(*regs)) {
        address_space_unmap(CPU(cpu)->as, regs, len, 0, false);
        g_free(spapr_cpu->nested_host_state);
        return H_P2;
    }

    len = sizeof(l2_state.gpr);
    assert(len == sizeof(regs->gpr));
    memcpy(l2_state.gpr, regs->gpr, len);

    l2_state.lr = regs->link;
    l2_state.ctr = regs->ctr;
    l2_state.xer = regs->xer;
    l2_state.cr = regs->ccr;
    l2_state.msr = regs->msr;
    l2_state.nip = regs->nip;

    address_space_unmap(CPU(cpu)->as, regs, len, len, false);

    l2_state.cfar = hv_state.cfar;
    l2_state.lpidr = hv_state.lpid;

    lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER;
    lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask);
    lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE;
    lpcr &= ~LPCR_LPES0;
    l2_state.lpcr = lpcr & pcc->lpcr_mask;

    l2_state.pcr = hv_state.pcr;
    /* hv_state.amor is not used */
    l2_state.dpdes = hv_state.dpdes;
    l2_state.hfscr = hv_state.hfscr;
    /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs */
    l2_state.srr0 = hv_state.srr0;
    l2_state.srr1 = hv_state.srr1;
    l2_state.sprg0 = hv_state.sprg[0];
    l2_state.sprg1 = hv_state.sprg[1];
    l2_state.sprg2 = hv_state.sprg[2];
    l2_state.sprg3 = hv_state.sprg[3];
    l2_state.pidr = hv_state.pidr;
    l2_state.ppr = hv_state.ppr;
    l2_state.tb_offset = env->tb_env->tb_offset + hv_state.tb_offset;

    /*
     * Switch to the nested guest environment and start the "hdec" timer.
     */
    nested_load_state(cpu, &l2_state);

    hdec = hv_state.hdec_expiry - now;
    cpu_ppc_hdecr_init(env);
    cpu_ppc_store_hdecr(env, hdec);

    /*
     * The hv_state.vcpu_token is not needed. It is used by the KVM
     * implementation to remember which L2 vCPU last ran on which physical
     * CPU so as to invalidate process scope translations if it is moved
     * between physical CPUs. For now TLBs are always flushed on L1<->L2
     * transitions so this is not a problem.
     *
     * Could validate that the same vcpu_token does not attempt to run on
     * different L1 vCPUs at the same time, but that would be a L1 KVM bug
     * and it's not obviously worth a new data structure to do it.
     */

    spapr_cpu->in_nested = true;

    /*
     * The spapr hcall helper sets env->gpr[3] to the return value, but at
     * this point the L1 is not returning from the hcall but rather we
     * start running the L2; r3 must not be clobbered, so return env->gpr[3]
     * to leave it unchanged.
     */
    return env->gpr[3];
}

static void spapr_exit_nested_hv(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct nested_ppc_state l2_state;
    target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
    target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
    target_ulong hsrr0, hsrr1, hdar, asdr, hdsisr;
    struct kvmppc_hv_guest_state *hvstate;
    struct kvmppc_pt_regs *regs;
    hwaddr len;

    nested_save_state(&l2_state, cpu);
    hsrr0 = env->spr[SPR_HSRR0];
    hsrr1 = env->spr[SPR_HSRR1];
    hdar = env->spr[SPR_HDAR];
    hdsisr = env->spr[SPR_HDSISR];
    asdr = env->spr[SPR_ASDR];

    /*
     * Switch back to the host environment (including for any error).
     */
    assert(env->spr[SPR_LPIDR] != 0);
    nested_load_state(cpu, spapr_cpu->nested_host_state);
    env->gpr[3] = env->excp_vectors[excp]; /* hcall return value */

    cpu_ppc_hdecr_exit(env);

    spapr_cpu->in_nested = false;

    g_free(spapr_cpu->nested_host_state);
    spapr_cpu->nested_host_state = NULL;

    len = sizeof(*hvstate);
    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
                                MEMTXATTRS_UNSPECIFIED);
    if (len != sizeof(*hvstate)) {
        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
        env->gpr[3] = H_PARAMETER;
        return;
    }

    hvstate->cfar = l2_state.cfar;
    hvstate->lpcr = l2_state.lpcr;
    hvstate->pcr = l2_state.pcr;
    hvstate->dpdes = l2_state.dpdes;
    hvstate->hfscr = l2_state.hfscr;

    if (excp == POWERPC_EXCP_HDSI) {
        hvstate->hdar = hdar;
        hvstate->hdsisr = hdsisr;
        hvstate->asdr = asdr;
    } else if (excp == POWERPC_EXCP_HISI) {
        hvstate->asdr = asdr;
    }

    /* HEIR should be implemented for HV mode and saved here. */
    hvstate->srr0 = l2_state.srr0;
    hvstate->srr1 = l2_state.srr1;
    hvstate->sprg[0] = l2_state.sprg0;
    hvstate->sprg[1] = l2_state.sprg1;
    hvstate->sprg[2] = l2_state.sprg2;
    hvstate->sprg[3] = l2_state.sprg3;
    hvstate->pidr = l2_state.pidr;
    hvstate->ppr = l2_state.ppr;

    /* Is it okay to specify write length larger than actual data written? */
    address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);

    len = sizeof(*regs);
    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true,
                             MEMTXATTRS_UNSPECIFIED);
    if (!regs || len != sizeof(*regs)) {
        address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
        env->gpr[3] = H_P2;
        return;
    }

    len = sizeof(env->gpr);
    assert(len == sizeof(regs->gpr));
    memcpy(regs->gpr, l2_state.gpr, len);

    regs->link = l2_state.lr;
    regs->ctr = l2_state.ctr;
    regs->xer = l2_state.xer;
    regs->ccr = l2_state.cr;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_SYSCALL) {
        regs->nip = l2_state.srr0;
        regs->msr = l2_state.srr1 & env->msr_mask;
    } else {
        regs->nip = hsrr0;
        regs->msr = hsrr1 & env->msr_mask;
    }

    /* Is it okay to specify write length larger than actual data written? */
    address_space_unmap(CPU(cpu)->as, regs, len, len, true);
}

void spapr_exit_nested(PowerPCCPU *cpu, int excp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    assert(spapr_cpu->in_nested);
    if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
        spapr_exit_nested_hv(cpu, excp);
    } else {
        g_assert_not_reached();
    }
}

static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest,
                                    target_ulong vcpuid, bool inoutbuf)
{
    struct SpaprMachineStateNestedGuestVcpu *vcpu;
    /*
     * Perform sanity checks for the provided vcpuid of a guest.
     * For now, ensure it's valid, allocated and enabled for use.
     */

    if (vcpuid >= PAPR_NESTED_GUEST_VCPU_MAX) {
        return false;
    }

    if (!(vcpuid < guest->nr_vcpus)) {
        return false;
    }

    vcpu = &guest->vcpus[vcpuid];
    if (!vcpu->enabled) {
        return false;
    }

    if (!inoutbuf) {
        return true;
    }

    /* Check to see if the in/out buffers are registered */
    if (vcpu->runbufin.addr && vcpu->runbufout.addr) {
        return true;
    }

    return false;
}

static void *get_vcpu_state_ptr(SpaprMachineStateNestedGuest *guest,
                                target_ulong vcpuid)
{
    assert(spapr_nested_vcpu_check(guest, vcpuid, false));
    return &guest->vcpus[vcpuid].state;
}

static void *get_vcpu_ptr(SpaprMachineStateNestedGuest *guest,
                          target_ulong vcpuid)
{
    assert(spapr_nested_vcpu_check(guest, vcpuid, false));
    return &guest->vcpus[vcpuid];
}

static void *get_guest_ptr(SpaprMachineStateNestedGuest *guest,
                           target_ulong vcpuid)
{
    return guest; /* for GSBE_NESTED */
}

/*
 * set=1 means the L1 is trying to set some state
 * set=0 means the L1 is trying to get some state
 */
static void copy_state_8to8(void *a, void *b, bool set)
{
    /* set takes from the Big endian element_buf and sets internal buffer */

    if (set) {
        *(uint64_t *)a = be64_to_cpu(*(uint64_t *)b);
    } else {
        *(uint64_t *)b = cpu_to_be64(*(uint64_t *)a);
    }
}

static void copy_state_4to4(void *a, void *b, bool set)
{
    if (set) {
        *(uint32_t *)a = be32_to_cpu(*(uint32_t *)b);
    } else {
        *(uint32_t *)b = cpu_to_be32(*((uint32_t *)a));
    }
}

static void copy_state_16to16(void *a, void *b, bool set)
{
    uint64_t *src, *dst;

    if (set) {
        src = b;
        dst = a;

        dst[1] = be64_to_cpu(src[0]);
        dst[0] = be64_to_cpu(src[1]);
    } else {
        src = a;
        dst = b;

        dst[1] = cpu_to_be64(src[0]);
        dst[0] = cpu_to_be64(src[1]);
    }
}

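/*
 * Note on copy_state_16to16() above: the doubleword order is reversed in
 * addition to the byte order within each doubleword, presumably because
 * the guest state buffer holds the full 128-bit value big-endian end to
 * end while the internal representation keeps it as two host-order
 * 64-bit halves.
 */
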
static void copy_state_4to8(void *a, void *b, bool set)
{
    if (set) {
        *(uint64_t *)a = (uint64_t) be32_to_cpu(*(uint32_t *)b);
    } else {
        *(uint32_t *)b = cpu_to_be32((uint32_t) (*((uint64_t *)a)));
    }
}

static void copy_state_pagetbl(void *a, void *b, bool set)
{
    uint64_t *pagetbl;
    uint64_t *buf; /* 3 double words */
    uint64_t rts;

    assert(set);

    pagetbl = a;
    buf = b;

    *pagetbl = be64_to_cpu(buf[0]);
    /* as per ISA section 6.7.6.1 */
    *pagetbl |= PATE0_HR; /* Host Radix bit is 1 */

    /* RTS */
    rts = be64_to_cpu(buf[1]);
    assert(rts == 52);
    rts = rts - 31; /* since radix tree size = 2^(RTS+31) */
    *pagetbl |= ((rts & 0x7) << 5); /* RTS2 is bit 56:58 */
    *pagetbl |= (((rts >> 3) & 0x3) << 61); /* RTS1 is bit 1:2 */

    /* RPDS {Size = 2^(RPDS+3) , RPDS >=5} */
    *pagetbl |= 63 - clz64(be64_to_cpu(buf[2])) - 3;
}

static void copy_state_proctbl(void *a, void *b, bool set)
{
    uint64_t *proctbl;
    uint64_t *buf; /* 2 double words */

    assert(set);

    proctbl = a;
    buf = b;
    /* PRTB: Process Table Base */
    *proctbl = be64_to_cpu(buf[0]);
    /* PRTS: Process Table Size = 2^(12+PRTS) */
    if (be64_to_cpu(buf[1]) == (1ULL << 12)) {
        *proctbl |= 0;
    } else if (be64_to_cpu(buf[1]) == (1ULL << 24)) {
        *proctbl |= 12;
    } else {
        g_assert_not_reached();
    }
}

static void copy_state_runbuf(void *a, void *b, bool set)
{
    uint64_t *buf; /* 2 double words */
    struct SpaprMachineStateNestedGuestVcpuRunBuf *runbuf;

    assert(set);

    runbuf = a;
    buf = b;

    runbuf->addr = be64_to_cpu(buf[0]);
    assert(runbuf->addr);

    /* per spec */
    assert(be64_to_cpu(buf[1]) <= 16384);

    /*
     * This will also hit in the input buffer but should be fine for
     * now. If not we can split this function.
     */
    assert(be64_to_cpu(buf[1]) >= VCPU_OUT_BUF_MIN_SZ);

    runbuf->size = be64_to_cpu(buf[1]);
}

/* tell the L1 how big we want the output vcpu run buffer */
static void out_buf_min_size(void *a, void *b, bool set)
{
    uint64_t *buf; /* 1 double word */

    assert(!set);

    buf = b;

    buf[0] = cpu_to_be64(VCPU_OUT_BUF_MIN_SZ);
}

static void copy_logical_pvr(void *a, void *b, bool set)
{
    SpaprMachineStateNestedGuest *guest;
    uint32_t *buf; /* 1 word */
    uint32_t *pvr_logical_ptr;
    uint32_t pvr_logical;
    target_ulong pcr = 0;

    pvr_logical_ptr = a;
    buf = b;

    if (!set) {
        buf[0] = cpu_to_be32(*pvr_logical_ptr);
        return;
    }

    pvr_logical = be32_to_cpu(buf[0]);

    *pvr_logical_ptr = pvr_logical;

    if (*pvr_logical_ptr) {
        switch (*pvr_logical_ptr) {
        case CPU_POWERPC_LOGICAL_3_10:
            pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00;
            break;
        case CPU_POWERPC_LOGICAL_3_00:
            pcr = PCR_COMPAT_3_00;
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not set PCR for LPVR=0x%08x\n",
                          *pvr_logical_ptr);
            return;
        }
    }

    guest = container_of(pvr_logical_ptr,
                         struct SpaprMachineStateNestedGuest,
                         pvr_logical);
    for (int i = 0; i < guest->nr_vcpus; i++) {
        guest->vcpus[i].state.pcr = ~pcr | HVMASK_PCR;
    }
}

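/*
 * Example of the LPVR handling above: an L1 that sets the logical PVR to
 * CPU_POWERPC_LOGICAL_3_10 causes PCR_COMPAT_3_10 | PCR_COMPAT_3_00
 * (combined with HVMASK_PCR) to be applied to every vCPU of that guest.
 */
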
static void copy_tb_offset(void *a, void *b, bool set)
{
    SpaprMachineStateNestedGuest *guest;
    uint64_t *buf; /* 1 double word */
    uint64_t *tb_offset_ptr;
    uint64_t tb_offset;

    tb_offset_ptr = a;
    buf = b;

    if (!set) {
        buf[0] = cpu_to_be64(*tb_offset_ptr);
        return;
    }

    tb_offset = be64_to_cpu(buf[0]);
    /* need to copy this to the individual tb_offset for each vcpu */
    guest = container_of(tb_offset_ptr,
                         struct SpaprMachineStateNestedGuest,
                         tb_offset);
    for (int i = 0; i < guest->nr_vcpus; i++) {
        guest->vcpus[i].tb_offset = tb_offset;
    }
}

static void copy_state_hdecr(void *a, void *b, bool set)
{
    uint64_t *buf; /* 1 double word */
    uint64_t *hdecr_expiry_tb;

    hdecr_expiry_tb = a;
    buf = b;

    if (!set) {
        buf[0] = cpu_to_be64(*hdecr_expiry_tb);
        return;
    }

    *hdecr_expiry_tb = be64_to_cpu(buf[0]);
}

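/*
 * Table mapping each GSB element ID to its size, the location of the
 * backing field (CPU env, guest or vCPU struct) and the helper used to
 * marshal the value to/from the big-endian guest state buffer. It is
 * shared by the H_GUEST_GET_STATE and H_GUEST_SET_STATE paths.
 */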
struct guest_state_element_type guest_state_element_types[] = {
    GUEST_STATE_ELEMENT_NOP(GSB_HV_VCPU_IGNORED_ID, 0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR0, gpr[0]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR1, gpr[1]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR2, gpr[2]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR3, gpr[3]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR4, gpr[4]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR5, gpr[5]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR6, gpr[6]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR7, gpr[7]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR8, gpr[8]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR9, gpr[9]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR10, gpr[10]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR11, gpr[11]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR12, gpr[12]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR13, gpr[13]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR14, gpr[14]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR15, gpr[15]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR16, gpr[16]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR17, gpr[17]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR18, gpr[18]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR19, gpr[19]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR20, gpr[20]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR21, gpr[21]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR22, gpr[22]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR23, gpr[23]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR24, gpr[24]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR25, gpr[25]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR26, gpr[26]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR27, gpr[27]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR28, gpr[28]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR29, gpr[29]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR30, gpr[30]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR31, gpr[31]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_NIA, nip),
    GSE_ENV_DWM(GSB_VCPU_SPR_MSR, msr, HVMASK_MSR),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTR, ctr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_LR, lr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_XER, xer),
    GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_CR, cr),
    GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_MMCR3),
    GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER2),
    GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER3),
    GUEST_STATE_ELEMENT_NOP_W(GSB_VCPU_SPR_WORT),
    GSE_ENV_DWM(GSB_VCPU_SPR_LPCR, lpcr, HVMASK_LPCR),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMOR, amor),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HFSCR, hfscr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR0, dawr0),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX0, dawrx0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CIABR, ciabr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PURR, purr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPURR, spurr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IC, ic),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_VTB, vtb),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HDAR, hdar),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HDSISR, hdsisr),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HEIR, heir),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_ASDR, asdr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR0, srr0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR1, srr1),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG0, sprg0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG1, sprg1),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG2, sprg2),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG3, sprg3),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PIDR, pidr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CFAR, cfar),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PPR, ppr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR1, dawr1),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX1, dawrx1),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DEXCR, dexcr),
    GSE_ENV_DWM(GSB_VCPU_SPR_HDEXCR, hdexcr, HVMASK_HDEXCR),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHKEYR, hashkeyr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHPKEYR, hashpkeyr),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR0, vsr[0]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR1, vsr[1]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR2, vsr[2]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR3, vsr[3]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR4, vsr[4]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR5, vsr[5]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR6, vsr[6]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR7, vsr[7]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR8, vsr[8]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR9, vsr[9]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR10, vsr[10]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR11, vsr[11]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR12, vsr[12]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR13, vsr[13]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR14, vsr[14]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR15, vsr[15]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR16, vsr[16]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR17, vsr[17]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR18, vsr[18]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR19, vsr[19]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR20, vsr[20]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR21, vsr[21]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR22, vsr[22]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR23, vsr[23]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR24, vsr[24]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR25, vsr[25]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR26, vsr[26]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR27, vsr[27]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR28, vsr[28]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR29, vsr[29]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR30, vsr[30]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR31, vsr[31]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR32, vsr[32]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR33, vsr[33]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR34, vsr[34]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR35, vsr[35]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR36, vsr[36]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR37, vsr[37]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR38, vsr[38]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR39, vsr[39]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR40, vsr[40]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR41, vsr[41]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR42, vsr[42]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR43, vsr[43]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR44, vsr[44]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR45, vsr[45]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR46, vsr[46]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR47, vsr[47]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR48, vsr[48]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR49, vsr[49]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR50, vsr[50]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR51, vsr[51]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR52, vsr[52]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR53, vsr[53]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR54, vsr[54]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR55, vsr[55]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR56, vsr[56]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR57, vsr[57]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR58, vsr[58]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR59, vsr[59]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR60, vsr[60]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR61, vsr[61]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR62, vsr[62]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR63, vsr[63]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBHR, ebbhr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_TAR, tar),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBRR, ebbrr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_BESCR, bescr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IAMR, iamr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMR, amr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_UAMOR, uamor),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DSCR, dscr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC1, pmc1),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC2, pmc2),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC3, pmc3),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC4, pmc4),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC5, pmc5),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC6, pmc6),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR0, mmcr0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR1, mmcr1),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR2, mmcr2),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCRA, mmcra),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SDAR, sdar),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIAR, siar),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIER, sier),
    GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_VSCR, vscr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FPSCR, fpscr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_DEC_EXPIRE_TB, dec_expiry_tb),
    GSBE_NESTED(GSB_PART_SCOPED_PAGETBL, 0x18, parttbl[0], copy_state_pagetbl),
    GSBE_NESTED(GSB_PROCESS_TBL, 0x10, parttbl[1], copy_state_proctbl),
    GSBE_NESTED(GSB_VCPU_LPVR, 0x4, pvr_logical, copy_logical_pvr),
    GSBE_NESTED_MSK(GSB_TB_OFFSET, 0x8, tb_offset, copy_tb_offset,
                    HVMASK_TB_OFFSET),
    GSBE_NESTED_VCPU(GSB_VCPU_IN_BUFFER, 0x10, runbufin, copy_state_runbuf),
    GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf),
    GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size),
    GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb,
                     copy_state_hdecr)
};

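/*
 * Guest state buffer wire format, as consumed below: a big-endian u32
 * element count followed by packed elements, each of which is a
 * big-endian u16 ID, a u16 value size, then 'size' bytes of value.
 * guest_state_element_next() advances by the value size plus the element
 * header.
 */
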
void spapr_nested_gsb_init(void)
{
    struct guest_state_element_type *type;

    /* Init the guest state elements lookup table, flags for now */
    for (int i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) {
        type = &guest_state_element_types[i];

        assert(type->id <= GSB_LAST);
        if (type->id >= GSB_VCPU_SPR_HDAR) {
            /* 0xf000 - 0xf005 Thread + RO */
            type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY;
        } else if (type->id >= GSB_VCPU_IN_BUFFER) {
            /* 0x0c00 - 0xf000 Thread + RW */
            type->flags = 0;
        } else if (type->id >= GSB_VCPU_LPVR) {
            /* 0x0003 - 0x0bff Guest + RW */
            type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE;
        } else if (type->id >= GSB_HV_VCPU_STATE_SIZE) {
            /* 0x0001 - 0x0002 Guest + RO */
            type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY |
                          GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE;
        }
    }
}

static struct guest_state_element *guest_state_element_next(
    struct guest_state_element *element,
    int64_t *len,
    int64_t *num_elements)
{
    uint16_t size;

    /* size is of element->value[] only. Not whole guest_state_element */
    size = be16_to_cpu(element->size);

    if (len) {
        *len -= size + offsetof(struct guest_state_element, value);
    }

    if (num_elements) {
        *num_elements -= 1;
    }

    return (struct guest_state_element *)(element->value + size);
}

static
struct guest_state_element_type *guest_state_element_type_find(uint16_t id)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) {
        if (id == guest_state_element_types[i].id) {
            return &guest_state_element_types[i];
        }
    }

    return NULL;
}

static void log_element(struct guest_state_element *element,
                        struct guest_state_request *gsr)
{
    qemu_log_mask(LOG_GUEST_ERROR, "h_guest_%s_state id:0x%04x size:0x%04x",
                  gsr->flags & GUEST_STATE_REQUEST_SET ? "set" : "get",
                  be16_to_cpu(element->id), be16_to_cpu(element->size));
    qemu_log_mask(LOG_GUEST_ERROR, "buf:0x%016"PRIx64" ...\n",
                  be64_to_cpu(*(uint64_t *)element->value));
}

static bool guest_state_request_check(struct guest_state_request *gsr)
{
    int64_t num_elements, len = gsr->len;
    struct guest_state_buffer *gsb = gsr->gsb;
    struct guest_state_element *element;
    struct guest_state_element_type *type;
    uint16_t id, size;

    /* The buffer must at least hold the 32-bit num_elements field */
    assert(len >= 4);

    num_elements = be32_to_cpu(gsb->num_elements);
    element = gsb->elements;
    len -= sizeof(gsb->num_elements);

    /* Walk the buffer to validate the length */
    while (num_elements) {
        id = be16_to_cpu(element->id);
        size = be16_to_cpu(element->size);

        if (false) {
            log_element(element, gsr);
        }
        /* buffer size too small */
        if (len < 0) {
            return false;
        }

        type = guest_state_element_type_find(id);
        if (!type) {
            qemu_log_mask(LOG_GUEST_ERROR, "Element ID %04x unknown\n", id);
            log_element(element, gsr);
            return false;
        }

        if (id == GSB_HV_VCPU_IGNORED_ID) {
            goto next_element;
        }

        if (size != type->size) {
            qemu_log_mask(LOG_GUEST_ERROR, "Size mismatch. Element ID:%04x. "
                          "Size Exp:%i Got:%i\n", id, type->size, size);
            log_element(element, gsr);
            return false;
        }

        if ((type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY) &&
            (gsr->flags & GUEST_STATE_REQUEST_SET)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to set a read-only Element "
                          "ID:%04x.\n", id);
            return false;
        }

        if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) {
            /* guest wide element type */
            if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) {
                qemu_log_mask(LOG_GUEST_ERROR, "trying to set a guest wide "
                              "Element ID:%04x.\n", id);
                return false;
            }
        } else {
            /* thread wide element type */
            if (gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE) {
                qemu_log_mask(LOG_GUEST_ERROR, "trying to set a thread wide "
                              "Element ID:%04x.\n", id);
                return false;
            }
        }
next_element:
        element = guest_state_element_next(element, &len, &num_elements);
    }
    return true;
}

static bool is_gsr_invalid(struct guest_state_request *gsr,
                           struct guest_state_element *element,
                           struct guest_state_element_type *type)
{
    if ((gsr->flags & GUEST_STATE_REQUEST_SET) &&
        (*(uint64_t *)(element->value) & ~(type->mask))) {
        log_element(element, gsr);
        qemu_log_mask(LOG_GUEST_ERROR, "L1 can't set reserved bits "
                      "(allowed mask: 0x%08"PRIx64")\n", type->mask);
        return true;
    }
    return false;
}

static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];

    if (flags) { /* don't handle any flags capabilities for now */
        return H_PARAMETER;
    }

    /* P10 capabilities */
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
                         spapr->max_compat_pvr)) {
        env->gpr[4] |= H_GUEST_CAPABILITIES_P10_MODE;
    }

    /* P9 capabilities */
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                         spapr->max_compat_pvr)) {
        env->gpr[4] |= H_GUEST_CAPABILITIES_P9_MODE;
    }

    return H_SUCCESS;
}

static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong capabilities = args[1];
    env->gpr[4] = 0;

    if (flags) { /* don't handle any flags capabilities for now */
        return H_PARAMETER;
    }

    if (capabilities & H_GUEST_CAPABILITIES_COPY_MEM) {
        env->gpr[4] = 1;
        return H_P2; /* isn't supported */
    }

    /*
     * If there are no capabilities configured, set R5 to the index of
     * the first supported Power Processor Mode
     */
    if (!capabilities) {
        env->gpr[4] = 1;

        /* set R5 to the first supported Power Processor Mode */
        if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
                             spapr->max_compat_pvr)) {
            env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP;
        } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                                    spapr->max_compat_pvr)) {
            env->gpr[5] = H_GUEST_CAP_P9_MODE_BMAP;
        }

        return H_P2;
    }

    /*
     * If an invalid capability is set, R5 should contain the index of the
     * invalid capability bit
     */
    if (capabilities & ~H_GUEST_CAP_VALID_MASK) {
        env->gpr[4] = 1;

        /* Set R5 to the index of the invalid capability */
        env->gpr[5] = 63 - ctz64(capabilities);

        return H_P2;
    }

    if (!spapr->nested.capabilities_set) {
        spapr->nested.capabilities_set = true;
        spapr->nested.pvr_base = env->spr[SPR_PVR];
        return H_SUCCESS;
    } else {
        return H_STATE;
    }
}

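/*
 * Guest lifecycle hcalls below: H_GUEST_CREATE allocates a guest ID,
 * H_GUEST_CREATE_VCPU adds vCPUs to it (IDs must be allocated linearly),
 * and H_GUEST_DELETE tears down one guest or, with the all-guests flag,
 * every guest.
 */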
static void
destroy_guest_helper(gpointer value)
{
    struct SpaprMachineStateNestedGuest *guest = value;
    g_free(guest->vcpus);
    g_free(guest);
}

static target_ulong h_guest_create(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong continue_token = args[1];
    uint64_t guestid;
    int nguests = 0;
    struct SpaprMachineStateNestedGuest *guest;

    if (flags) { /* don't handle any flags for now */
        return H_UNSUPPORTED_FLAG;
    }

    if (continue_token != -1) {
        return H_P2;
    }

    if (!spapr->nested.capabilities_set) {
        return H_STATE;
    }

    if (!spapr->nested.guests) {
        spapr->nested.guests = g_hash_table_new_full(NULL,
                                                     NULL,
                                                     NULL,
                                                     destroy_guest_helper);
    }

    nguests = g_hash_table_size(spapr->nested.guests);

    if (nguests == PAPR_NESTED_GUEST_MAX) {
        return H_NO_MEM;
    }

    /* Lookup for available guestid */
    for (guestid = 1; guestid < PAPR_NESTED_GUEST_MAX; guestid++) {
        if (!(g_hash_table_lookup(spapr->nested.guests,
                                  GINT_TO_POINTER(guestid)))) {
            break;
        }
    }

    if (guestid == PAPR_NESTED_GUEST_MAX) {
        return H_NO_MEM;
    }

    guest = g_try_new0(struct SpaprMachineStateNestedGuest, 1);
    if (!guest) {
        return H_NO_MEM;
    }

    guest->pvr_logical = spapr->nested.pvr_base;
    g_hash_table_insert(spapr->nested.guests, GINT_TO_POINTER(guestid), guest);
    env->gpr[4] = guestid;

    return H_SUCCESS;
}

static target_ulong h_guest_delete(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong guestid = args[1];
    struct SpaprMachineStateNestedGuest *guest;

    /*
     * Handle the deleteAllGuests flag: if set, guestid is ignored and
     * all guests are deleted.
     */
    if (flags & ~H_GUEST_DELETE_ALL_FLAG) {
        return H_UNSUPPORTED_FLAG; /* other flag bits reserved */
    } else if (flags & H_GUEST_DELETE_ALL_FLAG) {
        g_hash_table_destroy(spapr->nested.guests);
        return H_SUCCESS;
    }

    guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid));
    if (!guest) {
        return H_P2;
    }

    g_hash_table_remove(spapr->nested.guests, GINT_TO_POINTER(guestid));

    return H_SUCCESS;
}

static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong guestid = args[1];
    target_ulong vcpuid = args[2];
    SpaprMachineStateNestedGuest *guest;

    if (flags) { /* don't handle any flags for now */
        return H_UNSUPPORTED_FLAG;
    }

    guest = spapr_get_nested_guest(spapr, guestid);
    if (!guest) {
        return H_P2;
    }

    if (vcpuid < guest->nr_vcpus) {
        qemu_log_mask(LOG_UNIMP, "vcpuid " TARGET_FMT_ld " already in use.",
                      vcpuid);
        return H_IN_USE;
    }
    /* linear vcpuid allocation only */
    assert(vcpuid == guest->nr_vcpus);

    if (guest->nr_vcpus >= PAPR_NESTED_GUEST_VCPU_MAX) {
        return H_P3;
    }

    SpaprMachineStateNestedGuestVcpu *vcpus, *curr_vcpu;
    vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu,
                        guest->vcpus,
                        guest->nr_vcpus + 1);
    if (!vcpus) {
        return H_NO_MEM;
    }
    guest->vcpus = vcpus;
    curr_vcpu = &vcpus[guest->nr_vcpus];
    memset(curr_vcpu, 0, sizeof(SpaprMachineStateNestedGuestVcpu));

    curr_vcpu->enabled = true;
    guest->nr_vcpus++;

    return H_SUCCESS;
}

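/*
 * Apply a validated guest state request: walk the elements and, for each
 * one backed by a location/copy pair, get or set the value in place. An
 * attempt to set bits outside an element's permitted mask fails the whole
 * request with H_INVALID_ELEMENT_VALUE.
 */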
static target_ulong getset_state(SpaprMachineStateNestedGuest *guest,
                                 uint64_t vcpuid,
                                 struct guest_state_request *gsr)
{
    void *ptr;
    uint16_t id;
    struct guest_state_element *element;
    struct guest_state_element_type *type;
    int64_t lenleft, num_elements;

    lenleft = gsr->len;

    if (!guest_state_request_check(gsr)) {
        return H_P3;
    }

    num_elements = be32_to_cpu(gsr->gsb->num_elements);
    element = gsr->gsb->elements;
    /* Process the elements */
    while (num_elements) {
        type = NULL;
        /* log_element(element, gsr); */

        id = be16_to_cpu(element->id);
        if (id == GSB_HV_VCPU_IGNORED_ID) {
            goto next_element;
        }

        type = guest_state_element_type_find(id);
        assert(type);

        /* Get pointer to guest data to get/set */
        if (type->location && type->copy) {
            ptr = type->location(guest, vcpuid);
            assert(ptr);
            if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) {
                return H_INVALID_ELEMENT_VALUE;
            }
            type->copy(ptr + type->offset, element->value,
                       gsr->flags & GUEST_STATE_REQUEST_SET ? true : false);
        }

next_element:
        element = guest_state_element_next(element, &lenleft, &num_elements);
    }

    return H_SUCCESS;
}

static target_ulong map_and_getset_state(PowerPCCPU *cpu,
                                         SpaprMachineStateNestedGuest *guest,
                                         uint64_t vcpuid,
                                         struct guest_state_request *gsr)
{
    target_ulong rc;
    int64_t len;
    bool is_write;

    len = gsr->len;
    /* only get_state would require write access to the provided buffer */
    is_write = (gsr->flags & GUEST_STATE_REQUEST_SET) ? false : true;
    gsr->gsb = address_space_map(CPU(cpu)->as, gsr->buf, (uint64_t *)&len,
                                 is_write, MEMTXATTRS_UNSPECIFIED);
    if (!gsr->gsb) {
        rc = H_P3;
        goto out1;
    }

    if (len != gsr->len) {
        rc = H_P3;
        goto out1;
    }

    rc = getset_state(guest, vcpuid, gsr);

out1:
    address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len);
    return rc;
}

static target_ulong h_guest_getset_state(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong *args,
                                         bool set)
{
    target_ulong flags = args[0];
    target_ulong lpid = args[1];
    target_ulong vcpuid = args[2];
    target_ulong buf = args[3];
    target_ulong buflen = args[4];
    struct guest_state_request gsr;
    SpaprMachineStateNestedGuest *guest;

    guest = spapr_get_nested_guest(spapr, lpid);
    if (!guest) {
        return H_P2;
    }
    gsr.buf = buf;
    assert(buflen <= GSB_MAX_BUF_SIZE);
    gsr.len = buflen;
    gsr.flags = 0;
    if (flags & H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) {
        gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE;
    }
    if (flags & ~H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) {
        return H_PARAMETER; /* flag not supported yet */
    }

    if (set) {
        gsr.flags |= GUEST_STATE_REQUEST_SET;
    }
    return map_and_getset_state(cpu, guest, vcpuid, &gsr);
}

static target_ulong h_guest_set_state(PowerPCCPU *cpu,
                                      SpaprMachineState *spapr,
                                      target_ulong opcode,
                                      target_ulong *args)
{
    return h_guest_getset_state(cpu, spapr, args, true);
}

static target_ulong h_guest_get_state(PowerPCCPU *cpu,
                                      SpaprMachineState *spapr,
                                      target_ulong opcode,
                                      target_ulong *args)
{
    return h_guest_getset_state(cpu, spapr, args, false);
}

void spapr_register_nested_hv(void)
{
    spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl);
    spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested);
    spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate);
    spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest);
}

void spapr_unregister_nested_hv(void)
{
    spapr_unregister_hypercall(KVMPPC_H_SET_PARTITION_TABLE);
    spapr_unregister_hypercall(KVMPPC_H_ENTER_NESTED);
    spapr_unregister_hypercall(KVMPPC_H_TLB_INVALIDATE);
    spapr_unregister_hypercall(KVMPPC_H_COPY_TOFROM_GUEST);
}

void spapr_register_nested_papr(void)
{
    spapr_register_hypercall(H_GUEST_GET_CAPABILITIES,
                             h_guest_get_capabilities);
    spapr_register_hypercall(H_GUEST_SET_CAPABILITIES,
                             h_guest_set_capabilities);
    spapr_register_hypercall(H_GUEST_CREATE, h_guest_create);
    spapr_register_hypercall(H_GUEST_DELETE, h_guest_delete);
    spapr_register_hypercall(H_GUEST_CREATE_VCPU, h_guest_create_vcpu);
    spapr_register_hypercall(H_GUEST_SET_STATE, h_guest_set_state);
    spapr_register_hypercall(H_GUEST_GET_STATE, h_guest_get_state);
}

void spapr_unregister_nested_papr(void)
{
    spapr_unregister_hypercall(H_GUEST_GET_CAPABILITIES);
    spapr_unregister_hypercall(H_GUEST_SET_CAPABILITIES);
    spapr_unregister_hypercall(H_GUEST_CREATE);
    spapr_unregister_hypercall(H_GUEST_DELETE);
    spapr_unregister_hypercall(H_GUEST_CREATE_VCPU);
    spapr_unregister_hypercall(H_GUEST_SET_STATE);
    spapr_unregister_hypercall(H_GUEST_GET_STATE);
}

#else
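/* Stubs for builds without TCG, where nested HV emulation is unavailable. */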
void spapr_exit_nested(PowerPCCPU *cpu, int excp)
{
    g_assert_not_reached();
}

void spapr_register_nested_hv(void)
{
    /* DO NOTHING */
}

void spapr_unregister_nested_hv(void)
{
    /* DO NOTHING */
}

bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong lpid, ppc_v3_pate_t *entry)
{
    return false;
}

bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
                                target_ulong lpid, ppc_v3_pate_t *entry)
{
    return false;
}

void spapr_register_nested_papr(void)
{
    /* DO NOTHING */
}

void spapr_unregister_nested_papr(void)
{
    /* DO NOTHING */
}

void spapr_nested_gsb_init(void)
{
    /* DO NOTHING */
}

#endif