1 #include "qemu/osdep.h" 2 #include "qemu/cutils.h" 3 #include "exec/exec-all.h" 4 #include "helper_regs.h" 5 #include "hw/ppc/ppc.h" 6 #include "hw/ppc/spapr.h" 7 #include "hw/ppc/spapr_cpu_core.h" 8 #include "hw/ppc/spapr_nested.h" 9 #include "mmu-book3s-v3.h" 10 #include "cpu-models.h" 11 #include "qemu/log.h" 12 13 void spapr_nested_reset(SpaprMachineState *spapr) 14 { 15 if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) { 16 spapr->nested.api = NESTED_API_KVM_HV; 17 spapr_unregister_nested_hv(); 18 spapr_register_nested_hv(); 19 } else { 20 spapr->nested.api = 0; 21 spapr->nested.capabilities_set = false; 22 spapr_nested_gsb_init(); 23 } 24 } 25 26 uint8_t spapr_nested_api(SpaprMachineState *spapr) 27 { 28 return spapr->nested.api; 29 } 30 31 #ifdef CONFIG_TCG 32 33 bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu, 34 target_ulong lpid, ppc_v3_pate_t *entry) 35 { 36 uint64_t patb, pats; 37 38 assert(lpid != 0); 39 40 patb = spapr->nested.ptcr & PTCR_PATB; 41 pats = spapr->nested.ptcr & PTCR_PATS; 42 43 /* Check if partition table is properly aligned */ 44 if (patb & MAKE_64BIT_MASK(0, pats + 12)) { 45 return false; 46 } 47 48 /* Calculate number of entries */ 49 pats = 1ull << (pats + 12 - 4); 50 if (pats <= lpid) { 51 return false; 52 } 53 54 /* Grab entry */ 55 patb += 16 * lpid; 56 entry->dw0 = ldq_phys(CPU(cpu)->as, patb); 57 entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8); 58 return true; 59 } 60 61 #define PRTS_MASK 0x1f 62 63 static target_ulong h_set_ptbl(PowerPCCPU *cpu, 64 SpaprMachineState *spapr, 65 target_ulong opcode, 66 target_ulong *args) 67 { 68 target_ulong ptcr = args[0]; 69 70 if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) { 71 return H_FUNCTION; 72 } 73 74 if ((ptcr & PRTS_MASK) + 12 - 4 > 12) { 75 return H_PARAMETER; 76 } 77 78 spapr->nested.ptcr = ptcr; /* Save new partition table */ 79 80 return H_SUCCESS; 81 } 82 83 static target_ulong h_tlb_invalidate(PowerPCCPU *cpu, 84 SpaprMachineState *spapr, 85 target_ulong opcode, 86 target_ulong *args) 87 { 88 /* 89 * The spapr virtual hypervisor nested HV implementation retains no L2 90 * translation state except for TLB. And the TLB is always invalidated 91 * across L1<->L2 transitions, so nothing is required here. 92 */ 93 94 return H_SUCCESS; 95 } 96 97 static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu, 98 SpaprMachineState *spapr, 99 target_ulong opcode, 100 target_ulong *args) 101 { 102 /* 103 * This HCALL is not required, L1 KVM will take a slow path and walk the 104 * page tables manually to do the data copy. 105 */ 106 return H_FUNCTION; 107 } 108 109 static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu) 110 { 111 CPUPPCState *env = &cpu->env; 112 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 113 114 memcpy(save->gpr, env->gpr, sizeof(save->gpr)); 115 116 save->lr = env->lr; 117 save->ctr = env->ctr; 118 save->cfar = env->cfar; 119 save->msr = env->msr; 120 save->nip = env->nip; 121 122 save->cr = ppc_get_cr(env); 123 save->xer = cpu_read_xer(env); 124 125 save->lpcr = env->spr[SPR_LPCR]; 126 save->lpidr = env->spr[SPR_LPIDR]; 127 save->pcr = env->spr[SPR_PCR]; 128 save->dpdes = env->spr[SPR_DPDES]; 129 save->hfscr = env->spr[SPR_HFSCR]; 130 save->srr0 = env->spr[SPR_SRR0]; 131 save->srr1 = env->spr[SPR_SRR1]; 132 save->sprg0 = env->spr[SPR_SPRG0]; 133 save->sprg1 = env->spr[SPR_SPRG1]; 134 save->sprg2 = env->spr[SPR_SPRG2]; 135 save->sprg3 = env->spr[SPR_SPRG3]; 136 save->pidr = env->spr[SPR_BOOKS_PID]; 137 save->ppr = env->spr[SPR_PPR]; 138 139 if (spapr_nested_api(spapr) == NESTED_API_PAPR) { 140 save->amor = env->spr[SPR_AMOR]; 141 save->dawr0 = env->spr[SPR_DAWR0]; 142 save->dawrx0 = env->spr[SPR_DAWRX0]; 143 save->ciabr = env->spr[SPR_CIABR]; 144 save->purr = env->spr[SPR_PURR]; 145 save->spurr = env->spr[SPR_SPURR]; 146 save->ic = env->spr[SPR_IC]; 147 save->vtb = env->spr[SPR_VTB]; 148 save->hdar = env->spr[SPR_HDAR]; 149 save->hdsisr = env->spr[SPR_HDSISR]; 150 save->heir = env->spr[SPR_HEIR]; 151 save->asdr = env->spr[SPR_ASDR]; 152 save->dawr1 = env->spr[SPR_DAWR1]; 153 save->dawrx1 = env->spr[SPR_DAWRX1]; 154 save->dexcr = env->spr[SPR_DEXCR]; 155 save->hdexcr = env->spr[SPR_HDEXCR]; 156 save->hashkeyr = env->spr[SPR_HASHKEYR]; 157 save->hashpkeyr = env->spr[SPR_HASHPKEYR]; 158 memcpy(save->vsr, env->vsr, sizeof(save->vsr)); 159 save->ebbhr = env->spr[SPR_EBBHR]; 160 save->tar = env->spr[SPR_TAR]; 161 save->ebbrr = env->spr[SPR_EBBRR]; 162 save->bescr = env->spr[SPR_BESCR]; 163 save->iamr = env->spr[SPR_IAMR]; 164 save->amr = env->spr[SPR_AMR]; 165 save->uamor = env->spr[SPR_UAMOR]; 166 save->dscr = env->spr[SPR_DSCR]; 167 save->fscr = env->spr[SPR_FSCR]; 168 save->pspb = env->spr[SPR_PSPB]; 169 save->ctrl = env->spr[SPR_CTRL]; 170 save->vrsave = env->spr[SPR_VRSAVE]; 171 save->dar = env->spr[SPR_DAR]; 172 save->dsisr = env->spr[SPR_DSISR]; 173 save->pmc1 = env->spr[SPR_POWER_PMC1]; 174 save->pmc2 = env->spr[SPR_POWER_PMC2]; 175 save->pmc3 = env->spr[SPR_POWER_PMC3]; 176 save->pmc4 = env->spr[SPR_POWER_PMC4]; 177 save->pmc5 = env->spr[SPR_POWER_PMC5]; 178 save->pmc6 = env->spr[SPR_POWER_PMC6]; 179 save->mmcr0 = env->spr[SPR_POWER_MMCR0]; 180 save->mmcr1 = env->spr[SPR_POWER_MMCR1]; 181 save->mmcr2 = env->spr[SPR_POWER_MMCR2]; 182 save->mmcra = env->spr[SPR_POWER_MMCRA]; 183 save->sdar = env->spr[SPR_POWER_SDAR]; 184 save->siar = env->spr[SPR_POWER_SIAR]; 185 save->sier = env->spr[SPR_POWER_SIER]; 186 save->vscr = ppc_get_vscr(env); 187 save->fpscr = env->fpscr; 188 } 189 190 save->tb_offset = env->tb_env->tb_offset; 191 } 192 193 static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load) 194 { 195 CPUState *cs = CPU(cpu); 196 CPUPPCState *env = &cpu->env; 197 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 198 199 memcpy(env->gpr, load->gpr, sizeof(env->gpr)); 200 201 env->lr = load->lr; 202 env->ctr = load->ctr; 203 env->cfar = load->cfar; 204 env->msr = load->msr; 205 env->nip = load->nip; 206 207 ppc_set_cr(env, load->cr); 208 cpu_write_xer(env, load->xer); 209 210 env->spr[SPR_LPCR] = load->lpcr; 211 env->spr[SPR_LPIDR] = load->lpidr; 212 env->spr[SPR_PCR] = load->pcr; 213 env->spr[SPR_DPDES] = load->dpdes; 214 env->spr[SPR_HFSCR] = load->hfscr; 215 env->spr[SPR_SRR0] = load->srr0; 216 env->spr[SPR_SRR1] = load->srr1; 217 env->spr[SPR_SPRG0] = load->sprg0; 218 env->spr[SPR_SPRG1] = load->sprg1; 219 env->spr[SPR_SPRG2] = load->sprg2; 220 env->spr[SPR_SPRG3] = load->sprg3; 221 env->spr[SPR_BOOKS_PID] = load->pidr; 222 env->spr[SPR_PPR] = load->ppr; 223 224 if (spapr_nested_api(spapr) == NESTED_API_PAPR) { 225 env->spr[SPR_AMOR] = load->amor; 226 env->spr[SPR_DAWR0] = load->dawr0; 227 env->spr[SPR_DAWRX0] = load->dawrx0; 228 env->spr[SPR_CIABR] = load->ciabr; 229 env->spr[SPR_PURR] = load->purr; 230 env->spr[SPR_SPURR] = load->purr; 231 env->spr[SPR_IC] = load->ic; 232 env->spr[SPR_VTB] = load->vtb; 233 env->spr[SPR_HDAR] = load->hdar; 234 env->spr[SPR_HDSISR] = load->hdsisr; 235 env->spr[SPR_HEIR] = load->heir; 236 env->spr[SPR_ASDR] = load->asdr; 237 env->spr[SPR_DAWR1] = load->dawr1; 238 env->spr[SPR_DAWRX1] = load->dawrx1; 239 env->spr[SPR_DEXCR] = load->dexcr; 240 env->spr[SPR_HDEXCR] = load->hdexcr; 241 env->spr[SPR_HASHKEYR] = load->hashkeyr; 242 env->spr[SPR_HASHPKEYR] = load->hashpkeyr; 243 memcpy(env->vsr, load->vsr, sizeof(env->vsr)); 244 env->spr[SPR_EBBHR] = load->ebbhr; 245 env->spr[SPR_TAR] = load->tar; 246 env->spr[SPR_EBBRR] = load->ebbrr; 247 env->spr[SPR_BESCR] = load->bescr; 248 env->spr[SPR_IAMR] = load->iamr; 249 env->spr[SPR_AMR] = load->amr; 250 env->spr[SPR_UAMOR] = load->uamor; 251 env->spr[SPR_DSCR] = load->dscr; 252 env->spr[SPR_FSCR] = load->fscr; 253 env->spr[SPR_PSPB] = load->pspb; 254 env->spr[SPR_CTRL] = load->ctrl; 255 env->spr[SPR_VRSAVE] = load->vrsave; 256 env->spr[SPR_DAR] = load->dar; 257 env->spr[SPR_DSISR] = load->dsisr; 258 env->spr[SPR_POWER_PMC1] = load->pmc1; 259 env->spr[SPR_POWER_PMC2] = load->pmc2; 260 env->spr[SPR_POWER_PMC3] = load->pmc3; 261 env->spr[SPR_POWER_PMC4] = load->pmc4; 262 env->spr[SPR_POWER_PMC5] = load->pmc5; 263 env->spr[SPR_POWER_PMC6] = load->pmc6; 264 env->spr[SPR_POWER_MMCR0] = load->mmcr0; 265 env->spr[SPR_POWER_MMCR1] = load->mmcr1; 266 env->spr[SPR_POWER_MMCR2] = load->mmcr2; 267 env->spr[SPR_POWER_MMCRA] = load->mmcra; 268 env->spr[SPR_POWER_SDAR] = load->sdar; 269 env->spr[SPR_POWER_SIAR] = load->siar; 270 env->spr[SPR_POWER_SIER] = load->sier; 271 ppc_store_vscr(env, load->vscr); 272 ppc_store_fpscr(env, load->fpscr); 273 } 274 275 env->tb_env->tb_offset = load->tb_offset; 276 277 /* 278 * MSR updated, compute hflags and possible interrupts. 279 */ 280 hreg_compute_hflags(env); 281 ppc_maybe_interrupt(env); 282 283 /* 284 * Nested HV does not tag TLB entries between L1 and L2, so must 285 * flush on transition. 286 */ 287 tlb_flush(cs); 288 env->reserve_addr = -1; /* Reset the reservation */ 289 } 290 291 /* 292 * When this handler returns, the environment is switched to the L2 guest 293 * and TCG begins running that. spapr_exit_nested() performs the switch from 294 * L2 back to L1 and returns from the H_ENTER_NESTED hcall. 295 */ 296 static target_ulong h_enter_nested(PowerPCCPU *cpu, 297 SpaprMachineState *spapr, 298 target_ulong opcode, 299 target_ulong *args) 300 { 301 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); 302 CPUPPCState *env = &cpu->env; 303 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 304 struct nested_ppc_state l2_state; 305 target_ulong hv_ptr = args[0]; 306 target_ulong regs_ptr = args[1]; 307 target_ulong hdec, now = cpu_ppc_load_tbl(env); 308 target_ulong lpcr, lpcr_mask; 309 struct kvmppc_hv_guest_state *hvstate; 310 struct kvmppc_hv_guest_state hv_state; 311 struct kvmppc_pt_regs *regs; 312 hwaddr len; 313 314 if (spapr->nested.ptcr == 0) { 315 return H_NOT_AVAILABLE; 316 } 317 318 len = sizeof(*hvstate); 319 hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false, 320 MEMTXATTRS_UNSPECIFIED); 321 if (len != sizeof(*hvstate)) { 322 address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false); 323 return H_PARAMETER; 324 } 325 326 memcpy(&hv_state, hvstate, len); 327 328 address_space_unmap(CPU(cpu)->as, hvstate, len, len, false); 329 330 /* 331 * We accept versions 1 and 2. Version 2 fields are unused because TCG 332 * does not implement DAWR*. 333 */ 334 if (hv_state.version > HV_GUEST_STATE_VERSION) { 335 return H_PARAMETER; 336 } 337 338 if (hv_state.lpid == 0) { 339 return H_PARAMETER; 340 } 341 342 spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1); 343 if (!spapr_cpu->nested_host_state) { 344 return H_NO_MEM; 345 } 346 347 assert(env->spr[SPR_LPIDR] == 0); 348 assert(env->spr[SPR_DPDES] == 0); 349 nested_save_state(spapr_cpu->nested_host_state, cpu); 350 351 len = sizeof(*regs); 352 regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false, 353 MEMTXATTRS_UNSPECIFIED); 354 if (!regs || len != sizeof(*regs)) { 355 address_space_unmap(CPU(cpu)->as, regs, len, 0, false); 356 g_free(spapr_cpu->nested_host_state); 357 return H_P2; 358 } 359 360 len = sizeof(l2_state.gpr); 361 assert(len == sizeof(regs->gpr)); 362 memcpy(l2_state.gpr, regs->gpr, len); 363 364 l2_state.lr = regs->link; 365 l2_state.ctr = regs->ctr; 366 l2_state.xer = regs->xer; 367 l2_state.cr = regs->ccr; 368 l2_state.msr = regs->msr; 369 l2_state.nip = regs->nip; 370 371 address_space_unmap(CPU(cpu)->as, regs, len, len, false); 372 373 l2_state.cfar = hv_state.cfar; 374 l2_state.lpidr = hv_state.lpid; 375 376 lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER; 377 lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask); 378 lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE; 379 lpcr &= ~LPCR_LPES0; 380 l2_state.lpcr = lpcr & pcc->lpcr_mask; 381 382 l2_state.pcr = hv_state.pcr; 383 /* hv_state.amor is not used */ 384 l2_state.dpdes = hv_state.dpdes; 385 l2_state.hfscr = hv_state.hfscr; 386 /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/ 387 l2_state.srr0 = hv_state.srr0; 388 l2_state.srr1 = hv_state.srr1; 389 l2_state.sprg0 = hv_state.sprg[0]; 390 l2_state.sprg1 = hv_state.sprg[1]; 391 l2_state.sprg2 = hv_state.sprg[2]; 392 l2_state.sprg3 = hv_state.sprg[3]; 393 l2_state.pidr = hv_state.pidr; 394 l2_state.ppr = hv_state.ppr; 395 l2_state.tb_offset = env->tb_env->tb_offset + hv_state.tb_offset; 396 397 /* 398 * Switch to the nested guest environment and start the "hdec" timer. 399 */ 400 nested_load_state(cpu, &l2_state); 401 402 hdec = hv_state.hdec_expiry - now; 403 cpu_ppc_hdecr_init(env); 404 cpu_ppc_store_hdecr(env, hdec); 405 406 /* 407 * The hv_state.vcpu_token is not needed. It is used by the KVM 408 * implementation to remember which L2 vCPU last ran on which physical 409 * CPU so as to invalidate process scope translations if it is moved 410 * between physical CPUs. For now TLBs are always flushed on L1<->L2 411 * transitions so this is not a problem. 412 * 413 * Could validate that the same vcpu_token does not attempt to run on 414 * different L1 vCPUs at the same time, but that would be a L1 KVM bug 415 * and it's not obviously worth a new data structure to do it. 416 */ 417 418 spapr_cpu->in_nested = true; 419 420 /* 421 * The spapr hcall helper sets env->gpr[3] to the return value, but at 422 * this point the L1 is not returning from the hcall but rather we 423 * start running the L2, so r3 must not be clobbered, so return env->gpr[3] 424 * to leave it unchanged. 425 */ 426 return env->gpr[3]; 427 } 428 429 static void spapr_exit_nested_hv(PowerPCCPU *cpu, int excp) 430 { 431 CPUPPCState *env = &cpu->env; 432 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 433 struct nested_ppc_state l2_state; 434 target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4]; 435 target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5]; 436 target_ulong hsrr0, hsrr1, hdar, asdr, hdsisr; 437 struct kvmppc_hv_guest_state *hvstate; 438 struct kvmppc_pt_regs *regs; 439 hwaddr len; 440 441 nested_save_state(&l2_state, cpu); 442 hsrr0 = env->spr[SPR_HSRR0]; 443 hsrr1 = env->spr[SPR_HSRR1]; 444 hdar = env->spr[SPR_HDAR]; 445 hdsisr = env->spr[SPR_HDSISR]; 446 asdr = env->spr[SPR_ASDR]; 447 448 /* 449 * Switch back to the host environment (including for any error). 450 */ 451 assert(env->spr[SPR_LPIDR] != 0); 452 nested_load_state(cpu, spapr_cpu->nested_host_state); 453 env->gpr[3] = env->excp_vectors[excp]; /* hcall return value */ 454 455 cpu_ppc_hdecr_exit(env); 456 457 spapr_cpu->in_nested = false; 458 459 g_free(spapr_cpu->nested_host_state); 460 spapr_cpu->nested_host_state = NULL; 461 462 len = sizeof(*hvstate); 463 hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true, 464 MEMTXATTRS_UNSPECIFIED); 465 if (len != sizeof(*hvstate)) { 466 address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true); 467 env->gpr[3] = H_PARAMETER; 468 return; 469 } 470 471 hvstate->cfar = l2_state.cfar; 472 hvstate->lpcr = l2_state.lpcr; 473 hvstate->pcr = l2_state.pcr; 474 hvstate->dpdes = l2_state.dpdes; 475 hvstate->hfscr = l2_state.hfscr; 476 477 if (excp == POWERPC_EXCP_HDSI) { 478 hvstate->hdar = hdar; 479 hvstate->hdsisr = hdsisr; 480 hvstate->asdr = asdr; 481 } else if (excp == POWERPC_EXCP_HISI) { 482 hvstate->asdr = asdr; 483 } 484 485 /* HEIR should be implemented for HV mode and saved here. */ 486 hvstate->srr0 = l2_state.srr0; 487 hvstate->srr1 = l2_state.srr1; 488 hvstate->sprg[0] = l2_state.sprg0; 489 hvstate->sprg[1] = l2_state.sprg1; 490 hvstate->sprg[2] = l2_state.sprg2; 491 hvstate->sprg[3] = l2_state.sprg3; 492 hvstate->pidr = l2_state.pidr; 493 hvstate->ppr = l2_state.ppr; 494 495 /* Is it okay to specify write length larger than actual data written? */ 496 address_space_unmap(CPU(cpu)->as, hvstate, len, len, true); 497 498 len = sizeof(*regs); 499 regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true, 500 MEMTXATTRS_UNSPECIFIED); 501 if (!regs || len != sizeof(*regs)) { 502 address_space_unmap(CPU(cpu)->as, regs, len, 0, true); 503 env->gpr[3] = H_P2; 504 return; 505 } 506 507 len = sizeof(env->gpr); 508 assert(len == sizeof(regs->gpr)); 509 memcpy(regs->gpr, l2_state.gpr, len); 510 511 regs->link = l2_state.lr; 512 regs->ctr = l2_state.ctr; 513 regs->xer = l2_state.xer; 514 regs->ccr = l2_state.cr; 515 516 if (excp == POWERPC_EXCP_MCHECK || 517 excp == POWERPC_EXCP_RESET || 518 excp == POWERPC_EXCP_SYSCALL) { 519 regs->nip = l2_state.srr0; 520 regs->msr = l2_state.srr1 & env->msr_mask; 521 } else { 522 regs->nip = hsrr0; 523 regs->msr = hsrr1 & env->msr_mask; 524 } 525 526 /* Is it okay to specify write length larger than actual data written? */ 527 address_space_unmap(CPU(cpu)->as, regs, len, len, true); 528 } 529 530 void spapr_exit_nested(PowerPCCPU *cpu, int excp) 531 { 532 SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); 533 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu); 534 535 assert(spapr_cpu->in_nested); 536 if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) { 537 spapr_exit_nested_hv(cpu, excp); 538 } else { 539 g_assert_not_reached(); 540 } 541 } 542 543 static 544 SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr, 545 target_ulong guestid) 546 { 547 SpaprMachineStateNestedGuest *guest; 548 549 guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); 550 return guest; 551 } 552 553 static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest, 554 target_ulong vcpuid, bool inoutbuf) 555 { 556 struct SpaprMachineStateNestedGuestVcpu *vcpu; 557 /* 558 * Perform sanity checks for the provided vcpuid of a guest. 559 * For now, ensure its valid, allocated and enabled for use. 560 */ 561 562 if (vcpuid >= PAPR_NESTED_GUEST_VCPU_MAX) { 563 return false; 564 } 565 566 if (!(vcpuid < guest->nr_vcpus)) { 567 return false; 568 } 569 570 vcpu = &guest->vcpus[vcpuid]; 571 if (!vcpu->enabled) { 572 return false; 573 } 574 575 if (!inoutbuf) { 576 return true; 577 } 578 579 /* Check to see if the in/out buffers are registered */ 580 if (vcpu->runbufin.addr && vcpu->runbufout.addr) { 581 return true; 582 } 583 584 return false; 585 } 586 587 static void *get_vcpu_state_ptr(SpaprMachineStateNestedGuest *guest, 588 target_ulong vcpuid) 589 { 590 assert(spapr_nested_vcpu_check(guest, vcpuid, false)); 591 return &guest->vcpus[vcpuid].state; 592 } 593 594 static void *get_vcpu_ptr(SpaprMachineStateNestedGuest *guest, 595 target_ulong vcpuid) 596 { 597 assert(spapr_nested_vcpu_check(guest, vcpuid, false)); 598 return &guest->vcpus[vcpuid]; 599 } 600 601 static void *get_guest_ptr(SpaprMachineStateNestedGuest *guest, 602 target_ulong vcpuid) 603 { 604 return guest; /* for GSBE_NESTED */ 605 } 606 607 /* 608 * set=1 means the L1 is trying to set some state 609 * set=0 means the L1 is trying to get some state 610 */ 611 static void copy_state_8to8(void *a, void *b, bool set) 612 { 613 /* set takes from the Big endian element_buf and sets internal buffer */ 614 615 if (set) { 616 *(uint64_t *)a = be64_to_cpu(*(uint64_t *)b); 617 } else { 618 *(uint64_t *)b = cpu_to_be64(*(uint64_t *)a); 619 } 620 } 621 622 static void copy_state_4to4(void *a, void *b, bool set) 623 { 624 if (set) { 625 *(uint32_t *)a = be32_to_cpu(*(uint32_t *)b); 626 } else { 627 *(uint32_t *)b = cpu_to_be32(*((uint32_t *)a)); 628 } 629 } 630 631 static void copy_state_16to16(void *a, void *b, bool set) 632 { 633 uint64_t *src, *dst; 634 635 if (set) { 636 src = b; 637 dst = a; 638 639 dst[1] = be64_to_cpu(src[0]); 640 dst[0] = be64_to_cpu(src[1]); 641 } else { 642 src = a; 643 dst = b; 644 645 dst[1] = cpu_to_be64(src[0]); 646 dst[0] = cpu_to_be64(src[1]); 647 } 648 } 649 650 static void copy_state_4to8(void *a, void *b, bool set) 651 { 652 if (set) { 653 *(uint64_t *)a = (uint64_t) be32_to_cpu(*(uint32_t *)b); 654 } else { 655 *(uint32_t *)b = cpu_to_be32((uint32_t) (*((uint64_t *)a))); 656 } 657 } 658 659 static void copy_state_pagetbl(void *a, void *b, bool set) 660 { 661 uint64_t *pagetbl; 662 uint64_t *buf; /* 3 double words */ 663 uint64_t rts; 664 665 assert(set); 666 667 pagetbl = a; 668 buf = b; 669 670 *pagetbl = be64_to_cpu(buf[0]); 671 /* as per ISA section 6.7.6.1 */ 672 *pagetbl |= PATE0_HR; /* Host Radix bit is 1 */ 673 674 /* RTS */ 675 rts = be64_to_cpu(buf[1]); 676 assert(rts == 52); 677 rts = rts - 31; /* since radix tree size = 2^(RTS+31) */ 678 *pagetbl |= ((rts & 0x7) << 5); /* RTS2 is bit 56:58 */ 679 *pagetbl |= (((rts >> 3) & 0x3) << 61); /* RTS1 is bit 1:2 */ 680 681 /* RPDS {Size = 2^(RPDS+3) , RPDS >=5} */ 682 *pagetbl |= 63 - clz64(be64_to_cpu(buf[2])) - 3; 683 } 684 685 static void copy_state_proctbl(void *a, void *b, bool set) 686 { 687 uint64_t *proctbl; 688 uint64_t *buf; /* 2 double words */ 689 690 assert(set); 691 692 proctbl = a; 693 buf = b; 694 /* PRTB: Process Table Base */ 695 *proctbl = be64_to_cpu(buf[0]); 696 /* PRTS: Process Table Size = 2^(12+PRTS) */ 697 if (be64_to_cpu(buf[1]) == (1ULL << 12)) { 698 *proctbl |= 0; 699 } else if (be64_to_cpu(buf[1]) == (1ULL << 24)) { 700 *proctbl |= 12; 701 } else { 702 g_assert_not_reached(); 703 } 704 } 705 706 static void copy_state_runbuf(void *a, void *b, bool set) 707 { 708 uint64_t *buf; /* 2 double words */ 709 struct SpaprMachineStateNestedGuestVcpuRunBuf *runbuf; 710 711 assert(set); 712 713 runbuf = a; 714 buf = b; 715 716 runbuf->addr = be64_to_cpu(buf[0]); 717 assert(runbuf->addr); 718 719 /* per spec */ 720 assert(be64_to_cpu(buf[1]) <= 16384); 721 722 /* 723 * This will also hit in the input buffer but should be fine for 724 * now. If not we can split this function. 725 */ 726 assert(be64_to_cpu(buf[1]) >= VCPU_OUT_BUF_MIN_SZ); 727 728 runbuf->size = be64_to_cpu(buf[1]); 729 } 730 731 /* tell the L1 how big we want the output vcpu run buffer */ 732 static void out_buf_min_size(void *a, void *b, bool set) 733 { 734 uint64_t *buf; /* 1 double word */ 735 736 assert(!set); 737 738 buf = b; 739 740 buf[0] = cpu_to_be64(VCPU_OUT_BUF_MIN_SZ); 741 } 742 743 static void copy_logical_pvr(void *a, void *b, bool set) 744 { 745 SpaprMachineStateNestedGuest *guest; 746 uint32_t *buf; /* 1 word */ 747 uint32_t *pvr_logical_ptr; 748 uint32_t pvr_logical; 749 target_ulong pcr = 0; 750 751 pvr_logical_ptr = a; 752 buf = b; 753 754 if (!set) { 755 buf[0] = cpu_to_be32(*pvr_logical_ptr); 756 return; 757 } 758 759 pvr_logical = be32_to_cpu(buf[0]); 760 761 *pvr_logical_ptr = pvr_logical; 762 763 if (*pvr_logical_ptr) { 764 switch (*pvr_logical_ptr) { 765 case CPU_POWERPC_LOGICAL_3_10: 766 pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00; 767 break; 768 case CPU_POWERPC_LOGICAL_3_00: 769 pcr = PCR_COMPAT_3_00; 770 break; 771 default: 772 qemu_log_mask(LOG_GUEST_ERROR, 773 "Could not set PCR for LPVR=0x%08x\n", 774 *pvr_logical_ptr); 775 return; 776 } 777 } 778 779 guest = container_of(pvr_logical_ptr, 780 struct SpaprMachineStateNestedGuest, 781 pvr_logical); 782 for (int i = 0; i < guest->nr_vcpus; i++) { 783 guest->vcpus[i].state.pcr = ~pcr | HVMASK_PCR; 784 } 785 } 786 787 static void copy_tb_offset(void *a, void *b, bool set) 788 { 789 SpaprMachineStateNestedGuest *guest; 790 uint64_t *buf; /* 1 double word */ 791 uint64_t *tb_offset_ptr; 792 uint64_t tb_offset; 793 794 tb_offset_ptr = a; 795 buf = b; 796 797 if (!set) { 798 buf[0] = cpu_to_be64(*tb_offset_ptr); 799 return; 800 } 801 802 tb_offset = be64_to_cpu(buf[0]); 803 /* need to copy this to the individual tb_offset for each vcpu */ 804 guest = container_of(tb_offset_ptr, 805 struct SpaprMachineStateNestedGuest, 806 tb_offset); 807 for (int i = 0; i < guest->nr_vcpus; i++) { 808 guest->vcpus[i].tb_offset = tb_offset; 809 } 810 } 811 812 static void copy_state_hdecr(void *a, void *b, bool set) 813 { 814 uint64_t *buf; /* 1 double word */ 815 uint64_t *hdecr_expiry_tb; 816 817 hdecr_expiry_tb = a; 818 buf = b; 819 820 if (!set) { 821 buf[0] = cpu_to_be64(*hdecr_expiry_tb); 822 return; 823 } 824 825 *hdecr_expiry_tb = be64_to_cpu(buf[0]); 826 } 827 828 struct guest_state_element_type guest_state_element_types[] = { 829 GUEST_STATE_ELEMENT_NOP(GSB_HV_VCPU_IGNORED_ID, 0), 830 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR0, gpr[0]), 831 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR1, gpr[1]), 832 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR2, gpr[2]), 833 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR3, gpr[3]), 834 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR4, gpr[4]), 835 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR5, gpr[5]), 836 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR6, gpr[6]), 837 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR7, gpr[7]), 838 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR8, gpr[8]), 839 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR9, gpr[9]), 840 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR10, gpr[10]), 841 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR11, gpr[11]), 842 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR12, gpr[12]), 843 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR13, gpr[13]), 844 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR14, gpr[14]), 845 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR15, gpr[15]), 846 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR16, gpr[16]), 847 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR17, gpr[17]), 848 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR18, gpr[18]), 849 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR19, gpr[19]), 850 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR20, gpr[20]), 851 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR21, gpr[21]), 852 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR22, gpr[22]), 853 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR23, gpr[23]), 854 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR24, gpr[24]), 855 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR25, gpr[25]), 856 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR26, gpr[26]), 857 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR27, gpr[27]), 858 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR28, gpr[28]), 859 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR29, gpr[29]), 860 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR30, gpr[30]), 861 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR31, gpr[31]), 862 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_NIA, nip), 863 GSE_ENV_DWM(GSB_VCPU_SPR_MSR, msr, HVMASK_MSR), 864 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTR, ctr), 865 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_LR, lr), 866 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_XER, xer), 867 GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_CR, cr), 868 GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_MMCR3), 869 GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER2), 870 GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER3), 871 GUEST_STATE_ELEMENT_NOP_W(GSB_VCPU_SPR_WORT), 872 GSE_ENV_DWM(GSB_VCPU_SPR_LPCR, lpcr, HVMASK_LPCR), 873 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMOR, amor), 874 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HFSCR, hfscr), 875 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR0, dawr0), 876 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX0, dawrx0), 877 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CIABR, ciabr), 878 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PURR, purr), 879 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPURR, spurr), 880 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IC, ic), 881 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_VTB, vtb), 882 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HDAR, hdar), 883 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HDSISR, hdsisr), 884 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HEIR, heir), 885 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_ASDR, asdr), 886 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR0, srr0), 887 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR1, srr1), 888 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG0, sprg0), 889 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG1, sprg1), 890 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG2, sprg2), 891 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG3, sprg3), 892 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PIDR, pidr), 893 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CFAR, cfar), 894 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PPR, ppr), 895 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR1, dawr1), 896 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX1, dawrx1), 897 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DEXCR, dexcr), 898 GSE_ENV_DWM(GSB_VCPU_SPR_HDEXCR, hdexcr, HVMASK_HDEXCR), 899 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHKEYR, hashkeyr), 900 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHPKEYR, hashpkeyr), 901 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR0, vsr[0]), 902 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR1, vsr[1]), 903 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR2, vsr[2]), 904 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR3, vsr[3]), 905 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR4, vsr[4]), 906 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR5, vsr[5]), 907 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR6, vsr[6]), 908 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR7, vsr[7]), 909 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR8, vsr[8]), 910 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR9, vsr[9]), 911 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR10, vsr[10]), 912 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR11, vsr[11]), 913 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR12, vsr[12]), 914 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR13, vsr[13]), 915 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR14, vsr[14]), 916 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR15, vsr[15]), 917 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR16, vsr[16]), 918 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR17, vsr[17]), 919 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR18, vsr[18]), 920 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR19, vsr[19]), 921 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR20, vsr[20]), 922 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR21, vsr[21]), 923 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR22, vsr[22]), 924 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR23, vsr[23]), 925 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR24, vsr[24]), 926 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR25, vsr[25]), 927 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR26, vsr[26]), 928 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR27, vsr[27]), 929 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR28, vsr[28]), 930 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR29, vsr[29]), 931 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR30, vsr[30]), 932 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR31, vsr[31]), 933 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR32, vsr[32]), 934 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR33, vsr[33]), 935 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR34, vsr[34]), 936 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR35, vsr[35]), 937 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR36, vsr[36]), 938 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR37, vsr[37]), 939 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR38, vsr[38]), 940 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR39, vsr[39]), 941 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR40, vsr[40]), 942 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR41, vsr[41]), 943 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR42, vsr[42]), 944 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR43, vsr[43]), 945 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR44, vsr[44]), 946 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR45, vsr[45]), 947 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR46, vsr[46]), 948 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR47, vsr[47]), 949 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR48, vsr[48]), 950 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR49, vsr[49]), 951 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR50, vsr[50]), 952 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR51, vsr[51]), 953 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR52, vsr[52]), 954 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR53, vsr[53]), 955 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR54, vsr[54]), 956 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR55, vsr[55]), 957 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR56, vsr[56]), 958 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR57, vsr[57]), 959 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR58, vsr[58]), 960 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR59, vsr[59]), 961 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR60, vsr[60]), 962 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR61, vsr[61]), 963 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR62, vsr[62]), 964 GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR63, vsr[63]), 965 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBHR, ebbhr), 966 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_TAR, tar), 967 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBRR, ebbrr), 968 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_BESCR, bescr), 969 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IAMR, iamr), 970 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMR, amr), 971 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_UAMOR, uamor), 972 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DSCR, dscr), 973 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr), 974 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb), 975 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl), 976 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave), 977 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar), 978 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr), 979 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC1, pmc1), 980 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC2, pmc2), 981 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC3, pmc3), 982 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC4, pmc4), 983 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC5, pmc5), 984 GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC6, pmc6), 985 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR0, mmcr0), 986 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR1, mmcr1), 987 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR2, mmcr2), 988 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCRA, mmcra), 989 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SDAR , sdar), 990 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIAR , siar), 991 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIER , sier), 992 GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_VSCR, vscr), 993 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FPSCR, fpscr), 994 GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_DEC_EXPIRE_TB, dec_expiry_tb), 995 GSBE_NESTED(GSB_PART_SCOPED_PAGETBL, 0x18, parttbl[0], copy_state_pagetbl), 996 GSBE_NESTED(GSB_PROCESS_TBL, 0x10, parttbl[1], copy_state_proctbl), 997 GSBE_NESTED(GSB_VCPU_LPVR, 0x4, pvr_logical, copy_logical_pvr), 998 GSBE_NESTED_MSK(GSB_TB_OFFSET, 0x8, tb_offset, copy_tb_offset, 999 HVMASK_TB_OFFSET), 1000 GSBE_NESTED_VCPU(GSB_VCPU_IN_BUFFER, 0x10, runbufin, copy_state_runbuf), 1001 GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf), 1002 GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size), 1003 GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb, 1004 copy_state_hdecr) 1005 }; 1006 1007 void spapr_nested_gsb_init(void) 1008 { 1009 struct guest_state_element_type *type; 1010 1011 /* Init the guest state elements lookup table, flags for now */ 1012 for (int i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) { 1013 type = &guest_state_element_types[i]; 1014 1015 assert(type->id <= GSB_LAST); 1016 if (type->id >= GSB_VCPU_SPR_HDAR) 1017 /* 0xf000 - 0xf005 Thread + RO */ 1018 type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY; 1019 else if (type->id >= GSB_VCPU_IN_BUFFER) 1020 /* 0x0c00 - 0xf000 Thread + RW */ 1021 type->flags = 0; 1022 else if (type->id >= GSB_VCPU_LPVR) 1023 /* 0x0003 - 0x0bff Guest + RW */ 1024 type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; 1025 else if (type->id >= GSB_HV_VCPU_STATE_SIZE) 1026 /* 0x0001 - 0x0002 Guest + RO */ 1027 type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY | 1028 GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE; 1029 } 1030 } 1031 1032 static struct guest_state_element *guest_state_element_next( 1033 struct guest_state_element *element, 1034 int64_t *len, 1035 int64_t *num_elements) 1036 { 1037 uint16_t size; 1038 1039 /* size is of element->value[] only. Not whole guest_state_element */ 1040 size = be16_to_cpu(element->size); 1041 1042 if (len) { 1043 *len -= size + offsetof(struct guest_state_element, value); 1044 } 1045 1046 if (num_elements) { 1047 *num_elements -= 1; 1048 } 1049 1050 return (struct guest_state_element *)(element->value + size); 1051 } 1052 1053 static 1054 struct guest_state_element_type *guest_state_element_type_find(uint16_t id) 1055 { 1056 int i; 1057 1058 for (i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) 1059 if (id == guest_state_element_types[i].id) { 1060 return &guest_state_element_types[i]; 1061 } 1062 1063 return NULL; 1064 } 1065 1066 static void log_element(struct guest_state_element *element, 1067 struct guest_state_request *gsr) 1068 { 1069 qemu_log_mask(LOG_GUEST_ERROR, "h_guest_%s_state id:0x%04x size:0x%04x", 1070 gsr->flags & GUEST_STATE_REQUEST_SET ? "set" : "get", 1071 be16_to_cpu(element->id), be16_to_cpu(element->size)); 1072 qemu_log_mask(LOG_GUEST_ERROR, "buf:0x%016"PRIx64" ...\n", 1073 be64_to_cpu(*(uint64_t *)element->value)); 1074 } 1075 1076 static bool guest_state_request_check(struct guest_state_request *gsr) 1077 { 1078 int64_t num_elements, len = gsr->len; 1079 struct guest_state_buffer *gsb = gsr->gsb; 1080 struct guest_state_element *element; 1081 struct guest_state_element_type *type; 1082 uint16_t id, size; 1083 1084 /* gsb->num_elements = 0 == 32 bits long */ 1085 assert(len >= 4); 1086 1087 num_elements = be32_to_cpu(gsb->num_elements); 1088 element = gsb->elements; 1089 len -= sizeof(gsb->num_elements); 1090 1091 /* Walk the buffer to validate the length */ 1092 while (num_elements) { 1093 1094 id = be16_to_cpu(element->id); 1095 size = be16_to_cpu(element->size); 1096 1097 if (false) { 1098 log_element(element, gsr); 1099 } 1100 /* buffer size too small */ 1101 if (len < 0) { 1102 return false; 1103 } 1104 1105 type = guest_state_element_type_find(id); 1106 if (!type) { 1107 qemu_log_mask(LOG_GUEST_ERROR, "Element ID %04x unknown\n", id); 1108 log_element(element, gsr); 1109 return false; 1110 } 1111 1112 if (id == GSB_HV_VCPU_IGNORED_ID) { 1113 goto next_element; 1114 } 1115 1116 if (size != type->size) { 1117 qemu_log_mask(LOG_GUEST_ERROR, "Size mismatch. Element ID:%04x." 1118 "Size Exp:%i Got:%i\n", id, type->size, size); 1119 log_element(element, gsr); 1120 return false; 1121 } 1122 1123 if ((type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY) && 1124 (gsr->flags & GUEST_STATE_REQUEST_SET)) { 1125 qemu_log_mask(LOG_GUEST_ERROR, "Trying to set a read-only Element " 1126 "ID:%04x.\n", id); 1127 return false; 1128 } 1129 1130 if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) { 1131 /* guest wide element type */ 1132 if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) { 1133 qemu_log_mask(LOG_GUEST_ERROR, "trying to set a guest wide " 1134 "Element ID:%04x.\n", id); 1135 return false; 1136 } 1137 } else { 1138 /* thread wide element type */ 1139 if (gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE) { 1140 qemu_log_mask(LOG_GUEST_ERROR, "trying to set a thread wide " 1141 "Element ID:%04x.\n", id); 1142 return false; 1143 } 1144 } 1145 next_element: 1146 element = guest_state_element_next(element, &len, &num_elements); 1147 1148 } 1149 return true; 1150 } 1151 1152 static bool is_gsr_invalid(struct guest_state_request *gsr, 1153 struct guest_state_element *element, 1154 struct guest_state_element_type *type) 1155 { 1156 if ((gsr->flags & GUEST_STATE_REQUEST_SET) && 1157 (*(uint64_t *)(element->value) & ~(type->mask))) { 1158 log_element(element, gsr); 1159 qemu_log_mask(LOG_GUEST_ERROR, "L1 can't set reserved bits " 1160 "(allowed mask: 0x%08"PRIx64")\n", type->mask); 1161 return true; 1162 } 1163 return false; 1164 } 1165 1166 static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu, 1167 SpaprMachineState *spapr, 1168 target_ulong opcode, 1169 target_ulong *args) 1170 { 1171 CPUPPCState *env = &cpu->env; 1172 target_ulong flags = args[0]; 1173 1174 if (flags) { /* don't handle any flags capabilities for now */ 1175 return H_PARAMETER; 1176 } 1177 1178 /* P10 capabilities */ 1179 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, 1180 spapr->max_compat_pvr)) { 1181 env->gpr[4] |= H_GUEST_CAPABILITIES_P10_MODE; 1182 } 1183 1184 /* P9 capabilities */ 1185 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, 1186 spapr->max_compat_pvr)) { 1187 env->gpr[4] |= H_GUEST_CAPABILITIES_P9_MODE; 1188 } 1189 1190 return H_SUCCESS; 1191 } 1192 1193 static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu, 1194 SpaprMachineState *spapr, 1195 target_ulong opcode, 1196 target_ulong *args) 1197 { 1198 CPUPPCState *env = &cpu->env; 1199 target_ulong flags = args[0]; 1200 target_ulong capabilities = args[1]; 1201 env->gpr[4] = 0; 1202 1203 if (flags) { /* don't handle any flags capabilities for now */ 1204 return H_PARAMETER; 1205 } 1206 1207 if (capabilities & H_GUEST_CAPABILITIES_COPY_MEM) { 1208 env->gpr[4] = 1; 1209 return H_P2; /* isn't supported */ 1210 } 1211 1212 /* 1213 * If there are no capabilities configured, set the R5 to the index of 1214 * the first supported Power Processor Mode 1215 */ 1216 if (!capabilities) { 1217 env->gpr[4] = 1; 1218 1219 /* set R5 to the first supported Power Processor Mode */ 1220 if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0, 1221 spapr->max_compat_pvr)) { 1222 env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP; 1223 } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0, 1224 spapr->max_compat_pvr)) { 1225 env->gpr[5] = H_GUEST_CAP_P9_MODE_BMAP; 1226 } 1227 1228 return H_P2; 1229 } 1230 1231 /* 1232 * If an invalid capability is set, R5 should contain the index of the 1233 * invalid capability bit 1234 */ 1235 if (capabilities & ~H_GUEST_CAP_VALID_MASK) { 1236 env->gpr[4] = 1; 1237 1238 /* Set R5 to the index of the invalid capability */ 1239 env->gpr[5] = 63 - ctz64(capabilities); 1240 1241 return H_P2; 1242 } 1243 1244 if (!spapr->nested.capabilities_set) { 1245 spapr->nested.capabilities_set = true; 1246 spapr->nested.pvr_base = env->spr[SPR_PVR]; 1247 return H_SUCCESS; 1248 } else { 1249 return H_STATE; 1250 } 1251 } 1252 1253 static void 1254 destroy_guest_helper(gpointer value) 1255 { 1256 struct SpaprMachineStateNestedGuest *guest = value; 1257 g_free(guest->vcpus); 1258 g_free(guest); 1259 } 1260 1261 static target_ulong h_guest_create(PowerPCCPU *cpu, 1262 SpaprMachineState *spapr, 1263 target_ulong opcode, 1264 target_ulong *args) 1265 { 1266 CPUPPCState *env = &cpu->env; 1267 target_ulong flags = args[0]; 1268 target_ulong continue_token = args[1]; 1269 uint64_t guestid; 1270 int nguests = 0; 1271 struct SpaprMachineStateNestedGuest *guest; 1272 1273 if (flags) { /* don't handle any flags for now */ 1274 return H_UNSUPPORTED_FLAG; 1275 } 1276 1277 if (continue_token != -1) { 1278 return H_P2; 1279 } 1280 1281 if (!spapr->nested.capabilities_set) { 1282 return H_STATE; 1283 } 1284 1285 if (!spapr->nested.guests) { 1286 spapr->nested.guests = g_hash_table_new_full(NULL, 1287 NULL, 1288 NULL, 1289 destroy_guest_helper); 1290 } 1291 1292 nguests = g_hash_table_size(spapr->nested.guests); 1293 1294 if (nguests == PAPR_NESTED_GUEST_MAX) { 1295 return H_NO_MEM; 1296 } 1297 1298 /* Lookup for available guestid */ 1299 for (guestid = 1; guestid < PAPR_NESTED_GUEST_MAX; guestid++) { 1300 if (!(g_hash_table_lookup(spapr->nested.guests, 1301 GINT_TO_POINTER(guestid)))) { 1302 break; 1303 } 1304 } 1305 1306 if (guestid == PAPR_NESTED_GUEST_MAX) { 1307 return H_NO_MEM; 1308 } 1309 1310 guest = g_try_new0(struct SpaprMachineStateNestedGuest, 1); 1311 if (!guest) { 1312 return H_NO_MEM; 1313 } 1314 1315 guest->pvr_logical = spapr->nested.pvr_base; 1316 g_hash_table_insert(spapr->nested.guests, GINT_TO_POINTER(guestid), guest); 1317 env->gpr[4] = guestid; 1318 1319 return H_SUCCESS; 1320 } 1321 1322 static target_ulong h_guest_delete(PowerPCCPU *cpu, 1323 SpaprMachineState *spapr, 1324 target_ulong opcode, 1325 target_ulong *args) 1326 { 1327 target_ulong flags = args[0]; 1328 target_ulong guestid = args[1]; 1329 struct SpaprMachineStateNestedGuest *guest; 1330 1331 /* 1332 * handle flag deleteAllGuests, if set: 1333 * guestid is ignored and all guests are deleted 1334 * 1335 */ 1336 if (flags & ~H_GUEST_DELETE_ALL_FLAG) { 1337 return H_UNSUPPORTED_FLAG; /* other flag bits reserved */ 1338 } else if (flags & H_GUEST_DELETE_ALL_FLAG) { 1339 g_hash_table_destroy(spapr->nested.guests); 1340 return H_SUCCESS; 1341 } 1342 1343 guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid)); 1344 if (!guest) { 1345 return H_P2; 1346 } 1347 1348 g_hash_table_remove(spapr->nested.guests, GINT_TO_POINTER(guestid)); 1349 1350 return H_SUCCESS; 1351 } 1352 1353 static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu, 1354 SpaprMachineState *spapr, 1355 target_ulong opcode, 1356 target_ulong *args) 1357 { 1358 target_ulong flags = args[0]; 1359 target_ulong guestid = args[1]; 1360 target_ulong vcpuid = args[2]; 1361 SpaprMachineStateNestedGuest *guest; 1362 1363 if (flags) { /* don't handle any flags for now */ 1364 return H_UNSUPPORTED_FLAG; 1365 } 1366 1367 guest = spapr_get_nested_guest(spapr, guestid); 1368 if (!guest) { 1369 return H_P2; 1370 } 1371 1372 if (vcpuid < guest->nr_vcpus) { 1373 qemu_log_mask(LOG_UNIMP, "vcpuid " TARGET_FMT_ld " already in use.", 1374 vcpuid); 1375 return H_IN_USE; 1376 } 1377 /* linear vcpuid allocation only */ 1378 assert(vcpuid == guest->nr_vcpus); 1379 1380 if (guest->nr_vcpus >= PAPR_NESTED_GUEST_VCPU_MAX) { 1381 return H_P3; 1382 } 1383 1384 SpaprMachineStateNestedGuestVcpu *vcpus, *curr_vcpu; 1385 vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu, 1386 guest->vcpus, 1387 guest->nr_vcpus + 1); 1388 if (!vcpus) { 1389 return H_NO_MEM; 1390 } 1391 guest->vcpus = vcpus; 1392 curr_vcpu = &vcpus[guest->nr_vcpus]; 1393 memset(curr_vcpu, 0, sizeof(SpaprMachineStateNestedGuestVcpu)); 1394 1395 curr_vcpu->enabled = true; 1396 guest->nr_vcpus++; 1397 1398 return H_SUCCESS; 1399 } 1400 1401 static target_ulong getset_state(SpaprMachineStateNestedGuest *guest, 1402 uint64_t vcpuid, 1403 struct guest_state_request *gsr) 1404 { 1405 void *ptr; 1406 uint16_t id; 1407 struct guest_state_element *element; 1408 struct guest_state_element_type *type; 1409 int64_t lenleft, num_elements; 1410 1411 lenleft = gsr->len; 1412 1413 if (!guest_state_request_check(gsr)) { 1414 return H_P3; 1415 } 1416 1417 num_elements = be32_to_cpu(gsr->gsb->num_elements); 1418 element = gsr->gsb->elements; 1419 /* Process the elements */ 1420 while (num_elements) { 1421 type = NULL; 1422 /* log_element(element, gsr); */ 1423 1424 id = be16_to_cpu(element->id); 1425 if (id == GSB_HV_VCPU_IGNORED_ID) { 1426 goto next_element; 1427 } 1428 1429 type = guest_state_element_type_find(id); 1430 assert(type); 1431 1432 /* Get pointer to guest data to get/set */ 1433 if (type->location && type->copy) { 1434 ptr = type->location(guest, vcpuid); 1435 assert(ptr); 1436 if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) { 1437 return H_INVALID_ELEMENT_VALUE; 1438 } 1439 type->copy(ptr + type->offset, element->value, 1440 gsr->flags & GUEST_STATE_REQUEST_SET ? true : false); 1441 } 1442 1443 next_element: 1444 element = guest_state_element_next(element, &lenleft, &num_elements); 1445 } 1446 1447 return H_SUCCESS; 1448 } 1449 1450 static target_ulong map_and_getset_state(PowerPCCPU *cpu, 1451 SpaprMachineStateNestedGuest *guest, 1452 uint64_t vcpuid, 1453 struct guest_state_request *gsr) 1454 { 1455 target_ulong rc; 1456 int64_t len; 1457 bool is_write; 1458 1459 len = gsr->len; 1460 /* only get_state would require write access to the provided buffer */ 1461 is_write = (gsr->flags & GUEST_STATE_REQUEST_SET) ? false : true; 1462 gsr->gsb = address_space_map(CPU(cpu)->as, gsr->buf, (uint64_t *)&len, 1463 is_write, MEMTXATTRS_UNSPECIFIED); 1464 if (!gsr->gsb) { 1465 rc = H_P3; 1466 goto out1; 1467 } 1468 1469 if (len != gsr->len) { 1470 rc = H_P3; 1471 goto out1; 1472 } 1473 1474 rc = getset_state(guest, vcpuid, gsr); 1475 1476 out1: 1477 address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len); 1478 return rc; 1479 } 1480 1481 static target_ulong h_guest_getset_state(PowerPCCPU *cpu, 1482 SpaprMachineState *spapr, 1483 target_ulong *args, 1484 bool set) 1485 { 1486 target_ulong flags = args[0]; 1487 target_ulong lpid = args[1]; 1488 target_ulong vcpuid = args[2]; 1489 target_ulong buf = args[3]; 1490 target_ulong buflen = args[4]; 1491 struct guest_state_request gsr; 1492 SpaprMachineStateNestedGuest *guest; 1493 1494 guest = spapr_get_nested_guest(spapr, lpid); 1495 if (!guest) { 1496 return H_P2; 1497 } 1498 gsr.buf = buf; 1499 assert(buflen <= GSB_MAX_BUF_SIZE); 1500 gsr.len = buflen; 1501 gsr.flags = 0; 1502 if (flags & H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { 1503 gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE; 1504 } 1505 if (flags & !H_GUEST_GETSET_STATE_FLAG_GUEST_WIDE) { 1506 return H_PARAMETER; /* flag not supported yet */ 1507 } 1508 1509 if (set) { 1510 gsr.flags |= GUEST_STATE_REQUEST_SET; 1511 } 1512 return map_and_getset_state(cpu, guest, vcpuid, &gsr); 1513 } 1514 1515 static target_ulong h_guest_set_state(PowerPCCPU *cpu, 1516 SpaprMachineState *spapr, 1517 target_ulong opcode, 1518 target_ulong *args) 1519 { 1520 return h_guest_getset_state(cpu, spapr, args, true); 1521 } 1522 1523 static target_ulong h_guest_get_state(PowerPCCPU *cpu, 1524 SpaprMachineState *spapr, 1525 target_ulong opcode, 1526 target_ulong *args) 1527 { 1528 return h_guest_getset_state(cpu, spapr, args, false); 1529 } 1530 1531 void spapr_register_nested_hv(void) 1532 { 1533 spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl); 1534 spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested); 1535 spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate); 1536 spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest); 1537 } 1538 1539 void spapr_unregister_nested_hv(void) 1540 { 1541 spapr_unregister_hypercall(KVMPPC_H_SET_PARTITION_TABLE); 1542 spapr_unregister_hypercall(KVMPPC_H_ENTER_NESTED); 1543 spapr_unregister_hypercall(KVMPPC_H_TLB_INVALIDATE); 1544 spapr_unregister_hypercall(KVMPPC_H_COPY_TOFROM_GUEST); 1545 } 1546 1547 void spapr_register_nested_papr(void) 1548 { 1549 spapr_register_hypercall(H_GUEST_GET_CAPABILITIES, 1550 h_guest_get_capabilities); 1551 spapr_register_hypercall(H_GUEST_SET_CAPABILITIES, 1552 h_guest_set_capabilities); 1553 spapr_register_hypercall(H_GUEST_CREATE, h_guest_create); 1554 spapr_register_hypercall(H_GUEST_DELETE, h_guest_delete); 1555 spapr_register_hypercall(H_GUEST_CREATE_VCPU, h_guest_create_vcpu); 1556 spapr_register_hypercall(H_GUEST_SET_STATE, h_guest_set_state); 1557 spapr_register_hypercall(H_GUEST_GET_STATE, h_guest_get_state); 1558 } 1559 1560 void spapr_unregister_nested_papr(void) 1561 { 1562 spapr_unregister_hypercall(H_GUEST_GET_CAPABILITIES); 1563 spapr_unregister_hypercall(H_GUEST_SET_CAPABILITIES); 1564 spapr_unregister_hypercall(H_GUEST_CREATE); 1565 spapr_unregister_hypercall(H_GUEST_DELETE); 1566 spapr_unregister_hypercall(H_GUEST_CREATE_VCPU); 1567 spapr_unregister_hypercall(H_GUEST_SET_STATE); 1568 spapr_unregister_hypercall(H_GUEST_GET_STATE); 1569 } 1570 1571 #else 1572 void spapr_exit_nested(PowerPCCPU *cpu, int excp) 1573 { 1574 g_assert_not_reached(); 1575 } 1576 1577 void spapr_register_nested_hv(void) 1578 { 1579 /* DO NOTHING */ 1580 } 1581 1582 void spapr_unregister_nested_hv(void) 1583 { 1584 /* DO NOTHING */ 1585 } 1586 1587 bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu, 1588 target_ulong lpid, ppc_v3_pate_t *entry) 1589 { 1590 return false; 1591 } 1592 1593 void spapr_register_nested_papr(void) 1594 { 1595 /* DO NOTHING */ 1596 } 1597 1598 void spapr_unregister_nested_papr(void) 1599 { 1600 /* DO NOTHING */ 1601 } 1602 1603 void spapr_nested_gsb_init(void) 1604 { 1605 /* DO NOTHING */ 1606 } 1607 1608 #endif 1609