#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "exec/cputlb.h"
#include "exec/target_long.h"
#include "helper_regs.h"
#include "hw/ppc/ppc.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/spapr_nested.h"
#include "mmu-book3s-v3.h"
#include "cpu-models.h"
#include "qemu/log.h"

void spapr_nested_reset(SpaprMachineState *spapr)
{
    if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
        spapr_unregister_nested_hv();
        spapr_register_nested_hv();
    } else if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_PAPR)) {
        spapr->nested.capabilities_set = false;
        spapr_unregister_nested_papr();
        spapr_register_nested_papr();
        spapr_nested_gsb_init();
    } else {
        spapr->nested.api = 0;
    }
}

uint8_t spapr_nested_api(SpaprMachineState *spapr)
{
    return spapr->nested.api;
}

#ifdef CONFIG_TCG

bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong lpid, ppc_v3_pate_t *entry)
{
    uint64_t patb, pats;

    assert(lpid != 0);

    patb = spapr->nested.ptcr & PTCR_PATB;
    pats = spapr->nested.ptcr & PTCR_PATS;
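    /*
     * PATS encodes the partition table size as 2^(12 + PATS) bytes, so the
     * base must be aligned to that size and, with 16-byte entries, the
     * table holds 2^(12 + PATS - 4) PATEs to index by lpid.
     */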

    /* Check if partition table is properly aligned */
    if (patb & MAKE_64BIT_MASK(0, pats + 12)) {
        return false;
    }

    /* Calculate number of entries */
    pats = 1ull << (pats + 12 - 4);
    if (pats <= lpid) {
        return false;
    }

    /* Grab entry */
    patb += 16 * lpid;
    entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
    entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
    return true;
}

static
SpaprMachineStateNestedGuest *spapr_get_nested_guest(SpaprMachineState *spapr,
                                                     target_ulong guestid)
{
    return spapr->nested.guests ?
        g_hash_table_lookup(spapr->nested.guests,
                            GINT_TO_POINTER(guestid)) : NULL;
}

bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
                                target_ulong lpid, ppc_v3_pate_t *entry)
{
    SpaprMachineStateNestedGuest *guest;

    assert(lpid != 0);
    guest = spapr_get_nested_guest(spapr, lpid);
    if (!guest) {
        return false;
    }

    entry->dw0 = guest->parttbl[0];
    entry->dw1 = guest->parttbl[1];
    return true;
}

#define PRTS_MASK 0x1f

static target_ulong h_set_ptbl(PowerPCCPU *cpu,
                               SpaprMachineState *spapr,
                               target_ulong opcode,
                               target_ulong *args)
{
    target_ulong ptcr = args[0];

    if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
        return H_FUNCTION;
    }

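    /*
     * PRTS encodes the partition table size as 2^(12 + PRTS) bytes;
     * reject PRTS > 4, i.e. tables larger than 64KiB (4096 entries).
     */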
    if ((ptcr & PRTS_MASK) + 12 - 4 > 12) {
        return H_PARAMETER;
    }

    spapr->nested.ptcr = ptcr; /* Save new partition table */

    return H_SUCCESS;
}

static target_ulong h_tlb_invalidate(PowerPCCPU *cpu,
                                     SpaprMachineState *spapr,
                                     target_ulong opcode,
                                     target_ulong *args)
{
    /*
     * The spapr virtual hypervisor nested HV implementation retains no L2
     * translation state except for TLB. And the TLB is always invalidated
     * across L1<->L2 transitions, so nothing is required here.
     */

    return H_SUCCESS;
}

static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    /*
     * This HCALL is not required, L1 KVM will take a slow path and walk the
     * page tables manually to do the data copy.
     */
    return H_FUNCTION;
}

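/*
 * Snapshot the current CPU state into a nested_ppc_state; used both to
 * save the L1 host context on entry and to capture L2 state on exit.
 */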
static void nested_save_state(struct nested_ppc_state *save, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    memcpy(save->gpr, env->gpr, sizeof(save->gpr));

    save->lr = env->lr;
    save->ctr = env->ctr;
    save->cfar = env->cfar;
    save->msr = env->msr;
    save->nip = env->nip;

    save->cr = ppc_get_cr(env);
    save->xer = cpu_read_xer(env);

    save->lpcr = env->spr[SPR_LPCR];
    save->lpidr = env->spr[SPR_LPIDR];
    save->pcr = env->spr[SPR_PCR];
    save->dpdes = env->spr[SPR_DPDES];
    save->hfscr = env->spr[SPR_HFSCR];
    save->srr0 = env->spr[SPR_SRR0];
    save->srr1 = env->spr[SPR_SRR1];
    save->sprg0 = env->spr[SPR_SPRG0];
    save->sprg1 = env->spr[SPR_SPRG1];
    save->sprg2 = env->spr[SPR_SPRG2];
    save->sprg3 = env->spr[SPR_SPRG3];
    save->pidr = env->spr[SPR_BOOKS_PID];
    save->ppr = env->spr[SPR_PPR];

    if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
        save->amor = env->spr[SPR_AMOR];
        save->dawr0 = env->spr[SPR_DAWR0];
        save->dawrx0 = env->spr[SPR_DAWRX0];
        save->ciabr = env->spr[SPR_CIABR];
        save->purr = env->spr[SPR_PURR];
        save->spurr = env->spr[SPR_SPURR];
        save->ic = env->spr[SPR_IC];
        save->vtb = env->spr[SPR_VTB];
        save->hdar = env->spr[SPR_HDAR];
        save->hdsisr = env->spr[SPR_HDSISR];
        save->heir = env->spr[SPR_HEIR];
        save->asdr = env->spr[SPR_ASDR];
        save->dawr1 = env->spr[SPR_DAWR1];
        save->dawrx1 = env->spr[SPR_DAWRX1];
        save->dexcr = env->spr[SPR_DEXCR];
        save->hdexcr = env->spr[SPR_HDEXCR];
        save->hashkeyr = env->spr[SPR_HASHKEYR];
        save->hashpkeyr = env->spr[SPR_HASHPKEYR];
        memcpy(save->vsr, env->vsr, sizeof(save->vsr));
        save->ebbhr = env->spr[SPR_EBBHR];
        save->tar = env->spr[SPR_TAR];
        save->ebbrr = env->spr[SPR_EBBRR];
        save->bescr = env->spr[SPR_BESCR];
        save->iamr = env->spr[SPR_IAMR];
        save->amr = env->spr[SPR_AMR];
        save->uamor = env->spr[SPR_UAMOR];
        save->dscr = env->spr[SPR_DSCR];
        save->fscr = env->spr[SPR_FSCR];
        save->pspb = env->spr[SPR_PSPB];
        save->ctrl = env->spr[SPR_CTRL];
        save->vrsave = env->spr[SPR_VRSAVE];
        save->dar = env->spr[SPR_DAR];
        save->dsisr = env->spr[SPR_DSISR];
        save->pmc1 = env->spr[SPR_POWER_PMC1];
        save->pmc2 = env->spr[SPR_POWER_PMC2];
        save->pmc3 = env->spr[SPR_POWER_PMC3];
        save->pmc4 = env->spr[SPR_POWER_PMC4];
        save->pmc5 = env->spr[SPR_POWER_PMC5];
        save->pmc6 = env->spr[SPR_POWER_PMC6];
        save->mmcr0 = env->spr[SPR_POWER_MMCR0];
        save->mmcr1 = env->spr[SPR_POWER_MMCR1];
        save->mmcr2 = env->spr[SPR_POWER_MMCR2];
        save->mmcra = env->spr[SPR_POWER_MMCRA];
        save->sdar = env->spr[SPR_POWER_SDAR];
        save->siar = env->spr[SPR_POWER_SIAR];
        save->sier = env->spr[SPR_POWER_SIER];
        save->vscr = ppc_get_vscr(env);
        save->fpscr = env->fpscr;
    } else if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
        save->tb_offset = env->tb_env->tb_offset;
    }
}

static void nested_post_load_state(CPUPPCState *env, CPUState *cs)
{
    /*
     * Recompute hflags and re-evaluate pending interrupts.
     */
    hreg_compute_hflags(env);
    ppc_maybe_interrupt(env);
    /*
     * Nested HV does not tag TLB entries between L1 and L2, so must
     * flush on transition.
     */
    tlb_flush(cs);
    env->reserve_addr = -1; /* Reset the reservation */
}

static void nested_load_state(PowerPCCPU *cpu, struct nested_ppc_state *load)
{
    CPUPPCState *env = &cpu->env;
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    memcpy(env->gpr, load->gpr, sizeof(env->gpr));

    env->lr = load->lr;
    env->ctr = load->ctr;
    env->cfar = load->cfar;
    env->msr = load->msr;
    env->nip = load->nip;

    ppc_set_cr(env, load->cr);
    cpu_write_xer(env, load->xer);

    env->spr[SPR_LPCR] = load->lpcr;
    env->spr[SPR_LPIDR] = load->lpidr;
    env->spr[SPR_PCR] = load->pcr;
    env->spr[SPR_DPDES] = load->dpdes;
    env->spr[SPR_HFSCR] = load->hfscr;
    env->spr[SPR_SRR0] = load->srr0;
    env->spr[SPR_SRR1] = load->srr1;
    env->spr[SPR_SPRG0] = load->sprg0;
    env->spr[SPR_SPRG1] = load->sprg1;
    env->spr[SPR_SPRG2] = load->sprg2;
    env->spr[SPR_SPRG3] = load->sprg3;
    env->spr[SPR_BOOKS_PID] = load->pidr;
    env->spr[SPR_PPR] = load->ppr;

    if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
        env->spr[SPR_AMOR] = load->amor;
        env->spr[SPR_DAWR0] = load->dawr0;
        env->spr[SPR_DAWRX0] = load->dawrx0;
        env->spr[SPR_CIABR] = load->ciabr;
        env->spr[SPR_PURR] = load->purr;
        env->spr[SPR_SPURR] = load->spurr;
        env->spr[SPR_IC] = load->ic;
        env->spr[SPR_VTB] = load->vtb;
        env->spr[SPR_HDAR] = load->hdar;
        env->spr[SPR_HDSISR] = load->hdsisr;
        env->spr[SPR_HEIR] = load->heir;
        env->spr[SPR_ASDR] = load->asdr;
        env->spr[SPR_DAWR1] = load->dawr1;
        env->spr[SPR_DAWRX1] = load->dawrx1;
        env->spr[SPR_DEXCR] = load->dexcr;
        env->spr[SPR_HDEXCR] = load->hdexcr;
        env->spr[SPR_HASHKEYR] = load->hashkeyr;
        env->spr[SPR_HASHPKEYR] = load->hashpkeyr;
        memcpy(env->vsr, load->vsr, sizeof(env->vsr));
        env->spr[SPR_EBBHR] = load->ebbhr;
        env->spr[SPR_TAR] = load->tar;
        env->spr[SPR_EBBRR] = load->ebbrr;
        env->spr[SPR_BESCR] = load->bescr;
        env->spr[SPR_IAMR] = load->iamr;
        env->spr[SPR_AMR] = load->amr;
        env->spr[SPR_UAMOR] = load->uamor;
        env->spr[SPR_DSCR] = load->dscr;
        env->spr[SPR_FSCR] = load->fscr;
        env->spr[SPR_PSPB] = load->pspb;
        env->spr[SPR_CTRL] = load->ctrl;
        env->spr[SPR_VRSAVE] = load->vrsave;
        env->spr[SPR_DAR] = load->dar;
        env->spr[SPR_DSISR] = load->dsisr;
        env->spr[SPR_POWER_PMC1] = load->pmc1;
        env->spr[SPR_POWER_PMC2] = load->pmc2;
        env->spr[SPR_POWER_PMC3] = load->pmc3;
        env->spr[SPR_POWER_PMC4] = load->pmc4;
        env->spr[SPR_POWER_PMC5] = load->pmc5;
        env->spr[SPR_POWER_PMC6] = load->pmc6;
        env->spr[SPR_POWER_MMCR0] = load->mmcr0;
        env->spr[SPR_POWER_MMCR1] = load->mmcr1;
        env->spr[SPR_POWER_MMCR2] = load->mmcr2;
        env->spr[SPR_POWER_MMCRA] = load->mmcra;
        env->spr[SPR_POWER_SDAR] = load->sdar;
        env->spr[SPR_POWER_SIAR] = load->siar;
        env->spr[SPR_POWER_SIER] = load->sier;
        ppc_store_vscr(env, load->vscr);
        ppc_store_fpscr(env, load->fpscr);
    } else if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
        env->tb_env->tb_offset = load->tb_offset;
    }
}

/*
 * When this handler returns, the environment is switched to the L2 guest
 * and TCG begins running that. spapr_exit_nested() performs the switch from
 * L2 back to L1 and returns from the H_ENTER_NESTED hcall.
 */
static target_ulong h_enter_nested(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct nested_ppc_state l2_state;
    target_ulong hv_ptr = args[0];
    target_ulong regs_ptr = args[1];
    target_ulong hdec, now = cpu_ppc_load_tbl(env);
    target_ulong lpcr, lpcr_mask;
    struct kvmppc_hv_guest_state *hvstate;
    struct kvmppc_hv_guest_state hv_state;
    struct kvmppc_pt_regs *regs;
    hwaddr len;

    if (spapr->nested.ptcr == 0) {
        return H_NOT_AVAILABLE;
    }

    len = sizeof(*hvstate);
    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false,
                                MEMTXATTRS_UNSPECIFIED);
    if (len != sizeof(*hvstate)) {
        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false);
        return H_PARAMETER;
    }

    memcpy(&hv_state, hvstate, len);

    address_space_unmap(CPU(cpu)->as, hvstate, len, len, false);

    /*
     * We accept versions 1 and 2. Version 2 fields are unused because TCG
     * does not implement DAWR*.
     */
    if (hv_state.version > HV_GUEST_STATE_VERSION) {
        return H_PARAMETER;
    }

    if (hv_state.lpid == 0) {
        return H_PARAMETER;
    }

    spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1);
    if (!spapr_cpu->nested_host_state) {
        return H_NO_MEM;
    }

    assert(env->spr[SPR_LPIDR] == 0);
    assert(env->spr[SPR_DPDES] == 0);
    nested_save_state(spapr_cpu->nested_host_state, cpu);

    len = sizeof(*regs);
    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false,
                             MEMTXATTRS_UNSPECIFIED);
    if (!regs || len != sizeof(*regs)) {
        address_space_unmap(CPU(cpu)->as, regs, len, 0, false);
        g_free(spapr_cpu->nested_host_state);
        return H_P2;
    }

    len = sizeof(l2_state.gpr);
    assert(len == sizeof(regs->gpr));
    memcpy(l2_state.gpr, regs->gpr, len);

    l2_state.lr = regs->link;
    l2_state.ctr = regs->ctr;
    l2_state.xer = regs->xer;
    l2_state.cr = regs->ccr;
    l2_state.msr = regs->msr;
    l2_state.nip = regs->nip;

    address_space_unmap(CPU(cpu)->as, regs, len, len, false);

    l2_state.cfar = hv_state.cfar;
    l2_state.lpidr = hv_state.lpid;

    lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER;
    lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask);
    lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE;
    lpcr &= ~LPCR_LPES0;
    l2_state.lpcr = lpcr & pcc->lpcr_mask;

    l2_state.pcr = hv_state.pcr;
    /* hv_state.amor is not used */
    l2_state.dpdes = hv_state.dpdes;
    l2_state.hfscr = hv_state.hfscr;
    /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs */
    l2_state.srr0 = hv_state.srr0;
    l2_state.srr1 = hv_state.srr1;
    l2_state.sprg0 = hv_state.sprg[0];
    l2_state.sprg1 = hv_state.sprg[1];
    l2_state.sprg2 = hv_state.sprg[2];
    l2_state.sprg3 = hv_state.sprg[3];
    l2_state.pidr = hv_state.pidr;
    l2_state.ppr = hv_state.ppr;
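    /*
     * hv_state.tb_offset is the L2 offset relative to the L1 timebase, so
     * it stacks on top of L1's own offset from the host timebase.
     */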
    l2_state.tb_offset = env->tb_env->tb_offset + hv_state.tb_offset;

    /*
     * Switch to the nested guest environment and start the "hdec" timer.
     */
    nested_load_state(cpu, &l2_state);
    nested_post_load_state(env, cs);

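    /* hdec_expiry is an absolute L1 timebase value; convert it to a delta */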
    hdec = hv_state.hdec_expiry - now;
    cpu_ppc_hdecr_init(env);
    cpu_ppc_store_hdecr(env, hdec);

    /*
     * The hv_state.vcpu_token is not needed. It is used by the KVM
     * implementation to remember which L2 vCPU last ran on which physical
     * CPU so as to invalidate process scope translations if it is moved
     * between physical CPUs. For now TLBs are always flushed on L1<->L2
     * transitions so this is not a problem.
     *
     * Could validate that the same vcpu_token does not attempt to run on
     * different L1 vCPUs at the same time, but that would be a L1 KVM bug
     * and it's not obviously worth a new data structure to do it.
     */

    spapr_cpu->in_nested = true;

    /*
     * The spapr hcall helper sets env->gpr[3] to the return value, but at
     * this point the L1 is not returning from the hcall; we are switching
     * to run the L2 instead. Return env->gpr[3] so that r3 is left
     * unchanged.
     */
    return env->gpr[3];
}

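/*
 * Restore the L1 host context on an L2 exit and write the final L2 state
 * back to the hv_regs/pt_regs buffers that L1 passed to H_ENTER_NESTED.
 */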
static void spapr_exit_nested_hv(PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    struct nested_ppc_state l2_state;
    target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
    target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
    target_ulong hsrr0, hsrr1, hdar, asdr, hdsisr;
    struct kvmppc_hv_guest_state *hvstate;
    struct kvmppc_pt_regs *regs;
    hwaddr len;

    nested_save_state(&l2_state, cpu);
    hsrr0 = env->spr[SPR_HSRR0];
    hsrr1 = env->spr[SPR_HSRR1];
    hdar = env->spr[SPR_HDAR];
    hdsisr = env->spr[SPR_HDSISR];
    asdr = env->spr[SPR_ASDR];

    /*
     * Switch back to the host environment (including for any error).
     */
    assert(env->spr[SPR_LPIDR] != 0);
    nested_load_state(cpu, spapr_cpu->nested_host_state);
    nested_post_load_state(env, cs);
    env->gpr[3] = env->excp_vectors[excp]; /* hcall return value */

    cpu_ppc_hdecr_exit(env);

    spapr_cpu->in_nested = false;

    g_free(spapr_cpu->nested_host_state);
    spapr_cpu->nested_host_state = NULL;

    len = sizeof(*hvstate);
    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
                                MEMTXATTRS_UNSPECIFIED);
    if (len != sizeof(*hvstate)) {
        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
        env->gpr[3] = H_PARAMETER;
        return;
    }

    hvstate->cfar = l2_state.cfar;
    hvstate->lpcr = l2_state.lpcr;
    hvstate->pcr = l2_state.pcr;
    hvstate->dpdes = l2_state.dpdes;
    hvstate->hfscr = l2_state.hfscr;

    if (excp == POWERPC_EXCP_HDSI) {
        hvstate->hdar = hdar;
        hvstate->hdsisr = hdsisr;
        hvstate->asdr = asdr;
    } else if (excp == POWERPC_EXCP_HISI) {
        hvstate->asdr = asdr;
    }

    /* HEIR should be implemented for HV mode and saved here. */
    hvstate->srr0 = l2_state.srr0;
    hvstate->srr1 = l2_state.srr1;
    hvstate->sprg[0] = l2_state.sprg0;
    hvstate->sprg[1] = l2_state.sprg1;
    hvstate->sprg[2] = l2_state.sprg2;
    hvstate->sprg[3] = l2_state.sprg3;
    hvstate->pidr = l2_state.pidr;
    hvstate->ppr = l2_state.ppr;

    /* Is it okay to specify write length larger than actual data written? */
    address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);

    len = sizeof(*regs);
    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true,
                             MEMTXATTRS_UNSPECIFIED);
    if (!regs || len != sizeof(*regs)) {
        address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
        env->gpr[3] = H_P2;
        return;
    }

    len = sizeof(env->gpr);
    assert(len == sizeof(regs->gpr));
    memcpy(regs->gpr, l2_state.gpr, len);

    regs->link = l2_state.lr;
    regs->ctr = l2_state.ctr;
    regs->xer = l2_state.xer;
    regs->ccr = l2_state.cr;

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_SYSCALL) {
        regs->nip = l2_state.srr0;
        regs->msr = l2_state.srr1 & env->msr_mask;
    } else {
        regs->nip = hsrr0;
        regs->msr = hsrr1 & env->msr_mask;
    }

    /* Is it okay to specify write length larger than actual data written? */
    address_space_unmap(CPU(cpu)->as, regs, len, len, true);
}

static bool spapr_nested_vcpu_check(SpaprMachineStateNestedGuest *guest,
                                    target_ulong vcpuid, bool inoutbuf)
{
    struct SpaprMachineStateNestedGuestVcpu *vcpu;
    /*
     * Perform sanity checks for the provided vcpuid of a guest.
     * For now, ensure it's valid, allocated and enabled for use.
     */

    if (vcpuid >= PAPR_NESTED_GUEST_VCPU_MAX) {
        return false;
    }

    if (vcpuid >= guest->nr_vcpus) {
        return false;
    }

    vcpu = &guest->vcpus[vcpuid];
    if (!vcpu->enabled) {
        return false;
    }

    if (!inoutbuf) {
        return true;
    }

    /* Check to see if the in/out buffers are registered */
    if (vcpu->runbufin.addr && vcpu->runbufout.addr) {
        return true;
    }

    return false;
}

static void *get_vcpu_state_ptr(SpaprMachineState *spapr,
                                SpaprMachineStateNestedGuest *guest,
                                target_ulong vcpuid)
{
    assert(spapr_nested_vcpu_check(guest, vcpuid, false));
    return &guest->vcpus[vcpuid].state;
}

static void *get_vcpu_ptr(SpaprMachineState *spapr,
                          SpaprMachineStateNestedGuest *guest,
                          target_ulong vcpuid)
{
    assert(spapr_nested_vcpu_check(guest, vcpuid, false));
    return &guest->vcpus[vcpuid];
}

static void *get_guest_ptr(SpaprMachineState *spapr,
                           SpaprMachineStateNestedGuest *guest,
                           target_ulong vcpuid)
{
    return guest; /* for GSBE_NESTED */
}

static void *get_machine_ptr(SpaprMachineState *spapr,
                             SpaprMachineStateNestedGuest *guest,
                             target_ulong vcpuid)
{
    /* ignore guest and vcpuid for this */
    return &spapr->nested;
}

/*
 * set=1 means the L1 is trying to set some state
 * set=0 means the L1 is trying to get some state
 */
static void copy_state_8to8(void *a, void *b, bool set)
{
    /* "set" copies from the big-endian element buffer into internal state */

    if (set) {
        *(uint64_t *)a = be64_to_cpu(*(uint64_t *)b);
    } else {
        *(uint64_t *)b = cpu_to_be64(*(uint64_t *)a);
    }
}

static void copy_state_4to4(void *a, void *b, bool set)
{
    if (set) {
        *(uint32_t *)a = be32_to_cpu(*(uint32_t *)b);
    } else {
        *(uint32_t *)b = cpu_to_be32(*((uint32_t *)a));
    }
}

static void copy_state_16to16(void *a, void *b, bool set)
{
    uint64_t *src, *dst;

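    /*
     * The 16-byte value is stored in the buffer with its doublewords in
     * the opposite order to the internal layout, hence the [0]/[1] swap.
     */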
    if (set) {
        src = b;
        dst = a;

        dst[1] = be64_to_cpu(src[0]);
        dst[0] = be64_to_cpu(src[1]);
    } else {
        src = a;
        dst = b;

        dst[1] = cpu_to_be64(src[0]);
        dst[0] = cpu_to_be64(src[1]);
    }
}

static void copy_state_4to8(void *a, void *b, bool set)
{
    if (set) {
        *(uint64_t *)a = (uint64_t) be32_to_cpu(*(uint32_t *)b);
    } else {
        *(uint32_t *)b = cpu_to_be32((uint32_t) (*((uint64_t *)a)));
    }
}

static void copy_state_pagetbl(void *a, void *b, bool set)
{
    uint64_t *pagetbl;
    uint64_t *buf; /* 3 double words */
    uint64_t rts;

    assert(set);

    pagetbl = a;
    buf = b;

    *pagetbl = be64_to_cpu(buf[0]);
    /* as per ISA section 6.7.6.1 */
    *pagetbl |= PATE0_HR; /* Host Radix bit is 1 */

    /* RTS */
    rts = be64_to_cpu(buf[1]);
    assert(rts == 52);
    rts = rts - 31; /* since radix tree size = 2^(RTS + 31) */
    *pagetbl |= ((rts & 0x7) << 5);         /* RTS2 is bits 56:58 */
    *pagetbl |= (((rts >> 3) & 0x3) << 61); /* RTS1 is bits 1:2 */
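    /*
     * e.g. rts = 52 gives an RTS field of 21 = 0b10101: the low three bits
     * 0b101 land in RTS2 (bits 56:58) and the high two bits 0b10 land in
     * RTS1 (bits 1:2).
     */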

    /* RPDS {Size = 2^(RPDS + 3), RPDS >= 5} */
    *pagetbl |= 63 - clz64(be64_to_cpu(buf[2])) - 3;
}

static void copy_state_proctbl(void *a, void *b, bool set)
{
    uint64_t *proctbl;
    uint64_t *buf; /* 2 double words */

    assert(set);

    proctbl = a;
    buf = b;
    /* PRTB: Process Table Base */
    *proctbl = be64_to_cpu(buf[0]);
    /* PRTS: Process Table Size = 2^(12 + PRTS) */
    if (be64_to_cpu(buf[1]) == (1ULL << 12)) {
        *proctbl |= 0;
    } else if (be64_to_cpu(buf[1]) == (1ULL << 24)) {
        *proctbl |= 12;
    } else {
        g_assert_not_reached();
    }
}

static void copy_state_runbuf(void *a, void *b, bool set)
{
    uint64_t *buf; /* 2 double words */
    struct SpaprMachineStateNestedGuestVcpuRunBuf *runbuf;

    assert(set);

    runbuf = a;
    buf = b;

    runbuf->addr = be64_to_cpu(buf[0]);
    assert(runbuf->addr);

    /* per spec */
    assert(be64_to_cpu(buf[1]) <= 16384);

    /*
     * This check also applies to the input buffer; that is fine for now,
     * but the function can be split if the requirements ever diverge.
     */
    assert(be64_to_cpu(buf[1]) >= VCPU_OUT_BUF_MIN_SZ);

    runbuf->size = be64_to_cpu(buf[1]);
}

/* tell the L1 how big we want the output vcpu run buffer */
static void out_buf_min_size(void *a, void *b, bool set)
{
    uint64_t *buf; /* 1 double word */

    assert(!set);

    buf = b;

    buf[0] = cpu_to_be64(VCPU_OUT_BUF_MIN_SZ);
}

static void copy_logical_pvr(void *a, void *b, bool set)
{
    SpaprMachineStateNestedGuest *guest;
    uint32_t *buf; /* 1 word */
    uint32_t *pvr_logical_ptr;
    uint32_t pvr_logical;
    target_ulong pcr = 0;

    pvr_logical_ptr = a;
    buf = b;

    if (!set) {
        buf[0] = cpu_to_be32(*pvr_logical_ptr);
        return;
    }

    pvr_logical = be32_to_cpu(buf[0]);

    *pvr_logical_ptr = pvr_logical;

    if (*pvr_logical_ptr) {
        switch (*pvr_logical_ptr) {
        case CPU_POWERPC_LOGICAL_3_10_P11:
        case CPU_POWERPC_LOGICAL_3_10:
            pcr = PCR_COMPAT_3_10 | PCR_COMPAT_3_00;
            break;
        case CPU_POWERPC_LOGICAL_3_00:
            pcr = PCR_COMPAT_3_00;
            break;
        default:
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not set PCR for LPVR=0x%08x\n",
                          *pvr_logical_ptr);
            return;
        }
    }

    guest = container_of(pvr_logical_ptr,
                         struct SpaprMachineStateNestedGuest,
                         pvr_logical);
    for (int i = 0; i < guest->nr_vcpus; i++) {
        guest->vcpus[i].state.pcr = ~pcr | HVMASK_PCR;
    }
}

static void copy_tb_offset(void *a, void *b, bool set)
{
    SpaprMachineStateNestedGuest *guest;
    uint64_t *buf; /* 1 double word */
    uint64_t *tb_offset_ptr;
    uint64_t tb_offset;

    tb_offset_ptr = a;
    buf = b;

    if (!set) {
        buf[0] = cpu_to_be64(*tb_offset_ptr);
        return;
    }

    tb_offset = be64_to_cpu(buf[0]);
    /* need to copy this to the individual tb_offset for each vcpu */
    guest = container_of(tb_offset_ptr,
                         struct SpaprMachineStateNestedGuest,
                         tb_offset);
    for (int i = 0; i < guest->nr_vcpus; i++) {
        guest->vcpus[i].tb_offset = tb_offset;
    }
}

static void copy_state_hdecr(void *a, void *b, bool set)
{
    uint64_t *buf; /* 1 double word */
    uint64_t *hdecr_expiry_tb;

    hdecr_expiry_tb = a;
    buf = b;

    if (!set) {
        buf[0] = cpu_to_be64(*hdecr_expiry_tb);
        return;
    }

    *hdecr_expiry_tb = be64_to_cpu(buf[0]);
}

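/*
 * Table of every guest state element the L0 understands: the GSB ID, its
 * size, how to locate the backing field, and how to copy it in or out.
 */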
struct guest_state_element_type guest_state_element_types[] = {
    GUEST_STATE_ELEMENT_NOP(GSB_HV_VCPU_IGNORED_ID, 0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR0, gpr[0]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR1, gpr[1]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR2, gpr[2]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR3, gpr[3]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR4, gpr[4]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR5, gpr[5]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR6, gpr[6]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR7, gpr[7]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR8, gpr[8]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR9, gpr[9]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR10, gpr[10]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR11, gpr[11]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR12, gpr[12]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR13, gpr[13]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR14, gpr[14]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR15, gpr[15]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR16, gpr[16]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR17, gpr[17]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR18, gpr[18]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR19, gpr[19]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR20, gpr[20]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR21, gpr[21]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR22, gpr[22]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR23, gpr[23]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR24, gpr[24]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR25, gpr[25]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR26, gpr[26]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR27, gpr[27]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR28, gpr[28]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR29, gpr[29]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR30, gpr[30]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_GPR31, gpr[31]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_NIA, nip),
    GSE_ENV_DWM(GSB_VCPU_SPR_MSR, msr, HVMASK_MSR),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTR, ctr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_LR, lr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_XER, xer),
    GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_CR, cr),
    GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_MMCR3),
    GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER2),
    GUEST_STATE_ELEMENT_NOP_DW(GSB_VCPU_SPR_SIER3),
    GUEST_STATE_ELEMENT_NOP_W(GSB_VCPU_SPR_WORT),
    GSE_ENV_DWM(GSB_VCPU_SPR_LPCR, lpcr, HVMASK_LPCR),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMOR, amor),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HFSCR, hfscr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR0, dawr0),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX0, dawrx0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CIABR, ciabr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PURR, purr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPURR, spurr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IC, ic),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_VTB, vtb),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HDAR, hdar),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HDSISR, hdsisr),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_HEIR, heir),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_ASDR, asdr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR0, srr0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SRR1, srr1),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG0, sprg0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG1, sprg1),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG2, sprg2),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SPRG3, sprg3),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PIDR, pidr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CFAR, cfar),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_PPR, ppr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAWR1, dawr1),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DAWRX1, dawrx1),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DEXCR, dexcr),
    GSE_ENV_DWM(GSB_VCPU_SPR_HDEXCR, hdexcr, HVMASK_HDEXCR),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHKEYR, hashkeyr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_HASHPKEYR, hashpkeyr),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR0, vsr[0]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR1, vsr[1]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR2, vsr[2]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR3, vsr[3]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR4, vsr[4]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR5, vsr[5]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR6, vsr[6]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR7, vsr[7]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR8, vsr[8]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR9, vsr[9]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR10, vsr[10]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR11, vsr[11]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR12, vsr[12]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR13, vsr[13]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR14, vsr[14]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR15, vsr[15]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR16, vsr[16]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR17, vsr[17]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR18, vsr[18]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR19, vsr[19]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR20, vsr[20]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR21, vsr[21]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR22, vsr[22]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR23, vsr[23]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR24, vsr[24]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR25, vsr[25]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR26, vsr[26]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR27, vsr[27]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR28, vsr[28]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR29, vsr[29]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR30, vsr[30]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR31, vsr[31]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR32, vsr[32]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR33, vsr[33]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR34, vsr[34]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR35, vsr[35]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR36, vsr[36]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR37, vsr[37]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR38, vsr[38]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR39, vsr[39]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR40, vsr[40]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR41, vsr[41]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR42, vsr[42]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR43, vsr[43]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR44, vsr[44]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR45, vsr[45]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR46, vsr[46]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR47, vsr[47]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR48, vsr[48]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR49, vsr[49]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR50, vsr[50]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR51, vsr[51]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR52, vsr[52]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR53, vsr[53]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR54, vsr[54]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR55, vsr[55]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR56, vsr[56]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR57, vsr[57]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR58, vsr[58]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR59, vsr[59]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR60, vsr[60]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR61, vsr[61]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR62, vsr[62]),
    GUEST_STATE_ELEMENT_ENV_QW(GSB_VCPU_SPR_VSR63, vsr[63]),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBHR, ebbhr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_TAR, tar),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_EBBRR, ebbrr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_BESCR, bescr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_IAMR, iamr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_AMR, amr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_UAMOR, uamor),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DSCR, dscr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FSCR, fscr),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PSPB, pspb),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_CTRL, ctrl),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DPDES, dpdes),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_VRSAVE, vrsave),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_DAR, dar),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_DSISR, dsisr),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC1, pmc1),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC2, pmc2),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC3, pmc3),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC4, pmc4),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC5, pmc5),
    GUEST_STATE_ELEMENT_ENV_W(GSB_VCPU_SPR_PMC6, pmc6),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR0, mmcr0),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR1, mmcr1),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCR2, mmcr2),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_MMCRA, mmcra),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SDAR, sdar),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIAR, siar),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_SIER, sier),
    GUEST_STATE_ELEMENT_ENV_WW(GSB_VCPU_SPR_VSCR, vscr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_SPR_FPSCR, fpscr),
    GUEST_STATE_ELEMENT_ENV_DW(GSB_VCPU_DEC_EXPIRE_TB, dec_expiry_tb),
    GSBE_NESTED(GSB_PART_SCOPED_PAGETBL, 0x18, parttbl[0], copy_state_pagetbl),
    GSBE_NESTED(GSB_PROCESS_TBL, 0x10, parttbl[1], copy_state_proctbl),
    GSBE_NESTED(GSB_VCPU_LPVR, 0x4, pvr_logical, copy_logical_pvr),
    GSBE_NESTED_MSK(GSB_TB_OFFSET, 0x8, tb_offset, copy_tb_offset,
                    HVMASK_TB_OFFSET),
    GSBE_NESTED_VCPU(GSB_VCPU_IN_BUFFER, 0x10, runbufin, copy_state_runbuf),
    GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUFFER, 0x10, runbufout, copy_state_runbuf),
    GSBE_NESTED_VCPU(GSB_VCPU_OUT_BUF_MIN_SZ, 0x8, runbufout, out_buf_min_size),
    GSBE_NESTED_VCPU(GSB_VCPU_HDEC_EXPIRY_TB, 0x8, hdecr_expiry_tb,
                     copy_state_hdecr),
    GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_INUSE, l0_guest_heap_inuse),
    GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_HEAP_MAX, l0_guest_heap_max),
    GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_SIZE_INUSE,
                           l0_guest_pgtable_size_inuse),
    GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_SIZE_MAX,
                           l0_guest_pgtable_size_max),
    GSBE_NESTED_MACHINE_DW(GSB_L0_GUEST_PGTABLE_RECLAIMED,
                           l0_guest_pgtable_reclaimed),
};

void spapr_nested_gsb_init(void)
{
    struct guest_state_element_type *type;

    /* Init the guest state elements lookup table, flags for now */
    for (int i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) {
        type = &guest_state_element_types[i];

        assert(type->id <= GSB_LAST);
        if (type->id >= GSB_VCPU_SPR_HDAR) {
            /* 0xf000 - 0xf005 Thread + RO */
            type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY;
        } else if (type->id >= GSB_VCPU_IN_BUFFER) {
            /* 0x0c00 - 0xf000 Thread + RW */
            type->flags = 0;
        } else if (type->id >= GSB_L0_GUEST_HEAP_INUSE) {
            /* 0x0800 - 0x0804 Hostwide Counters + RO */
            type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE |
                          GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY;
        } else if (type->id >= GSB_VCPU_LPVR) {
            /* 0x0003 - 0x07ff Guest + RW */
            type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE;
        } else if (type->id >= GSB_HV_VCPU_STATE_SIZE) {
            /* 0x0001 - 0x0002 Guest + RO */
            type->flags = GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY |
                          GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE;
        }
    }
}

static struct guest_state_element *guest_state_element_next(
    struct guest_state_element *element,
    int64_t *len,
    int64_t *num_elements)
{
    uint16_t size;

    /* size is of element->value[] only. Not whole guest_state_element */
    size = be16_to_cpu(element->size);

    if (len) {
        *len -= size + offsetof(struct guest_state_element, value);
    }

    if (num_elements) {
        *num_elements -= 1;
    }

    return (struct guest_state_element *)(element->value + size);
}

static
struct guest_state_element_type *guest_state_element_type_find(uint16_t id)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(guest_state_element_types); i++) {
        if (id == guest_state_element_types[i].id) {
            return &guest_state_element_types[i];
        }
    }

    return NULL;
}

static void log_element(struct guest_state_element *element,
                        struct guest_state_request *gsr)
{
    qemu_log_mask(LOG_GUEST_ERROR, "h_guest_%s_state id:0x%04x size:0x%04x",
                  gsr->flags & GUEST_STATE_REQUEST_SET ? "set" : "get",
                  be16_to_cpu(element->id), be16_to_cpu(element->size));
    qemu_log_mask(LOG_GUEST_ERROR, "buf:0x%016"PRIx64" ...\n",
                  be64_to_cpu(*(uint64_t *)element->value));
}

static bool guest_state_request_check(struct guest_state_request *gsr)
{
    int64_t num_elements, len = gsr->len;
    struct guest_state_buffer *gsb = gsr->gsb;
    struct guest_state_element *element;
    struct guest_state_element_type *type;
    uint16_t id, size;

    /* gsb->num_elements is 32 bits, so a valid buffer is at least 4 bytes */
    assert(len >= 4);

    num_elements = be32_to_cpu(gsb->num_elements);
    element = gsb->elements;
    len -= sizeof(gsb->num_elements);

    /* Walk the buffer to validate the length */
    while (num_elements) {
        id = be16_to_cpu(element->id);
        size = be16_to_cpu(element->size);

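        /* Flip this to true to trace each element while debugging */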
        if (false) {
            log_element(element, gsr);
        }
        /* buffer size too small */
        if (len < 0) {
            return false;
        }

        type = guest_state_element_type_find(id);
        if (!type) {
            qemu_log_mask(LOG_GUEST_ERROR, "Element ID %04x unknown\n", id);
            log_element(element, gsr);
            return false;
        }

        if (id == GSB_HV_VCPU_IGNORED_ID) {
            goto next_element;
        }

        if (size != type->size) {
            qemu_log_mask(LOG_GUEST_ERROR, "Size mismatch. Element ID:%04x. "
                          "Size Exp:%i Got:%i\n", id, type->size, size);
            log_element(element, gsr);
            return false;
        }

        if ((type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_READ_ONLY) &&
            (gsr->flags & GUEST_STATE_REQUEST_SET)) {
            qemu_log_mask(LOG_GUEST_ERROR, "Trying to set a read-only Element "
                          "ID:%04x.\n", id);
            return false;
        }

        if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_HOST_WIDE) {
            /* Host-wide elements can't be mixed with other types */
            if (!(gsr->flags & GUEST_STATE_REQUEST_HOST_WIDE)) {
                qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a host wide "
                              "Element ID:%04x.\n", id);
                return false;
            }
        } else if (type->flags & GUEST_STATE_ELEMENT_TYPE_FLAG_GUEST_WIDE) {
            /* guest wide element type */
            if (!(gsr->flags & GUEST_STATE_REQUEST_GUEST_WIDE)) {
                qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a guest wide "
                              "Element ID:%04x.\n", id);
                return false;
            }
        } else {
            /* thread wide element type */
            if (gsr->flags & (GUEST_STATE_REQUEST_GUEST_WIDE |
                              GUEST_STATE_REQUEST_HOST_WIDE)) {
                qemu_log_mask(LOG_GUEST_ERROR, "trying to get/set a thread wide"
                              " Element ID:%04x.\n", id);
                return false;
            }
        }
next_element:
        element = guest_state_element_next(element, &len, &num_elements);
    }
    return true;
}

static bool is_gsr_invalid(struct guest_state_request *gsr,
                           struct guest_state_element *element,
                           struct guest_state_element_type *type)
{
    if ((gsr->flags & GUEST_STATE_REQUEST_SET) &&
        (*(uint64_t *)(element->value) & ~(type->mask))) {
        log_element(element, gsr);
        qemu_log_mask(LOG_GUEST_ERROR, "L1 can't set reserved bits "
                      "(allowed mask: 0x%08"PRIx64")\n", type->mask);
        return true;
    }
    return false;
}

static target_ulong h_guest_get_capabilities(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];

    if (flags) { /* don't handle any flags capabilities for now */
        return H_PARAMETER;
    }

    /* P11 capabilities */
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0,
                         spapr->max_compat_pvr)) {
        env->gpr[4] |= H_GUEST_CAPABILITIES_P11_MODE;
    }

    /* P10 capabilities */
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
                         spapr->max_compat_pvr)) {
        env->gpr[4] |= H_GUEST_CAPABILITIES_P10_MODE;
    }

    /* P9 capabilities */
    if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                         spapr->max_compat_pvr)) {
        env->gpr[4] |= H_GUEST_CAPABILITIES_P9_MODE;
    }

    return H_SUCCESS;
}

static target_ulong h_guest_set_capabilities(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong capabilities = args[1];

    env->gpr[4] = 0;

    if (flags) { /* don't handle any flags capabilities for now */
        return H_PARAMETER;
    }

    if (capabilities & H_GUEST_CAPABILITIES_COPY_MEM) {
        env->gpr[4] = 1;
        return H_P2; /* isn't supported */
    }

    /*
     * If there are no capabilities configured, set R5 to the index of
     * the first supported Power Processor Mode
     */
    if (!capabilities) {
        env->gpr[4] = 1;

        /* set R5 to the first supported Power Processor Mode */
        if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10_P11, 0,
                             spapr->max_compat_pvr)) {
            env->gpr[5] = H_GUEST_CAP_P11_MODE_BMAP;
        } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_10, 0,
                                    spapr->max_compat_pvr)) {
            env->gpr[5] = H_GUEST_CAP_P10_MODE_BMAP;
        } else if (ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                                    spapr->max_compat_pvr)) {
            env->gpr[5] = H_GUEST_CAP_P9_MODE_BMAP;
        }

        return H_P2;
    }

    /*
     * If an invalid capability is set, R5 should contain the index of the
     * invalid capability bit
     */
    if (capabilities & ~H_GUEST_CAP_VALID_MASK) {
        env->gpr[4] = 1;

        /* Set R5 to the index of the invalid capability */
        env->gpr[5] = 63 - ctz64(capabilities);

        return H_P2;
    }

    if (!spapr->nested.capabilities_set) {
        spapr->nested.capabilities_set = true;
        spapr->nested.pvr_base = env->spr[SPR_PVR];
        return H_SUCCESS;
    } else {
        return H_STATE;
    }
}

static void
destroy_guest_helper(gpointer value)
{
    struct SpaprMachineStateNestedGuest *guest = value;
    g_free(guest->vcpus);
    g_free(guest);
}

static target_ulong h_guest_create(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong continue_token = args[1];
    uint64_t guestid;
    int nguests = 0;
    struct SpaprMachineStateNestedGuest *guest;

    if (flags) { /* don't handle any flags for now */
        return H_UNSUPPORTED_FLAG;
    }

    if (continue_token != -1) {
        return H_P2;
    }

    if (!spapr->nested.capabilities_set) {
        return H_STATE;
    }

    if (!spapr->nested.guests) {
        spapr->nested.guests = g_hash_table_new_full(NULL,
                                                     NULL,
                                                     NULL,
                                                     destroy_guest_helper);
    }

    nguests = g_hash_table_size(spapr->nested.guests);

    if (nguests == PAPR_NESTED_GUEST_MAX) {
        return H_NO_MEM;
    }

    /* Look up an available guestid */
    for (guestid = 1; guestid < PAPR_NESTED_GUEST_MAX; guestid++) {
        if (!(g_hash_table_lookup(spapr->nested.guests,
                                  GINT_TO_POINTER(guestid)))) {
            break;
        }
    }

    if (guestid == PAPR_NESTED_GUEST_MAX) {
        return H_NO_MEM;
    }

    guest = g_try_new0(struct SpaprMachineStateNestedGuest, 1);
    if (!guest) {
        return H_NO_MEM;
    }

    guest->pvr_logical = spapr->nested.pvr_base;
    g_hash_table_insert(spapr->nested.guests, GINT_TO_POINTER(guestid), guest);
    env->gpr[4] = guestid;

    return H_SUCCESS;
}

static target_ulong h_guest_delete(PowerPCCPU *cpu,
                                   SpaprMachineState *spapr,
                                   target_ulong opcode,
                                   target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong guestid = args[1];
    struct SpaprMachineStateNestedGuest *guest;

    /*
     * If the deleteAllGuests flag is set, guestid is ignored and all
     * guests are deleted.
     */
    if (flags & ~H_GUEST_DELETE_ALL_FLAG) {
        return H_UNSUPPORTED_FLAG; /* other flag bits reserved */
    } else if (flags & H_GUEST_DELETE_ALL_FLAG) {
        g_hash_table_destroy(spapr->nested.guests);
        return H_SUCCESS;
    }

    guest = g_hash_table_lookup(spapr->nested.guests, GINT_TO_POINTER(guestid));
    if (!guest) {
        return H_P2;
    }

    g_hash_table_remove(spapr->nested.guests, GINT_TO_POINTER(guestid));

    return H_SUCCESS;
}

static target_ulong h_guest_create_vcpu(PowerPCCPU *cpu,
                                        SpaprMachineState *spapr,
                                        target_ulong opcode,
                                        target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong guestid = args[1];
    target_ulong vcpuid = args[2];
    SpaprMachineStateNestedGuest *guest;
    SpaprMachineStateNestedGuestVcpu *vcpus, *curr_vcpu;

    if (flags) { /* don't handle any flags for now */
        return H_UNSUPPORTED_FLAG;
    }

    guest = spapr_get_nested_guest(spapr, guestid);
    if (!guest) {
        return H_P2;
    }

    if (vcpuid < guest->nr_vcpus) {
        qemu_log_mask(LOG_UNIMP, "vcpuid " TARGET_FMT_ld " already in use.",
                      vcpuid);
        return H_IN_USE;
    }
    /* linear vcpuid allocation only */
    assert(vcpuid == guest->nr_vcpus);

    if (guest->nr_vcpus >= PAPR_NESTED_GUEST_VCPU_MAX) {
        return H_P3;
    }

    vcpus = g_try_renew(struct SpaprMachineStateNestedGuestVcpu,
                        guest->vcpus,
                        guest->nr_vcpus + 1);
    if (!vcpus) {
        return H_NO_MEM;
    }
    guest->vcpus = vcpus;
    curr_vcpu = &vcpus[guest->nr_vcpus];
    memset(curr_vcpu, 0, sizeof(SpaprMachineStateNestedGuestVcpu));

    curr_vcpu->enabled = true;
    guest->nr_vcpus++;

    return H_SUCCESS;
}

static target_ulong getset_state(SpaprMachineState *spapr,
                                 SpaprMachineStateNestedGuest *guest,
                                 uint64_t vcpuid,
                                 struct guest_state_request *gsr)
{
    void *ptr;
    uint16_t id;
    struct guest_state_element *element;
    struct guest_state_element_type *type;
    int64_t lenleft, num_elements;

    lenleft = gsr->len;

    if (!guest_state_request_check(gsr)) {
        return H_P3;
    }

    num_elements = be32_to_cpu(gsr->gsb->num_elements);
    element = gsr->gsb->elements;
    /* Process the elements */
    while (num_elements) {
        type = NULL;
        /* log_element(element, gsr); */

        id = be16_to_cpu(element->id);
        if (id == GSB_HV_VCPU_IGNORED_ID) {
            goto next_element;
        }

        type = guest_state_element_type_find(id);
        assert(type);

        /* Get pointer to guest data to get/set */
        if (type->location && type->copy) {
            ptr = type->location(spapr, guest, vcpuid);
            assert(ptr);
            if (!~(type->mask) && is_gsr_invalid(gsr, element, type)) {
                return H_INVALID_ELEMENT_VALUE;
            }
            type->copy(ptr + type->offset, element->value,
                       gsr->flags & GUEST_STATE_REQUEST_SET ? true : false);
        }

next_element:
        element = guest_state_element_next(element, &lenleft, &num_elements);
    }

    return H_SUCCESS;
}

static target_ulong map_and_getset_state(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         SpaprMachineStateNestedGuest *guest,
                                         uint64_t vcpuid,
                                         struct guest_state_request *gsr)
{
    target_ulong rc;
    int64_t len;
    bool is_write;

    len = gsr->len;
    /* only get_state would require write access to the provided buffer */
    is_write = (gsr->flags & GUEST_STATE_REQUEST_SET) ? false : true;
    gsr->gsb = address_space_map(CPU(cpu)->as, gsr->buf, (uint64_t *)&len,
                                 is_write, MEMTXATTRS_UNSPECIFIED);
    if (!gsr->gsb) {
        rc = H_P3;
        goto out1;
    }

    if (len != gsr->len) {
        rc = H_P3;
        goto out1;
    }

    rc = getset_state(spapr, guest, vcpuid, gsr);

out1:
    address_space_unmap(CPU(cpu)->as, gsr->gsb, len, is_write, len);
    return rc;
}

static target_ulong h_guest_getset_state(PowerPCCPU *cpu,
                                         SpaprMachineState *spapr,
                                         target_ulong *args,
                                         bool set)
{
    target_ulong flags = args[0];
    target_ulong lpid = args[1];
    target_ulong vcpuid = args[2];
    target_ulong buf = args[3];
    target_ulong buflen = args[4];
    struct guest_state_request gsr;
    SpaprMachineStateNestedGuest *guest = NULL;

    gsr.buf = buf;
    assert(buflen <= GSB_MAX_BUF_SIZE);
    gsr.len = buflen;
    gsr.flags = 0;

    /* Works for both get/set state */
    if ((flags & H_GUEST_GET_STATE_FLAGS_GUEST_WIDE) ||
        (flags & H_GUEST_SET_STATE_FLAGS_GUEST_WIDE)) {
        gsr.flags |= GUEST_STATE_REQUEST_GUEST_WIDE;
    }

    if (set) {
        if (flags & ~H_GUEST_SET_STATE_FLAGS_MASK) {
            return H_PARAMETER;
        }
        gsr.flags |= GUEST_STATE_REQUEST_SET;
    } else {
        /*
         * Reject reserved flag bits, and the GUEST and HOST wide bits
         * both being set at once.
         */
        if ((flags & ~H_GUEST_GET_STATE_FLAGS_MASK) ||
            (flags == H_GUEST_GET_STATE_FLAGS_MASK)) {
            return H_PARAMETER;
        }

        if (flags & H_GUEST_GET_STATE_FLAGS_HOST_WIDE) {
            gsr.flags |= GUEST_STATE_REQUEST_HOST_WIDE;
        }
    }

    if (!(gsr.flags & GUEST_STATE_REQUEST_HOST_WIDE)) {
        guest = spapr_get_nested_guest(spapr, lpid);
        if (!guest) {
            return H_P2;
        }
    }
    return map_and_getset_state(cpu, spapr, guest, vcpuid, &gsr);
}

static target_ulong h_guest_set_state(PowerPCCPU *cpu,
                                      SpaprMachineState *spapr,
                                      target_ulong opcode,
                                      target_ulong *args)
{
    return h_guest_getset_state(cpu, spapr, args, true);
}

static target_ulong h_guest_get_state(PowerPCCPU *cpu,
                                      SpaprMachineState *spapr,
                                      target_ulong opcode,
                                      target_ulong *args)
{
    return h_guest_getset_state(cpu, spapr, args, false);
}

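/*
 * Capture the L2 state on a guest exit, converting DEC to an expiry
 * timebase value and fixing up NIP/MSR from the interrupt save/restore
 * registers appropriate to the exit cause.
 */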
static void exit_nested_store_l2(PowerPCCPU *cpu, int excp,
                                 SpaprMachineStateNestedGuestVcpu *vcpu)
{
    CPUPPCState *env = &cpu->env;
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    target_ulong now, hdar, hdsisr, asdr;

    assert(sizeof(env->gpr) == sizeof(vcpu->state.gpr)); /* sanity check */

    now = cpu_ppc_load_tbl(env); /* L2 timebase */
    now -= vcpu->tb_offset; /* L1 timebase */
    vcpu->state.dec_expiry_tb = now - cpu_ppc_load_decr(env);
    cpu_ppc_store_decr(env, spapr_cpu->nested_host_state->dec_expiry_tb - now);
    /* back up hdar, hdsisr and asdr in case they are needed again below */
    hdar = vcpu->state.hdar;
    hdsisr = vcpu->state.hdsisr;
    asdr = vcpu->state.asdr;

    nested_save_state(&vcpu->state, cpu);

    if (excp == POWERPC_EXCP_MCHECK ||
        excp == POWERPC_EXCP_RESET ||
        excp == POWERPC_EXCP_SYSCALL) {
        vcpu->state.nip = env->spr[SPR_SRR0];
        vcpu->state.msr = env->spr[SPR_SRR1] & env->msr_mask;
    } else {
        vcpu->state.nip = env->spr[SPR_HSRR0];
        vcpu->state.msr = env->spr[SPR_HSRR1] & env->msr_mask;
    }

    /* hdar, hdsisr and asdr should be retained unless certain exceptions */
    if ((excp != POWERPC_EXCP_HDSI) && (excp != POWERPC_EXCP_HISI)) {
        vcpu->state.asdr = asdr;
    } else if (excp != POWERPC_EXCP_HDSI) {
        vcpu->state.hdar = hdar;
        vcpu->state.hdsisr = hdsisr;
    }
}

static int get_exit_ids(uint64_t srr0, uint16_t ids[16])
{
    int nr;

1647 switch (srr0) {
1648 case 0xc00:
1649 nr = 10;
1650 ids[0] = GSB_VCPU_GPR3;
1651 ids[1] = GSB_VCPU_GPR4;
1652 ids[2] = GSB_VCPU_GPR5;
1653 ids[3] = GSB_VCPU_GPR6;
1654 ids[4] = GSB_VCPU_GPR7;
1655 ids[5] = GSB_VCPU_GPR8;
1656 ids[6] = GSB_VCPU_GPR9;
1657 ids[7] = GSB_VCPU_GPR10;
1658 ids[8] = GSB_VCPU_GPR11;
1659 ids[9] = GSB_VCPU_GPR12;
1660 break;
1661 case 0xe00:
1662 nr = 5;
1663 ids[0] = GSB_VCPU_SPR_HDAR;
1664 ids[1] = GSB_VCPU_SPR_HDSISR;
1665 ids[2] = GSB_VCPU_SPR_ASDR;
1666 ids[3] = GSB_VCPU_SPR_NIA;
1667 ids[4] = GSB_VCPU_SPR_MSR;
1668 break;
1669 case 0xe20:
1670 nr = 4;
1671 ids[0] = GSB_VCPU_SPR_HDAR;
1672 ids[1] = GSB_VCPU_SPR_ASDR;
1673 ids[2] = GSB_VCPU_SPR_NIA;
1674 ids[3] = GSB_VCPU_SPR_MSR;
1675 break;
1676 case 0xe40:
1677 nr = 3;
1678 ids[0] = GSB_VCPU_SPR_HEIR;
1679 ids[1] = GSB_VCPU_SPR_NIA;
1680 ids[2] = GSB_VCPU_SPR_MSR;
1681 break;
1682 case 0xf80:
1683 nr = 3;
1684 ids[0] = GSB_VCPU_SPR_HFSCR;
1685 ids[1] = GSB_VCPU_SPR_NIA;
1686 ids[2] = GSB_VCPU_SPR_MSR;
1687 break;
1688 default:
1689 nr = 0;
1690 break;
1691 }
1692
1693 return nr;
1694 }
1695
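/*
 * Fill the L1-supplied run-vcpu output buffer on exit: map it from guest
 * memory, write one element header (big-endian ID and size) per exit
 * cause ID, then let getset_state() copy the corresponding L2 values into
 * the element bodies.
 */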
static void exit_process_output_buffer(SpaprMachineState *spapr,
                                       PowerPCCPU *cpu,
                                       SpaprMachineStateNestedGuest *guest,
                                       target_ulong vcpuid,
                                       target_ulong *r3)
{
    SpaprMachineStateNestedGuestVcpu *vcpu = &guest->vcpus[vcpuid];
    struct guest_state_request gsr;
    struct guest_state_buffer *gsb;
    struct guest_state_element *element;
    struct guest_state_element_type *type;
    int exit_id_count = 0;
    uint16_t exit_cause_ids[16];
    hwaddr len;

    len = vcpu->runbufout.size;
    gsb = address_space_map(CPU(cpu)->as, vcpu->runbufout.addr, &len, true,
                            MEMTXATTRS_UNSPECIFIED);
    if (!gsb || len != vcpu->runbufout.size) {
        address_space_unmap(CPU(cpu)->as, gsb, len, true, len);
        *r3 = H_P2;
        return;
    }

    exit_id_count = get_exit_ids(*r3, exit_cause_ids);

    /* Create a buffer of elements to send back */
    gsb->num_elements = cpu_to_be32(exit_id_count);
    element = gsb->elements;
    for (int i = 0; i < exit_id_count; i++) {
        type = guest_state_element_type_find(exit_cause_ids[i]);
        assert(type);
        element->id = cpu_to_be16(exit_cause_ids[i]);
        element->size = cpu_to_be16(type->size);
        element = guest_state_element_next(element, NULL, NULL);
    }
    gsr.gsb = gsb;
    gsr.len = VCPU_OUT_BUF_MIN_SZ;
    gsr.flags = 0; /* get + never guest wide */
    getset_state(spapr, guest, vcpuid, &gsr);

    address_space_unmap(CPU(cpu)->as, gsb, len, true, len);
}

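/*
 * PAPR-API nested exit path: the guest and vcpu ids are recovered from
 * the saved hcall arguments (r5/r6), the L2 state and output buffer are
 * written back, and the L1 context is restored with H_SUCCESS in r3 and
 * the exit reason (interrupt vector) in r4.
 */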
static
void spapr_exit_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu, int excp)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */
    target_ulong lpid = 0, vcpuid = 0;
    struct SpaprMachineStateNestedGuestVcpu *vcpu = NULL;
    struct SpaprMachineStateNestedGuest *guest = NULL;

    lpid = spapr_cpu->nested_host_state->gpr[5];
    vcpuid = spapr_cpu->nested_host_state->gpr[6];
    guest = spapr_get_nested_guest(spapr, lpid);
    assert(guest);
    spapr_nested_vcpu_check(guest, vcpuid, false);
    vcpu = &guest->vcpus[vcpuid];

    exit_nested_store_l2(cpu, excp, vcpu);
    /* Fill the output buffer for run_vcpu */
    exit_process_output_buffer(spapr, cpu, guest, vcpuid, &r3_return);

    assert(env->spr[SPR_LPIDR] != 0);
    nested_load_state(cpu, spapr_cpu->nested_host_state);
    cpu_ppc_decrease_tb_by_offset(env, vcpu->tb_offset);
    env->gpr[3] = H_SUCCESS;
    env->gpr[4] = r3_return;
    nested_post_load_state(env, cs);
    cpu_ppc_hdecr_exit(env);

    spapr_cpu->in_nested = false;
    g_free(spapr_cpu->nested_host_state);
    spapr_cpu->nested_host_state = NULL;
}

void spapr_exit_nested(PowerPCCPU *cpu, int excp)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);

    assert(spapr_cpu->in_nested);
    if (spapr_nested_api(spapr) == NESTED_API_KVM_HV) {
        spapr_exit_nested_hv(cpu, excp);
    } else if (spapr_nested_api(spapr) == NESTED_API_PAPR) {
        spapr_exit_nested_papr(spapr, cpu, excp);
    } else {
        g_assert_not_reached();
    }
}

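/*
 * Prepare the CPU for running the L2: load vcpu->state, construct the L2
 * LPCR from the L1 value plus the handful of bits the guest may control,
 * program the decrementer and hypervisor decrementer from the saved
 * expiry values, and shift the timebase by the L2's offset.
 */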
static void nested_papr_load_l2(PowerPCCPU *cpu,
                                CPUPPCState *env,
                                SpaprMachineStateNestedGuestVcpu *vcpu,
                                target_ulong now)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
    target_ulong lpcr, lpcr_mask, hdec;
    lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER;

    assert(vcpu);
    assert(sizeof(env->gpr) == sizeof(vcpu->state.gpr));
    nested_load_state(cpu, &vcpu->state);
    lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) |
           (vcpu->state.lpcr & lpcr_mask);
    lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE;
    lpcr &= ~LPCR_LPES0;
    env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask;

    hdec = vcpu->hdecr_expiry_tb - now;
    cpu_ppc_store_decr(env, vcpu->state.dec_expiry_tb - now);
    cpu_ppc_hdecr_init(env);
    cpu_ppc_store_hdecr(env, hdec);

    cpu_ppc_increase_tb_by_offset(env, vcpu->tb_offset);
}

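/*
 * Enter the L2 vcpu: snapshot the current L1 state into
 * spapr_cpu->nested_host_state so the exit path can restore it, load the
 * L2 context, and only then point LPIDR at the nested guest's partition.
 */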
static void nested_papr_run_vcpu(PowerPCCPU *cpu,
                                 uint64_t lpid,
                                 SpaprMachineStateNestedGuestVcpu *vcpu)
{
    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
    target_ulong now = cpu_ppc_load_tbl(env);

    assert(env->spr[SPR_LPIDR] == 0);
    assert(spapr->nested.api); /* ensure API version is initialized */
    spapr_cpu->nested_host_state = g_try_new(struct nested_ppc_state, 1);
    assert(spapr_cpu->nested_host_state);
    nested_save_state(spapr_cpu->nested_host_state, cpu);
    spapr_cpu->nested_host_state->dec_expiry_tb = now - cpu_ppc_load_decr(env);
    nested_papr_load_l2(cpu, env, vcpu, now);
    env->spr[SPR_LPIDR] = lpid; /* post load l2 */

    spapr_cpu->in_nested = true;
    nested_post_load_state(env, cs);
}

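/*
 * H_GUEST_RUN_VCPU: args[0] = flags (must currently be zero),
 * args[1] = guest id, args[2] = vcpu id. The vcpu's registered input
 * buffer (presumably populated by the L1 through earlier state-setting
 * calls) is applied to the L2 state before entry; completion of the hcall
 * happens on the exit path in spapr_exit_nested_papr().
 */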
static target_ulong h_guest_run_vcpu(PowerPCCPU *cpu,
                                     SpaprMachineState *spapr,
                                     target_ulong opcode,
                                     target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong lpid = args[1];
    target_ulong vcpuid = args[2];
    struct SpaprMachineStateNestedGuestVcpu *vcpu;
    struct guest_state_request gsr;
    SpaprMachineStateNestedGuest *guest;
    target_ulong rc;

    if (flags) { /* don't handle any flags for now */
        return H_PARAMETER;
    }

    guest = spapr_get_nested_guest(spapr, lpid);
    if (!guest) {
        return H_P2;
    }
    if (!spapr_nested_vcpu_check(guest, vcpuid, true)) {
        return H_P3;
    }

    if (guest->parttbl[0] == 0) {
        /* At least need a partition scoped radix tree */
        return H_NOT_AVAILABLE;
    }

    vcpu = &guest->vcpus[vcpuid];

    /* Read run_vcpu input buffer to update state */
    gsr.buf = vcpu->runbufin.addr;
    gsr.len = vcpu->runbufin.size;
    gsr.flags = GUEST_STATE_REQUEST_SET; /* Thread wide + writing */
    rc = map_and_getset_state(cpu, spapr, guest, vcpuid, &gsr);
    if (rc == H_SUCCESS) {
        nested_papr_run_vcpu(cpu, lpid, vcpu);
    } else {
        env->gpr[3] = rc;
    }
    return env->gpr[3];
}

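/*
 * Hcall registration for the original KVM-HV nested API, implemented with
 * the vendor-specific KVMPPC_H_* hypercalls.
 */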
void spapr_register_nested_hv(void)
{
    spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl);
    spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested);
    spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate);
    spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest);
}

void spapr_unregister_nested_hv(void)
{
    spapr_unregister_hypercall(KVMPPC_H_SET_PARTITION_TABLE);
    spapr_unregister_hypercall(KVMPPC_H_ENTER_NESTED);
    spapr_unregister_hypercall(KVMPPC_H_TLB_INVALIDATE);
    spapr_unregister_hypercall(KVMPPC_H_COPY_TOFROM_GUEST);
}

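/*
 * Hcall registration for the PAPR nested API, which creates and runs
 * guests and vcpus through the H_GUEST_* hypercalls.
 */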
void spapr_register_nested_papr(void)
{
    spapr_register_hypercall(H_GUEST_GET_CAPABILITIES,
                             h_guest_get_capabilities);
    spapr_register_hypercall(H_GUEST_SET_CAPABILITIES,
                             h_guest_set_capabilities);
    spapr_register_hypercall(H_GUEST_CREATE, h_guest_create);
    spapr_register_hypercall(H_GUEST_DELETE, h_guest_delete);
    spapr_register_hypercall(H_GUEST_CREATE_VCPU, h_guest_create_vcpu);
    spapr_register_hypercall(H_GUEST_SET_STATE, h_guest_set_state);
    spapr_register_hypercall(H_GUEST_GET_STATE, h_guest_get_state);
    spapr_register_hypercall(H_GUEST_RUN_VCPU, h_guest_run_vcpu);
}

void spapr_unregister_nested_papr(void)
{
    spapr_unregister_hypercall(H_GUEST_GET_CAPABILITIES);
    spapr_unregister_hypercall(H_GUEST_SET_CAPABILITIES);
    spapr_unregister_hypercall(H_GUEST_CREATE);
    spapr_unregister_hypercall(H_GUEST_DELETE);
    spapr_unregister_hypercall(H_GUEST_CREATE_VCPU);
    spapr_unregister_hypercall(H_GUEST_SET_STATE);
    spapr_unregister_hypercall(H_GUEST_GET_STATE);
    spapr_unregister_hypercall(H_GUEST_RUN_VCPU);
}

#else
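/*
 * Builds without TCG have no emulated nested-HV support: provide stub
 * definitions so callers still link, and assert if a nested exit is ever
 * taken.
 */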
void spapr_exit_nested(PowerPCCPU *cpu, int excp)
{
    g_assert_not_reached();
}

void spapr_register_nested_hv(void)
{
    /* DO NOTHING */
}

void spapr_unregister_nested_hv(void)
{
    /* DO NOTHING */
}

bool spapr_get_pate_nested_hv(SpaprMachineState *spapr, PowerPCCPU *cpu,
                              target_ulong lpid, ppc_v3_pate_t *entry)
{
    return false;
}

bool spapr_get_pate_nested_papr(SpaprMachineState *spapr, PowerPCCPU *cpu,
                                target_ulong lpid, ppc_v3_pate_t *entry)
{
    return false;
}

void spapr_register_nested_papr(void)
{
    /* DO NOTHING */
}

void spapr_unregister_nested_papr(void)
{
    /* DO NOTHING */
}

void spapr_nested_gsb_init(void)
{
    /* DO NOTHING */
}

#endif