xref: /linux/arch/powerpc/kvm/book3s_hv_nestedv2.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
4  *
5  * Authors:
6  *    Jordan Niethe <jniethe5@gmail.com>
7  *
8  * Description: KVM functions specific to running on Book 3S
9  * processors as a NESTEDv2 guest.
10  *
11  */
12 
13 #include "linux/blk-mq.h"
14 #include "linux/console.h"
15 #include "linux/gfp_types.h"
16 #include "linux/signal.h"
17 #include <linux/kernel.h>
18 #include <linux/kvm_host.h>
19 #include <linux/pgtable.h>
20 
21 #include <asm/kvm_ppc.h>
22 #include <asm/kvm_book3s.h>
23 #include <asm/hvcall.h>
24 #include <asm/pgalloc.h>
25 #include <asm/reg.h>
26 #include <asm/plpar_wrappers.h>
27 #include <asm/guest-state-buffer.h>
28 #include "trace_hv.h"
29 
/*
 * Static branch key tested by kvmhv_is_nestedv2()/kvmhv_on_papr() style
 * helpers; enabled elsewhere (not in this file) when running as a NESTEDv2
 * guest.
 */
struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
32 
33 
34 static size_t
gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg * gsm)35 gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
36 {
37 	u16 ids[] = {
38 		KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
39 		KVMPPC_GSID_RUN_INPUT,
40 		KVMPPC_GSID_RUN_OUTPUT,
41 
42 	};
43 	size_t size = 0;
44 
45 	for (int i = 0; i < ARRAY_SIZE(ids); i++)
46 		size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
47 	return size;
48 }
49 
50 static int
gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff * gsb,struct kvmppc_gs_msg * gsm)51 gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
52 					   struct kvmppc_gs_msg *gsm)
53 {
54 	struct kvmhv_nestedv2_config *cfg;
55 	int rc;
56 
57 	cfg = gsm->data;
58 
59 	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
60 		rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
61 					cfg->vcpu_run_output_size);
62 		if (rc < 0)
63 			return rc;
64 	}
65 
66 	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
67 		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
68 					      cfg->vcpu_run_input_cfg);
69 		if (rc < 0)
70 			return rc;
71 	}
72 
73 	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
74 		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
75 					      cfg->vcpu_run_output_cfg);
76 		if (rc < 0)
77 			return rc;
78 	}
79 
80 	return 0;
81 }
82 
83 static int
gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg * gsm,struct kvmppc_gs_buff * gsb)84 gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
85 					      struct kvmppc_gs_buff *gsb)
86 {
87 	struct kvmhv_nestedv2_config *cfg;
88 	struct kvmppc_gs_parser gsp = { 0 };
89 	struct kvmppc_gs_elem *gse;
90 	int rc;
91 
92 	cfg = gsm->data;
93 
94 	rc = kvmppc_gse_parse(&gsp, gsb);
95 	if (rc < 0)
96 		return rc;
97 
98 	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
99 	if (gse)
100 		cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
101 	return 0;
102 }
103 
/* Message ops for negotiating the vcpu run input/output buffer config. */
static struct kvmppc_gs_msg_ops config_msg_ops = {
	.get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
	.fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
	.refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
};
109 
gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg * gsm)110 static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
111 {
112 	struct kvmppc_gs_bitmap gsbm = { 0 };
113 	size_t size = 0;
114 	u16 iden;
115 
116 	kvmppc_gsbm_fill(&gsbm);
117 	kvmppc_gsbm_for_each(&gsbm, iden)
118 	{
119 		switch (iden) {
120 		case KVMPPC_GSID_HOST_STATE_SIZE:
121 		case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
122 		case KVMPPC_GSID_PARTITION_TABLE:
123 		case KVMPPC_GSID_PROCESS_TABLE:
124 		case KVMPPC_GSID_RUN_INPUT:
125 		case KVMPPC_GSID_RUN_OUTPUT:
126 		  /* Host wide counters */
127 		case KVMPPC_GSID_L0_GUEST_HEAP:
128 		case KVMPPC_GSID_L0_GUEST_HEAP_MAX:
129 		case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE:
130 		case KVMPPC_GSID_L0_GUEST_PGTABLE_SIZE_MAX:
131 		case KVMPPC_GSID_L0_GUEST_PGTABLE_RECLAIM:
132 			break;
133 		default:
134 			size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
135 		}
136 	}
137 	return size;
138 }
139 
/*
 * Serialise vcpu register state into a guest state buffer.
 *
 * Walks every guest state ID included in @gsm and writes the matching
 * vcpu register value into @gsb.  @gsm->data is the struct kvm_vcpu being
 * serialised.  Returns 0 on success or a negative error from the first
 * failing put helper.
 */
static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
				     struct kvmppc_gs_msg *gsm)
{
	struct kvm_vcpu *vcpu;
	vector128 v;
	int rc, i;
	u16 iden;
	u32 arch_compat = 0;

	vcpu = gsm->data;

	kvmppc_gsm_for_each(gsm, iden)
	{
		rc = 0;

		/*
		 * Guest-wide elements may only travel in a WIDE message and
		 * thread-wide elements only in a non-WIDE one; skip IDs
		 * whose wideness doesn't match this message.
		 */
		if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
		    (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
			continue;

		switch (iden) {
		case KVMPPC_GSID_DSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
			break;
		case KVMPPC_GSID_MMCRA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
			break;
		case KVMPPC_GSID_HFSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
			break;
		case KVMPPC_GSID_PURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
			break;
		case KVMPPC_GSID_SPURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
			break;
		case KVMPPC_GSID_AMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
			break;
		case KVMPPC_GSID_UAMOR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
			break;
		case KVMPPC_GSID_SIAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
			break;
		case KVMPPC_GSID_SDAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
			break;
		case KVMPPC_GSID_IAMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
			break;
		case KVMPPC_GSID_DAWR0:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
			break;
		case KVMPPC_GSID_DAWR1:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
			break;
		case KVMPPC_GSID_DAWRX0:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
			break;
		case KVMPPC_GSID_DAWRX1:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
			break;
		case KVMPPC_GSID_DEXCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr);
			break;
		case KVMPPC_GSID_HASHKEYR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr);
			break;
		case KVMPPC_GSID_HASHPKEYR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr);
			break;
		case KVMPPC_GSID_CIABR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
			break;
		case KVMPPC_GSID_WORT:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
			break;
		case KVMPPC_GSID_PPR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
			break;
		case KVMPPC_GSID_PSPB:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
			break;
		case KVMPPC_GSID_TAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
			break;
		case KVMPPC_GSID_FSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
			break;
		case KVMPPC_GSID_EBBHR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
			break;
		case KVMPPC_GSID_EBBRR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
			break;
		case KVMPPC_GSID_BESCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
			break;
		case KVMPPC_GSID_IC:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
			break;
		case KVMPPC_GSID_CTRL:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
			break;
		case KVMPPC_GSID_PIDR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
			break;
		case KVMPPC_GSID_AMOR: {
			/* AMOR is always presented to the guest as all-ones */
			u64 amor = ~0;

			rc = kvmppc_gse_put_u64(gsb, iden, amor);
			break;
		}
		case KVMPPC_GSID_VRSAVE:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.gpr[i]);
			break;
		case KVMPPC_GSID_CR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
			break;
		case KVMPPC_GSID_XER:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
			break;
		case KVMPPC_GSID_CTR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
			break;
		case KVMPPC_GSID_LR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.link);
			break;
		case KVMPPC_GSID_NIA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
			break;
		case KVMPPC_GSID_SRR0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr0);
			break;
		case KVMPPC_GSID_SRR1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr1);
			break;
		case KVMPPC_GSID_SPRG0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg0);
			break;
		case KVMPPC_GSID_SPRG1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg1);
			break;
		case KVMPPC_GSID_SPRG2:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg2);
			break;
		case KVMPPC_GSID_SPRG3:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg3);
			break;
		case KVMPPC_GSID_DAR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.dar);
			break;
		case KVMPPC_GSID_DSISR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.shregs.dsisr);
			break;
		case KVMPPC_GSID_MSR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.msr);
			break;
		case KVMPPC_GSID_VTB:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->vtb);
			break;
		case KVMPPC_GSID_DPDES:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->dpdes);
			break;
		case KVMPPC_GSID_LPCR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->lpcr);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->tb_offset);
			break;
		case KVMPPC_GSID_FPSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
			break;
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			/*
			 * FP registers are narrower than a vector128; go via
			 * memcpy into a local vector to avoid an out-of-bounds
			 * read of the fpr array.
			 */
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&v, &vcpu->arch.fp.fpr[i],
			       sizeof(vcpu->arch.fp.fpr[i]));
			rc = kvmppc_gse_put_vector128(gsb, iden, &v);
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.vr.vscr.u[3]);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			rc = kvmppc_gse_put_vector128(gsb, iden,
						      &vcpu->arch.vr.vr[i]);
			break;
#endif
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			/* L0 expects the expiry in L2 timebase terms */
			dw = vcpu->arch.dec_expires -
			     vcpu->arch.vcore->tb_offset;
			rc = kvmppc_gse_put_u64(gsb, iden, dw);
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			/*
			 * Though 'arch_compat == 0' would mean the default
			 * compatibility, arch_compat, being a Guest Wide
			 * Element, cannot be filled with a value of 0 in GSB
			 * as this would result into a kernel trap.
			 * Hence, when `arch_compat == 0`, arch_compat should
			 * default to L1's PVR.
			 */
			if (!vcpu->arch.vcore->arch_compat) {
				if (cpu_has_feature(CPU_FTR_P11_PVR))
					arch_compat = PVR_ARCH_31_P11;
				else if (cpu_has_feature(CPU_FTR_ARCH_31))
					arch_compat = PVR_ARCH_31;
				else if (cpu_has_feature(CPU_FTR_ARCH_300))
					arch_compat = PVR_ARCH_300;
			} else {
				arch_compat = vcpu->arch.vcore->arch_compat;
			}
			rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
			break;
		}

		if (rc < 0)
			return rc;
	}

	return 0;
}
398 
/*
 * Deserialise vcpu register state from a guest state buffer received from
 * the L0 host.
 *
 * Parses @gsb and copies each recognised element back into the vcpu
 * (@gsm->data).  Every element actually consumed is also marked in
 * io->valids so later cached reloads can be skipped; unrecognised IDs are
 * ignored without being marked.
 */
static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
					struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvm_vcpu *vcpu;
	struct kvmppc_gs_elem *gse;
	vector128 v;
	int rc, i;
	u16 iden;

	vcpu = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	kvmppc_gsp_for_each(&gsp, iden, gse)
	{
		switch (iden) {
		case KVMPPC_GSID_DSCR:
			vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_MMCRA:
			vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HFSCR:
			vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PURR:
			vcpu->arch.purr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPURR:
			vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_AMR:
			vcpu->arch.amr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_UAMOR:
			vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIAR:
			vcpu->arch.siar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SDAR:
			vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IAMR:
			vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR0:
			vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR1:
			vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWRX0:
			vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DAWRX1:
			vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DEXCR:
			vcpu->arch.dexcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HASHKEYR:
			vcpu->arch.hashkeyr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HASHPKEYR:
			vcpu->arch.hashpkeyr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CIABR:
			vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_WORT:
			vcpu->arch.wort = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_PPR:
			vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PSPB:
			vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_TAR:
			vcpu->arch.tar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FSCR:
			vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBHR:
			vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBRR:
			vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_BESCR:
			vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IC:
			vcpu->arch.ic = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTRL:
			vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PIDR:
			vcpu->arch.pid = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_AMOR:
			/* AMOR is fixed at all-ones; nothing to store */
			break;
		case KVMPPC_GSID_VRSAVE:
			vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CR:
			vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_XER:
			vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTR:
			vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LR:
			vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_NIA:
			vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR0:
			vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR1:
			vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG0:
			vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG1:
			vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG2:
			vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG3:
			vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAR:
			vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DSISR:
			vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MSR:
			vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VTB:
			vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DPDES:
			vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LPCR:
			vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FPSCR:
			vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			/* bounce through a local vector128; fpr is narrower */
			kvmppc_gse_get_vector128(gse, &v);
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&vcpu->arch.fp.fpr[i], &v,
			       sizeof(vcpu->arch.fp.fpr[i]));
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
			break;
#endif
		case KVMPPC_GSID_HDAR:
			vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HDSISR:
			vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_ASDR:
			vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HEIR:
			vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			/* convert back from L2 to L1 timebase terms */
			dw = kvmppc_gse_get_u64(gse);
			vcpu->arch.dec_expires =
				dw + vcpu->arch.vcore->tb_offset;
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
			break;
		default:
			/* unknown ID: skip without marking it valid */
			continue;
		}
		/* value now cached in L1; record it so reloads can skip it */
		kvmppc_gsbm_set(valids, iden);
	}

	return 0;
}
634 
/* Message ops for serialising/deserialising vcpu register state. */
static struct kvmppc_gs_msg_ops vcpu_message_ops = {
	.get_size = gs_msg_ops_vcpu_get_size,
	.fill_info = gs_msg_ops_vcpu_fill_info,
	.refresh_info = gs_msg_ops_vcpu_refresh_info,
};
640 
/*
 * Allocate and register the L1-side NESTEDv2 state for @vcpu.
 *
 * Negotiates the run output buffer size with the L0 host, allocates the
 * run input/output buffers and registers their addresses with L0, and
 * creates the vcpu/vcore messages used to marshal state.  On success @io
 * owns vcpu_run_input, vcpu_run_output, vcpu_message and vcore_message;
 * on failure everything allocated so far is unwound via the goto chain
 * and a negative errno is returned.
 */
static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
				      struct kvmhv_nestedv2_io *io)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
	unsigned long guest_id, vcpu_id;
	struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
	int rc;

	cfg = &io->cfg;
	guest_id = vcpu->kvm->arch.lpid;
	vcpu_id = vcpu->vcpu_id;

	/* scratch message/buffer used only for the config exchange below */
	gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
			     GFP_KERNEL);
	if (!gsm) {
		rc = -ENOMEM;
		goto err;
	}

	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
			     GFP_KERNEL);
	if (!gsb) {
		rc = -ENOMEM;
		goto free_gsm;
	}

	/* ask L0 how big the run output buffer must be at minimum */
	rc = kvmppc_gsb_receive_datum(gsb, gsm,
				      KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
		goto free_gsb;
	}

	vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
					 vcpu_id, GFP_KERNEL);
	if (!vcpu_run_output) {
		rc = -ENOMEM;
		goto free_gsb;
	}

	cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
	cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
	io->vcpu_run_output = vcpu_run_output;

	/*
	 * The run buffers are registered per-vCPU, so drop the guest-wide
	 * flag before sending their configuration.
	 */
	gsm->flags = 0;
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
		goto free_gs_out;
	}

	/* thread-wide (non-WIDE) message covering all vcpu state */
	vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
	if (!vcpu_message) {
		rc = -ENOMEM;
		goto free_gs_out;
	}
	kvmppc_gsm_include_all(vcpu_message);

	io->vcpu_message = vcpu_message;

	/* size the run input buffer to fit the whole vcpu message */
	vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
					vcpu_id, GFP_KERNEL);
	if (!vcpu_run_input) {
		rc = -ENOMEM;
		goto free_vcpu_message;
	}

	io->vcpu_run_input = vcpu_run_input;
	cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
	cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
		goto free_vcpu_run_input;
	}

	/* guest-wide (WIDE) message for vcore-level state */
	vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
				       KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
	if (!vcore_message) {
		rc = -ENOMEM;
		goto free_vcpu_run_input;
	}

	kvmppc_gsm_include_all(vcore_message);
	/* LOGICAL_PVR is handled separately, never via the vcore message */
	kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
	io->vcore_message = vcore_message;

	/* everything starts out cached as valid */
	kvmppc_gsbm_fill(&io->valids);
	kvmppc_gsm_free(gsm);
	kvmppc_gsb_free(gsb);
	return 0;

free_vcpu_run_input:
	kvmppc_gsb_free(vcpu_run_input);
free_vcpu_message:
	kvmppc_gsm_free(vcpu_message);
free_gs_out:
	kvmppc_gsb_free(vcpu_run_output);
free_gsb:
	kvmppc_gsb_free(gsb);
free_gsm:
	kvmppc_gsm_free(gsm);
err:
	return rc;
}
747 
748 /**
749  * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
750  * @vcpu: vcpu
751  * @iden: guest state ID
752  *
753  * Mark a guest state ID as having been changed by the L1 host and thus
754  * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu()
755  */
__kvmhv_nestedv2_mark_dirty(struct kvm_vcpu * vcpu,u16 iden)756 int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
757 {
758 	struct kvmhv_nestedv2_io *io;
759 	struct kvmppc_gs_bitmap *valids;
760 	struct kvmppc_gs_msg *gsm;
761 
762 	if (!iden)
763 		return 0;
764 
765 	io = &vcpu->arch.nestedv2_io;
766 	valids = &io->valids;
767 	gsm = io->vcpu_message;
768 	kvmppc_gsm_include(gsm, iden);
769 	gsm = io->vcore_message;
770 	kvmppc_gsm_include(gsm, iden);
771 	kvmppc_gsbm_set(valids, iden);
772 	return 0;
773 }
774 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
775 
776 /**
777  * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
778  * @vcpu: vcpu
779  * @iden: guest state ID
780  *
781  * Reload the value for the guest state ID from the L0 host into the L1 host.
782  * This is cached so that going out to the L0 host only happens if necessary.
783  */
__kvmhv_nestedv2_cached_reload(struct kvm_vcpu * vcpu,u16 iden)784 int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
785 {
786 	struct kvmhv_nestedv2_io *io;
787 	struct kvmppc_gs_bitmap *valids;
788 	struct kvmppc_gs_buff *gsb;
789 	struct kvmppc_gs_msg gsm;
790 	int rc;
791 
792 	if (!iden)
793 		return 0;
794 
795 	io = &vcpu->arch.nestedv2_io;
796 	valids = &io->valids;
797 	if (kvmppc_gsbm_test(valids, iden))
798 		return 0;
799 
800 	gsb = io->vcpu_run_input;
801 	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
802 	rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
803 	if (rc < 0) {
804 		pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
805 		return rc;
806 	}
807 	return 0;
808 }
809 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
810 
811 /**
812  * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
813  * @vcpu: vcpu
814  * @time_limit: hdec expiry tb
815  *
816  * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
817  * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
818  * wide values need to be sent with H_GUEST_SET first.
819  *
820  * The hdec tb offset is always sent to L0 host.
821  */
kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu * vcpu,u64 time_limit)822 int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
823 {
824 	struct kvmhv_nestedv2_io *io;
825 	struct kvmppc_gs_buff *gsb;
826 	struct kvmppc_gs_msg *gsm;
827 	int rc;
828 
829 	io = &vcpu->arch.nestedv2_io;
830 	gsb = io->vcpu_run_input;
831 	gsm = io->vcore_message;
832 	rc = kvmppc_gsb_send_data(gsb, gsm);
833 	if (rc < 0) {
834 		pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
835 		return rc;
836 	}
837 
838 	gsm = io->vcpu_message;
839 	kvmppc_gsb_reset(gsb);
840 	rc = kvmppc_gsm_fill_info(gsm, gsb);
841 	if (rc < 0) {
842 		pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
843 		return rc;
844 	}
845 
846 	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
847 	if (rc < 0)
848 		return rc;
849 	return 0;
850 }
851 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
852 
853 /**
854  * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
855  * L0 host
856  * @lpid: guest id
857  * @dw0: partition table double word
858  * @dw1: process table double word
859  */
kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid,u64 dw0,u64 dw1)860 int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
861 {
862 	struct kvmppc_gs_part_table patbl;
863 	struct kvmppc_gs_proc_table prtbl;
864 	struct kvmppc_gs_buff *gsb;
865 	size_t size;
866 	int rc;
867 
868 	size = kvmppc_gse_total_size(
869 		       kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
870 	       kvmppc_gse_total_size(
871 		       kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
872 	       sizeof(struct kvmppc_gs_header);
873 	gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
874 	if (!gsb)
875 		return -ENOMEM;
876 
877 	patbl.address = dw0 & RPDB_MASK;
878 	patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
879 			  ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
880 			 31);
881 	patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
882 	rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
883 	if (rc < 0)
884 		goto free_gsb;
885 
886 	prtbl.address = dw1 & PRTB_MASK;
887 	prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
888 	rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
889 	if (rc < 0)
890 		goto free_gsb;
891 
892 	rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
893 	if (rc < 0) {
894 		pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
895 		goto free_gsb;
896 	}
897 
898 	kvmppc_gsb_free(gsb);
899 	return 0;
900 
901 free_gsb:
902 	kvmppc_gsb_free(gsb);
903 	return rc;
904 }
905 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
906 
907 /**
908  * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0
909  * @vcpu: vcpu
910  * @vpa: L1 logical real address
911  */
kvmhv_nestedv2_set_vpa(struct kvm_vcpu * vcpu,unsigned long vpa)912 int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa)
913 {
914 	struct kvmhv_nestedv2_io *io;
915 	struct kvmppc_gs_buff *gsb;
916 	int rc = 0;
917 
918 	io = &vcpu->arch.nestedv2_io;
919 	gsb = io->vcpu_run_input;
920 
921 	kvmppc_gsb_reset(gsb);
922 	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa);
923 	if (rc < 0)
924 		goto out;
925 
926 	rc = kvmppc_gsb_send(gsb, 0);
927 	if (rc < 0)
928 		pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc);
929 
930 out:
931 	kvmppc_gsb_reset(gsb);
932 	return rc;
933 }
934 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa);
935 
936 /**
937  * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
938  * @vcpu: vcpu
939  *
940  * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
941  */
kvmhv_nestedv2_parse_output(struct kvm_vcpu * vcpu)942 int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
943 {
944 	struct kvmhv_nestedv2_io *io;
945 	struct kvmppc_gs_buff *gsb;
946 	struct kvmppc_gs_msg gsm;
947 
948 	io = &vcpu->arch.nestedv2_io;
949 	gsb = io->vcpu_run_output;
950 
951 	vcpu->arch.fault_dar = 0;
952 	vcpu->arch.fault_dsisr = 0;
953 	vcpu->arch.fault_gpa = 0;
954 	vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
955 
956 	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
957 	return kvmppc_gsm_refresh_info(&gsm, gsb);
958 }
959 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
960 
kvmhv_nestedv2_host_free(struct kvm_vcpu * vcpu,struct kvmhv_nestedv2_io * io)961 static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
962 				     struct kvmhv_nestedv2_io *io)
963 {
964 	kvmppc_gsm_free(io->vcpu_message);
965 	kvmppc_gsm_free(io->vcore_message);
966 	kvmppc_gsb_free(io->vcpu_run_input);
967 	kvmppc_gsb_free(io->vcpu_run_output);
968 }
969 
__kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu * vcpu,struct pt_regs * regs)970 int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
971 {
972 	struct kvmhv_nestedv2_io *io;
973 	struct kvmppc_gs_bitmap *valids;
974 	struct kvmppc_gs_buff *gsb;
975 	struct kvmppc_gs_msg gsm;
976 	int rc = 0;
977 
978 
979 	io = &vcpu->arch.nestedv2_io;
980 	valids = &io->valids;
981 
982 	gsb = io->vcpu_run_input;
983 	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
984 
985 	for (int i = 0; i < 32; i++) {
986 		if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
987 			kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
988 	}
989 
990 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
991 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
992 
993 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
994 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
995 
996 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
997 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
998 
999 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
1000 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
1001 
1002 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
1003 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
1004 
1005 	rc = kvmppc_gsb_receive_data(gsb, &gsm);
1006 	if (rc < 0)
1007 		pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
1008 
1009 	return rc;
1010 }
1011 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
1012 
__kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu * vcpu,struct pt_regs * regs)1013 int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
1014 				       struct pt_regs *regs)
1015 {
1016 	for (int i = 0; i < 32; i++)
1017 		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
1018 
1019 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
1020 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
1021 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
1022 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
1023 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
1024 
1025 	return 0;
1026 }
1027 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
1028 
/**
 * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
 * @vcpu: vcpu
 * @io: NESTEDv2 nested io state
 *
 * Ask the L0 host to create the vcpu with the H_GUEST_CREATE_VCPU hcall,
 * then allocate and register the L1-side NESTEDv2 state for it.
 *
 * Return: 0 on success, negative errno translated from the hcall status
 * or from the L1 state allocation.
 */
int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
			       struct kvmhv_nestedv2_io *io)
{
	long rc;

	rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);

	if (rc != H_SUCCESS) {
		pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
		/* map hypervisor status codes onto errnos */
		switch (rc) {
		case H_NOT_ENOUGH_RESOURCES:
		case H_ABORTED:
			return -ENOMEM;
		case H_AUTHORITY:
			return -EPERM;
		default:
			return -EINVAL;
		}
	}

	rc = kvmhv_nestedv2_host_create(vcpu, io);

	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
1061 
/**
 * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
 * @vcpu: vcpu
 * @io: NESTEDv2 nested io state
 *
 * Releases the guest state buffers and messages allocated for @vcpu by
 * kvmhv_nestedv2_vcpu_create().
 */
void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
			      struct kvmhv_nestedv2_io *io)
{
	kvmhv_nestedv2_host_free(vcpu, io);
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);
1073