1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
3 #define _ASM_POWERPC_PLPAR_WRAPPERS_H
4
5 #ifdef CONFIG_PPC_PSERIES
6
7 #include <linux/string.h>
8 #include <linux/irqflags.h>
9 #include <linux/delay.h>
10
11 #include <asm/hvcall.h>
12 #include <asm/paca.h>
13 #include <asm/lppaca.h>
14 #include <asm/page.h>
15
poll_pending(void)16 static inline long poll_pending(void)
17 {
18 return plpar_hcall_norets(H_POLL_PENDING);
19 }
20
cede_processor(void)21 static inline long cede_processor(void)
22 {
23 /*
24 * We cannot call tracepoints inside RCU idle regions which
25 * means we must not trace H_CEDE.
26 */
27 return plpar_hcall_norets_notrace(H_CEDE);
28 }
29
vpa_call(unsigned long flags,unsigned long cpu,unsigned long vpa)30 static inline long vpa_call(unsigned long flags, unsigned long cpu,
31 unsigned long vpa)
32 {
33 flags = flags << H_VPA_FUNC_SHIFT;
34
35 return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
36 }
37
unregister_vpa(unsigned long cpu)38 static inline long unregister_vpa(unsigned long cpu)
39 {
40 return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
41 }
42
register_vpa(unsigned long cpu,unsigned long vpa)43 static inline long register_vpa(unsigned long cpu, unsigned long vpa)
44 {
45 return vpa_call(H_VPA_REG_VPA, cpu, vpa);
46 }
47
unregister_slb_shadow(unsigned long cpu)48 static inline long unregister_slb_shadow(unsigned long cpu)
49 {
50 return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
51 }
52
register_slb_shadow(unsigned long cpu,unsigned long vpa)53 static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
54 {
55 return vpa_call(H_VPA_REG_SLB, cpu, vpa);
56 }
57
unregister_dtl(unsigned long cpu)58 static inline long unregister_dtl(unsigned long cpu)
59 {
60 return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
61 }
62
register_dtl(unsigned long cpu,unsigned long vpa)63 static inline long register_dtl(unsigned long cpu, unsigned long vpa)
64 {
65 return vpa_call(H_VPA_REG_DTL, cpu, vpa);
66 }
67
htm_call(unsigned long flags,unsigned long target,unsigned long operation,unsigned long param1,unsigned long param2,unsigned long param3)68 static inline long htm_call(unsigned long flags, unsigned long target,
69 unsigned long operation, unsigned long param1,
70 unsigned long param2, unsigned long param3)
71 {
72 return plpar_hcall_norets(H_HTM, flags, target, operation,
73 param1, param2, param3);
74 }
75
htm_get_dump_hardware(unsigned long nodeindex,unsigned long nodalchipindex,unsigned long coreindexonchip,unsigned long type,unsigned long addr,unsigned long size,unsigned long offset)76 static inline long htm_get_dump_hardware(unsigned long nodeindex,
77 unsigned long nodalchipindex, unsigned long coreindexonchip,
78 unsigned long type, unsigned long addr, unsigned long size,
79 unsigned long offset)
80 {
81 return htm_call(H_HTM_FLAGS_HARDWARE_TARGET,
82 H_HTM_TARGET_NODE_INDEX(nodeindex) |
83 H_HTM_TARGET_NODAL_CHIP_INDEX(nodalchipindex) |
84 H_HTM_TARGET_CORE_INDEX_ON_CHIP(coreindexonchip),
85 H_HTM_OP(H_HTM_OP_DUMP_DATA) | H_HTM_TYPE(type),
86 addr, size, offset);
87 }
88
89 extern void vpa_init(int cpu);
90
plpar_pte_enter(unsigned long flags,unsigned long hpte_group,unsigned long hpte_v,unsigned long hpte_r,unsigned long * slot)91 static inline long plpar_pte_enter(unsigned long flags,
92 unsigned long hpte_group, unsigned long hpte_v,
93 unsigned long hpte_r, unsigned long *slot)
94 {
95 long rc;
96 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
97
98 rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
99
100 *slot = retbuf[0];
101
102 return rc;
103 }
104
plpar_pte_remove(unsigned long flags,unsigned long ptex,unsigned long avpn,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)105 static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
106 unsigned long avpn, unsigned long *old_pteh_ret,
107 unsigned long *old_ptel_ret)
108 {
109 long rc;
110 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
111
112 rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
113
114 *old_pteh_ret = retbuf[0];
115 *old_ptel_ret = retbuf[1];
116
117 return rc;
118 }
119
120 /* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
plpar_pte_remove_raw(unsigned long flags,unsigned long ptex,unsigned long avpn,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)121 static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
122 unsigned long avpn, unsigned long *old_pteh_ret,
123 unsigned long *old_ptel_ret)
124 {
125 long rc;
126 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
127
128 rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
129
130 *old_pteh_ret = retbuf[0];
131 *old_ptel_ret = retbuf[1];
132
133 return rc;
134 }
135
plpar_pte_read(unsigned long flags,unsigned long ptex,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)136 static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
137 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
138 {
139 long rc;
140 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
141
142 rc = plpar_hcall(H_READ, retbuf, flags, ptex);
143
144 *old_pteh_ret = retbuf[0];
145 *old_ptel_ret = retbuf[1];
146
147 return rc;
148 }
149
150 /* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
plpar_pte_read_raw(unsigned long flags,unsigned long ptex,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)151 static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
152 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
153 {
154 long rc;
155 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
156
157 rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
158
159 *old_pteh_ret = retbuf[0];
160 *old_ptel_ret = retbuf[1];
161
162 return rc;
163 }
164
165 /*
166 * ptes must be 8*sizeof(unsigned long)
167 */
plpar_pte_read_4(unsigned long flags,unsigned long ptex,unsigned long * ptes)168 static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
169 unsigned long *ptes)
170
171 {
172 long rc;
173 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
174
175 rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
176
177 memcpy(ptes, retbuf, 8*sizeof(unsigned long));
178
179 return rc;
180 }
181
182 /*
183 * plpar_pte_read_4_raw can be called in real mode.
184 * ptes must be 8*sizeof(unsigned long)
185 */
plpar_pte_read_4_raw(unsigned long flags,unsigned long ptex,unsigned long * ptes)186 static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
187 unsigned long *ptes)
188
189 {
190 long rc;
191 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
192
193 rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
194
195 memcpy(ptes, retbuf, 8*sizeof(unsigned long));
196
197 return rc;
198 }
199
plpar_pte_protect(unsigned long flags,unsigned long ptex,unsigned long avpn)200 static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
201 unsigned long avpn)
202 {
203 return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
204 }
205
plpar_resize_hpt_prepare(unsigned long flags,unsigned long shift)206 static inline long plpar_resize_hpt_prepare(unsigned long flags,
207 unsigned long shift)
208 {
209 return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
210 }
211
plpar_resize_hpt_commit(unsigned long flags,unsigned long shift)212 static inline long plpar_resize_hpt_commit(unsigned long flags,
213 unsigned long shift)
214 {
215 return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
216 }
217
plpar_tce_get(unsigned long liobn,unsigned long ioba,unsigned long * tce_ret)218 static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
219 unsigned long *tce_ret)
220 {
221 long rc;
222 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
223
224 rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
225
226 *tce_ret = retbuf[0];
227
228 return rc;
229 }
230
plpar_tce_put(unsigned long liobn,unsigned long ioba,unsigned long tceval)231 static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
232 unsigned long tceval)
233 {
234 return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
235 }
236
plpar_tce_put_indirect(unsigned long liobn,unsigned long ioba,unsigned long page,unsigned long count)237 static inline long plpar_tce_put_indirect(unsigned long liobn,
238 unsigned long ioba, unsigned long page, unsigned long count)
239 {
240 return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
241 }
242
plpar_tce_stuff(unsigned long liobn,unsigned long ioba,unsigned long tceval,unsigned long count)243 static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
244 unsigned long tceval, unsigned long count)
245 {
246 return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
247 }
248
249 /* Set various resource mode parameters */
plpar_set_mode(unsigned long mflags,unsigned long resource,unsigned long value1,unsigned long value2)250 static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
251 unsigned long value1, unsigned long value2)
252 {
253 return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
254 }
255
256 /*
257 * Enable relocation on exceptions on this partition
258 *
259 * Note: this call has a partition wide scope and can take a while to complete.
260 * If it returns H_LONG_BUSY_* it should be retried periodically until it
261 * returns H_SUCCESS.
262 */
enable_reloc_on_exceptions(void)263 static inline long enable_reloc_on_exceptions(void)
264 {
265 /* mflags = 3: Exceptions at 0xC000000000004000 */
266 return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
267 }
268
269 /*
270 * Disable relocation on exceptions on this partition
271 *
272 * Note: this call has a partition wide scope and can take a while to complete.
273 * If it returns H_LONG_BUSY_* it should be retried periodically until it
274 * returns H_SUCCESS.
275 */
disable_reloc_on_exceptions(void)276 static inline long disable_reloc_on_exceptions(void) {
277 return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
278 }
279
280 /*
281 * Take exceptions in big endian mode on this partition
282 *
283 * Note: this call has a partition wide scope and can take a while to complete.
284 * If it returns H_LONG_BUSY_* it should be retried periodically until it
285 * returns H_SUCCESS.
286 */
enable_big_endian_exceptions(void)287 static inline long enable_big_endian_exceptions(void)
288 {
289 /* mflags = 0: big endian exceptions */
290 return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
291 }
292
293 /*
294 * Take exceptions in little endian mode on this partition
295 *
296 * Note: this call has a partition wide scope and can take a while to complete.
297 * If it returns H_LONG_BUSY_* it should be retried periodically until it
298 * returns H_SUCCESS.
299 */
enable_little_endian_exceptions(void)300 static inline long enable_little_endian_exceptions(void)
301 {
302 /* mflags = 1: little endian exceptions */
303 return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
304 }
305
plpar_set_ciabr(unsigned long ciabr)306 static inline long plpar_set_ciabr(unsigned long ciabr)
307 {
308 return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
309 }
310
plpar_set_watchpoint0(unsigned long dawr0,unsigned long dawrx0)311 static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
312 {
313 return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
314 }
315
plpar_set_watchpoint1(unsigned long dawr1,unsigned long dawrx1)316 static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
317 {
318 return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
319 }
320
plpar_signal_sys_reset(long cpu)321 static inline long plpar_signal_sys_reset(long cpu)
322 {
323 return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
324 }
325
plpar_get_cpu_characteristics(struct h_cpu_char_result * p)326 static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
327 {
328 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
329 long rc;
330
331 rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
332 if (rc == H_SUCCESS) {
333 p->character = retbuf[0];
334 p->behaviour = retbuf[1];
335 }
336
337 return rc;
338 }
339
plpar_guest_create(unsigned long flags,unsigned long * guest_id)340 static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
341 {
342 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
343 unsigned long token;
344 long rc;
345
346 token = -1UL;
347 do {
348 rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token);
349 if (rc == H_SUCCESS)
350 *guest_id = retbuf[0];
351
352 if (rc == H_BUSY) {
353 token = retbuf[0];
354 cond_resched();
355 }
356
357 if (H_IS_LONG_BUSY(rc)) {
358 token = retbuf[0];
359 msleep(get_longbusy_msecs(rc));
360 rc = H_BUSY;
361 }
362
363 } while (rc == H_BUSY);
364
365 return rc;
366 }
367
plpar_guest_create_vcpu(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id)368 static inline long plpar_guest_create_vcpu(unsigned long flags,
369 unsigned long guest_id,
370 unsigned long vcpu_id)
371 {
372 long rc;
373
374 do {
375 rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id);
376
377 if (rc == H_BUSY)
378 cond_resched();
379
380 if (H_IS_LONG_BUSY(rc)) {
381 msleep(get_longbusy_msecs(rc));
382 rc = H_BUSY;
383 }
384
385 } while (rc == H_BUSY);
386
387 return rc;
388 }
389
plpar_guest_set_state(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,unsigned long data_buffer,unsigned long data_size,unsigned long * failed_index)390 static inline long plpar_guest_set_state(unsigned long flags,
391 unsigned long guest_id,
392 unsigned long vcpu_id,
393 unsigned long data_buffer,
394 unsigned long data_size,
395 unsigned long *failed_index)
396 {
397 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
398 long rc;
399
400 while (true) {
401 rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id,
402 vcpu_id, data_buffer, data_size);
403
404 if (rc == H_BUSY) {
405 cpu_relax();
406 continue;
407 }
408
409 if (H_IS_LONG_BUSY(rc)) {
410 mdelay(get_longbusy_msecs(rc));
411 continue;
412 }
413
414 if (rc == H_INVALID_ELEMENT_ID)
415 *failed_index = retbuf[0];
416 else if (rc == H_INVALID_ELEMENT_SIZE)
417 *failed_index = retbuf[0];
418 else if (rc == H_INVALID_ELEMENT_VALUE)
419 *failed_index = retbuf[0];
420
421 break;
422 }
423
424 return rc;
425 }
426
plpar_guest_get_state(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,unsigned long data_buffer,unsigned long data_size,unsigned long * failed_index)427 static inline long plpar_guest_get_state(unsigned long flags,
428 unsigned long guest_id,
429 unsigned long vcpu_id,
430 unsigned long data_buffer,
431 unsigned long data_size,
432 unsigned long *failed_index)
433 {
434 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
435 long rc;
436
437 while (true) {
438 rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id,
439 vcpu_id, data_buffer, data_size);
440
441 if (rc == H_BUSY) {
442 cpu_relax();
443 continue;
444 }
445
446 if (H_IS_LONG_BUSY(rc)) {
447 mdelay(get_longbusy_msecs(rc));
448 continue;
449 }
450
451 if (rc == H_INVALID_ELEMENT_ID)
452 *failed_index = retbuf[0];
453 else if (rc == H_INVALID_ELEMENT_SIZE)
454 *failed_index = retbuf[0];
455 else if (rc == H_INVALID_ELEMENT_VALUE)
456 *failed_index = retbuf[0];
457
458 break;
459 }
460
461 return rc;
462 }
463
plpar_guest_run_vcpu(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,int * trap,unsigned long * failed_index)464 static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
465 unsigned long vcpu_id, int *trap,
466 unsigned long *failed_index)
467 {
468 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
469 long rc;
470
471 rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id);
472 if (rc == H_SUCCESS)
473 *trap = retbuf[0];
474 else if (rc == H_INVALID_ELEMENT_ID)
475 *failed_index = retbuf[0];
476 else if (rc == H_INVALID_ELEMENT_SIZE)
477 *failed_index = retbuf[0];
478 else if (rc == H_INVALID_ELEMENT_VALUE)
479 *failed_index = retbuf[0];
480
481 return rc;
482 }
483
/* Delete a nested guest, retrying while the hypervisor reports busy. */
static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id);
		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		}
	} while (rc == H_BUSY);

	return rc;
}
502
plpar_guest_set_capabilities(unsigned long flags,unsigned long capabilities)503 static inline long plpar_guest_set_capabilities(unsigned long flags,
504 unsigned long capabilities)
505 {
506 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
507 long rc;
508
509 do {
510 rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities);
511 if (rc == H_BUSY)
512 cond_resched();
513
514 if (H_IS_LONG_BUSY(rc)) {
515 msleep(get_longbusy_msecs(rc));
516 rc = H_BUSY;
517 }
518 } while (rc == H_BUSY);
519
520 return rc;
521 }
522
plpar_guest_get_capabilities(unsigned long flags,unsigned long * capabilities)523 static inline long plpar_guest_get_capabilities(unsigned long flags,
524 unsigned long *capabilities)
525 {
526 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
527 long rc;
528
529 do {
530 rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags);
531 if (rc == H_BUSY)
532 cond_resched();
533
534 if (H_IS_LONG_BUSY(rc)) {
535 msleep(get_longbusy_msecs(rc));
536 rc = H_BUSY;
537 }
538 } while (rc == H_BUSY);
539
540 if (rc == H_SUCCESS)
541 *capabilities = retbuf[0];
542
543 return rc;
544 }
545
546 /*
547 * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
548 *
549 * - Returns H_SUCCESS on success
550 * - For H_BUSY return value, we retry the hcall.
551 * - For any other hcall failures, attempt a full flush once before
552 * resorting to BUG().
553 *
554 * Note: This hcall is expected to fail only very rarely. The correct
555 * error recovery of killing the process/guest will be eventually
556 * needed.
557 */
pseries_rpt_invalidate(u64 pid,u64 target,u64 type,u64 page_sizes,u64 start,u64 end)558 static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
559 u64 page_sizes, u64 start, u64 end)
560 {
561 long rc;
562 unsigned long all;
563
564 while (true) {
565 rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
566 page_sizes, start, end);
567 if (rc == H_BUSY) {
568 cpu_relax();
569 continue;
570 } else if (rc == H_SUCCESS)
571 return rc;
572
573 /* Flush request failed, try with a full flush once */
574 if (type & H_RPTI_TYPE_NESTED)
575 all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
576 else
577 all = H_RPTI_TYPE_ALL;
578 retry:
579 rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
580 all, page_sizes, 0, -1UL);
581 if (rc == H_BUSY) {
582 cpu_relax();
583 goto retry;
584 } else if (rc == H_SUCCESS)
585 return rc;
586
587 BUG();
588 }
589 }
590
591 #else /* !CONFIG_PPC_PSERIES */
592
/* No-op stub when pSeries support is not configured. */
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return 0;
}
597
/* No-op stub when pSeries support is not configured. */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
		unsigned long *ptes)
{
	return 0;
}
603
/* No-op stub when pSeries support is not configured. */
static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	return 0;
}
609
/* No-op stub when pSeries support is not configured. */
static inline long plpar_guest_create_vcpu(unsigned long flags,
					   unsigned long guest_id,
					   unsigned long vcpu_id)
{
	return 0;
}
616
/* No-op stub when pSeries support is not configured. */
static inline long plpar_guest_get_state(unsigned long flags,
					 unsigned long guest_id,
					 unsigned long vcpu_id,
					 unsigned long data_buffer,
					 unsigned long data_size,
					 unsigned long *failed_index)
{
	return 0;
}
626
/* No-op stub when pSeries support is not configured. */
static inline long plpar_guest_set_state(unsigned long flags,
					 unsigned long guest_id,
					 unsigned long vcpu_id,
					 unsigned long data_buffer,
					 unsigned long data_size,
					 unsigned long *failed_index)
{
	return 0;
}
636
/* No-op stub when pSeries support is not configured. */
static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
					unsigned long vcpu_id, int *trap,
					unsigned long *failed_index)
{
	return 0;
}
643
/* No-op stub when pSeries support is not configured. */
static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
{
	return 0;
}
648
/* No-op stub when pSeries support is not configured. */
static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
{
	return 0;
}
653
/* No-op stub when pSeries support is not configured. */
static inline long plpar_guest_get_capabilities(unsigned long flags,
						unsigned long *capabilities)
{
	return 0;
}
659
/* No-op stub when pSeries support is not configured. */
static inline long plpar_guest_set_capabilities(unsigned long flags,
						unsigned long capabilities)
{
	return 0;
}
665
666 #endif /* CONFIG_PPC_PSERIES */
667
668 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
669