1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
3 #define _ASM_POWERPC_PLPAR_WRAPPERS_H
4
5 #ifdef CONFIG_PPC_PSERIES
6
7 #include <linux/string.h>
8 #include <linux/irqflags.h>
9 #include <linux/delay.h>
10
11 #include <asm/hvcall.h>
12 #include <asm/paca.h>
13 #include <asm/lppaca.h>
14 #include <asm/page.h>
15
/* Ask the hypervisor whether there is pending work for this partition. */
static inline long poll_pending(void)
{
	return plpar_hcall_norets(H_POLL_PENDING);
}
20
/* Cede this virtual processor's cycles back to the hypervisor (idle). */
static inline long cede_processor(void)
{
	/*
	 * We cannot call tracepoints inside RCU idle regions which
	 * means we must not trace H_CEDE.
	 */
	return plpar_hcall_norets_notrace(H_CEDE);
}
29
/*
 * Common helper for the H_REGISTER_VPA hcall.
 * @flags: VPA sub-function (register/deregister VPA, SLB shadow or DTL);
 *         shifted into the position the hcall expects.
 * @cpu:   hardware cpu id the area belongs to.
 * @vpa:   address of the buffer being registered (0 on deregister).
 */
static inline long vpa_call(unsigned long flags, unsigned long cpu,
			unsigned long vpa)
{
	flags = flags << H_VPA_FUNC_SHIFT;

	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}
37
/* Deregister @cpu's Virtual Processor Area. */
static inline long unregister_vpa(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}

/* Register @vpa as @cpu's Virtual Processor Area. */
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}

/* Deregister @cpu's SLB shadow buffer. */
static inline long unregister_slb_shadow(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}

/* Register @vpa as @cpu's SLB shadow buffer. */
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}

/* Deregister @cpu's Dispatch Trace Log buffer. */
static inline long unregister_dtl(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}

/* Register @vpa as @cpu's Dispatch Trace Log buffer. */
static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}
67
68 /*
69 * Invokes H_HTM hcall with parameters passed from htm_hcall_wrapper.
70 * flags: Set to hardwareTarget.
71 * target: Specifies target using node index, nodal chip index and core index.
72 * operation : action to perform ie configure, start, stop, deconfigure, trace
73 * based on the HTM type.
74 * param1, param2, param3: parameters for each action.
75 */
/* Raw H_HTM invocation; parameter meanings are described above. */
static inline long htm_call(unsigned long flags, unsigned long target,
			unsigned long operation, unsigned long param1,
			unsigned long param2, unsigned long param3)
{
	return plpar_hcall_norets(H_HTM, flags, target, operation,
			param1, param2, param3);
}
83
/*
 * Convenience wrapper around htm_call() that assembles the H_HTM
 * flags/target/operation words from their individual fields.
 * @flags: caller flag bits, OR'ed with H_HTM_FLAGS_HARDWARE_TARGET.
 * @nodeindex, @nodalchipindex, @coreindexonchip: identify the target core.
 * @type, @htm_op: HTM type and the operation to perform on it.
 * @param1..@param3: operation-specific parameters, passed through unchanged.
 */
static inline long htm_hcall_wrapper(unsigned long flags, unsigned long nodeindex,
			unsigned long nodalchipindex, unsigned long coreindexonchip,
			unsigned long type, unsigned long htm_op, unsigned long param1, unsigned long param2,
			unsigned long param3)
{
	return htm_call(H_HTM_FLAGS_HARDWARE_TARGET | flags,
			H_HTM_TARGET_NODE_INDEX(nodeindex) |
			H_HTM_TARGET_NODAL_CHIP_INDEX(nodalchipindex) |
			H_HTM_TARGET_CORE_INDEX_ON_CHIP(coreindexonchip),
			H_HTM_OP(htm_op) | H_HTM_TYPE(type),
			param1, param2, param3);
}
96
97 extern void vpa_init(int cpu);
98
plpar_pte_enter(unsigned long flags,unsigned long hpte_group,unsigned long hpte_v,unsigned long hpte_r,unsigned long * slot)99 static inline long plpar_pte_enter(unsigned long flags,
100 unsigned long hpte_group, unsigned long hpte_v,
101 unsigned long hpte_r, unsigned long *slot)
102 {
103 long rc;
104 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
105
106 rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
107
108 *slot = retbuf[0];
109
110 return rc;
111 }
112
plpar_pte_remove(unsigned long flags,unsigned long ptex,unsigned long avpn,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)113 static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
114 unsigned long avpn, unsigned long *old_pteh_ret,
115 unsigned long *old_ptel_ret)
116 {
117 long rc;
118 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
119
120 rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
121
122 *old_pteh_ret = retbuf[0];
123 *old_ptel_ret = retbuf[1];
124
125 return rc;
126 }
127
128 /* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);

	/* Old high/low PTE words, as for plpar_pte_remove(). */
	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}
143
plpar_pte_read(unsigned long flags,unsigned long ptex,unsigned long * old_pteh_ret,unsigned long * old_ptel_ret)144 static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
145 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
146 {
147 long rc;
148 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
149
150 rc = plpar_hcall(H_READ, retbuf, flags, ptex);
151
152 *old_pteh_ret = retbuf[0];
153 *old_ptel_ret = retbuf[1];
154
155 return rc;
156 }
157
158 /* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);

	/* High/low PTE words, as for plpar_pte_read(). */
	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}
172
173 /*
174 * ptes must be 8*sizeof(unsigned long)
175 */
/* Read 4 consecutive HPTEs starting at @ptex in one H_READ_4 call. */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
		unsigned long *ptes)

{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);

	/* 4 PTEs x 2 words each, regardless of rc (see buffer-size note above). */
	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}

/*
 * plpar_pte_read_4_raw can be called in real mode.
 * ptes must be 8*sizeof(unsigned long)
 */
static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
		unsigned long *ptes)

{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}
207
/* Change protection bits of the HPTE at @ptex via H_PROTECT. */
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
		unsigned long avpn)
{
	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}

/* Ask the hypervisor to prepare a hash page table resize to size @shift. */
static inline long plpar_resize_hpt_prepare(unsigned long flags,
		unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
}

/* Commit a previously prepared hash page table resize. */
static inline long plpar_resize_hpt_commit(unsigned long flags,
		unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
}
225
plpar_tce_get(unsigned long liobn,unsigned long ioba,unsigned long * tce_ret)226 static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
227 unsigned long *tce_ret)
228 {
229 long rc;
230 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
231
232 rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
233
234 *tce_ret = retbuf[0];
235
236 return rc;
237 }
238
/* Write a single TCE value at (liobn, ioba). */
static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
		unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}

/* Write @count TCEs from the list at @page, starting at (liobn, ioba). */
static inline long plpar_tce_put_indirect(unsigned long liobn,
		unsigned long ioba, unsigned long page, unsigned long count)
{
	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}

/* Write the same value @tceval into @count consecutive TCEs. */
static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
		unsigned long tceval, unsigned long count)
{
	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}
256
/*
 * Set various resource mode parameters via H_SET_MODE.
 * @mflags and @value1/@value2 are resource-specific; see the helpers below.
 */
static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
		unsigned long value1, unsigned long value2)
{
	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
}

/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
	/* mflags = 3: Exceptions at 0xC000000000004000 */
	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}
276
277 /*
278 * Disable relocation on exceptions on this partition
279 *
280 * Note: this call has a partition wide scope and can take a while to complete.
281 * If it returns H_LONG_BUSY_* it should be retried periodically until it
282 * returns H_SUCCESS.
283 */
disable_reloc_on_exceptions(void)284 static inline long disable_reloc_on_exceptions(void) {
285 return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
286 }
287
288 /*
289 * Take exceptions in big endian mode on this partition
290 *
291 * Note: this call has a partition wide scope and can take a while to complete.
292 * If it returns H_LONG_BUSY_* it should be retried periodically until it
293 * returns H_SUCCESS.
294 */
static inline long enable_big_endian_exceptions(void)
{
	/* mflags = 0: big endian exceptions */
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}

/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
	/* mflags = 1: little endian exceptions */
	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}

/* Set the Completed Instruction Address Breakpoint Register. */
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}

/* Program hardware watchpoint 0 (DAWR0/DAWRX0). */
static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
}

/* Program hardware watchpoint 1 (DAWR1/DAWRX1). */
static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
}

/* Send a system reset to @cpu. */
static inline long plpar_signal_sys_reset(long cpu)
{
	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}
333
plpar_get_cpu_characteristics(struct h_cpu_char_result * p)334 static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
335 {
336 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
337 long rc;
338
339 rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
340 if (rc == H_SUCCESS) {
341 p->character = retbuf[0];
342 p->behaviour = retbuf[1];
343 }
344
345 return rc;
346 }
347
/*
 * Create a nested guest via H_GUEST_CREATE.
 *
 * Retries while the hypervisor is busy: on H_BUSY and H_LONG_BUSY_* the
 * continuation token returned in retbuf[0] is passed back on the next
 * attempt (the initial token is -1UL).  On H_SUCCESS the new guest id
 * is stored in *guest_id.
 */
static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	unsigned long token;
	long rc;

	token = -1UL;
	do {
		rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token);
		if (rc == H_SUCCESS)
			*guest_id = retbuf[0];

		if (rc == H_BUSY) {
			token = retbuf[0];
			cond_resched();
		}

		if (H_IS_LONG_BUSY(rc)) {
			token = retbuf[0];
			msleep(get_longbusy_msecs(rc));
			/* Fold long-busy into H_BUSY so the loop retries. */
			rc = H_BUSY;
		}

	} while (rc == H_BUSY);

	return rc;
}
375
/*
 * Create a vcpu in an existing nested guest, retrying while the
 * hypervisor reports (long) busy.
 *
 * NOTE(review): @flags is accepted but a literal 0 is passed as the
 * hcall's flags argument -- confirm whether any caller relies on
 * non-zero flags here.
 */
static inline long plpar_guest_create_vcpu(unsigned long flags,
					   unsigned long guest_id,
					   unsigned long vcpu_id)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id);

		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			/* Fold long-busy into H_BUSY so the loop retries. */
			rc = H_BUSY;
		}

	} while (rc == H_BUSY);

	return rc;
}
397
plpar_guest_set_state(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,unsigned long data_buffer,unsigned long data_size,unsigned long * failed_index)398 static inline long plpar_guest_set_state(unsigned long flags,
399 unsigned long guest_id,
400 unsigned long vcpu_id,
401 unsigned long data_buffer,
402 unsigned long data_size,
403 unsigned long *failed_index)
404 {
405 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
406 long rc;
407
408 while (true) {
409 rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id,
410 vcpu_id, data_buffer, data_size);
411
412 if (rc == H_BUSY) {
413 cpu_relax();
414 continue;
415 }
416
417 if (H_IS_LONG_BUSY(rc)) {
418 mdelay(get_longbusy_msecs(rc));
419 continue;
420 }
421
422 if (rc == H_INVALID_ELEMENT_ID)
423 *failed_index = retbuf[0];
424 else if (rc == H_INVALID_ELEMENT_SIZE)
425 *failed_index = retbuf[0];
426 else if (rc == H_INVALID_ELEMENT_VALUE)
427 *failed_index = retbuf[0];
428
429 break;
430 }
431
432 return rc;
433 }
434
plpar_guest_get_state(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,unsigned long data_buffer,unsigned long data_size,unsigned long * failed_index)435 static inline long plpar_guest_get_state(unsigned long flags,
436 unsigned long guest_id,
437 unsigned long vcpu_id,
438 unsigned long data_buffer,
439 unsigned long data_size,
440 unsigned long *failed_index)
441 {
442 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
443 long rc;
444
445 while (true) {
446 rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id,
447 vcpu_id, data_buffer, data_size);
448
449 if (rc == H_BUSY) {
450 cpu_relax();
451 continue;
452 }
453
454 if (H_IS_LONG_BUSY(rc)) {
455 mdelay(get_longbusy_msecs(rc));
456 continue;
457 }
458
459 if (rc == H_INVALID_ELEMENT_ID)
460 *failed_index = retbuf[0];
461 else if (rc == H_INVALID_ELEMENT_SIZE)
462 *failed_index = retbuf[0];
463 else if (rc == H_INVALID_ELEMENT_VALUE)
464 *failed_index = retbuf[0];
465
466 break;
467 }
468
469 return rc;
470 }
471
plpar_guest_run_vcpu(unsigned long flags,unsigned long guest_id,unsigned long vcpu_id,int * trap,unsigned long * failed_index)472 static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
473 unsigned long vcpu_id, int *trap,
474 unsigned long *failed_index)
475 {
476 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
477 long rc;
478
479 rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id);
480 if (rc == H_SUCCESS)
481 *trap = retbuf[0];
482 else if (rc == H_INVALID_ELEMENT_ID)
483 *failed_index = retbuf[0];
484 else if (rc == H_INVALID_ELEMENT_SIZE)
485 *failed_index = retbuf[0];
486 else if (rc == H_INVALID_ELEMENT_VALUE)
487 *failed_index = retbuf[0];
488
489 return rc;
490 }
491
/*
 * Delete a nested guest, sleeping and retrying while the hypervisor
 * reports (long) busy.
 */
static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
{
	long rc;

	do {
		rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id);
		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			/* Fold long-busy into H_BUSY so the loop retries. */
			rc = H_BUSY;
		}

	} while (rc == H_BUSY);

	return rc;
}
510
/*
 * Advertise nested-guest capabilities to the hypervisor, retrying while
 * busy.  The hcall's return buffer is not consumed here.
 */
static inline long plpar_guest_set_capabilities(unsigned long flags,
						unsigned long capabilities)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities);
		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			/* Fold long-busy into H_BUSY so the loop retries. */
			rc = H_BUSY;
		}
	} while (rc == H_BUSY);

	return rc;
}
530
/*
 * Query nested-guest capabilities from the hypervisor, retrying while
 * busy.  On H_SUCCESS the capability word (retbuf[0]) is stored through
 * *capabilities.
 */
static inline long plpar_guest_get_capabilities(unsigned long flags,
						unsigned long *capabilities)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	do {
		rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags);
		if (rc == H_BUSY)
			cond_resched();

		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			/* Fold long-busy into H_BUSY so the loop retries. */
			rc = H_BUSY;
		}
	} while (rc == H_BUSY);

	if (rc == H_SUCCESS)
		*capabilities = retbuf[0];

	return rc;
}
553
554 /*
555 * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
556 *
557 * - Returns H_SUCCESS on success
558 * - For H_BUSY return value, we retry the hcall.
559 * - For any other hcall failures, attempt a full flush once before
560 * resorting to BUG().
561 *
562 * Note: This hcall is expected to fail only very rarely. The correct
563 * error recovery of killing the process/guest will be eventually
564 * needed.
565 */
static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	long rc;
	unsigned long all;

	while (true) {
		/* First attempt: the flush exactly as requested. */
		rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
					page_sizes, start, end);
		if (rc == H_BUSY) {
			cpu_relax();
			continue;
		} else if (rc == H_SUCCESS)
			return rc;

		/* Flush request failed, try with a full flush once */
		if (type & H_RPTI_TYPE_NESTED)
			all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
		else
			all = H_RPTI_TYPE_ALL;
retry:
		/* Full flush over the entire range (0 .. -1UL). */
		rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
					all, page_sizes, 0, -1UL);
		if (rc == H_BUSY) {
			cpu_relax();
			goto retry;
		} else if (rc == H_SUCCESS)
			return rc;

		/* Even the full flush failed; see the recovery note above. */
		BUG();
	}
}
598
599 #else /* !CONFIG_PPC_PSERIES */
600
/*
 * Stub implementations for !CONFIG_PPC_PSERIES builds: each returns 0
 * so callers do not need their own #ifdefs.
 */
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return 0;
}

static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
				    unsigned long *ptes)
{
	return 0;
}

static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	return 0;
}

static inline long plpar_guest_create_vcpu(unsigned long flags,
					   unsigned long guest_id,
					   unsigned long vcpu_id)
{
	return 0;
}

static inline long plpar_guest_get_state(unsigned long flags,
					 unsigned long guest_id,
					 unsigned long vcpu_id,
					 unsigned long data_buffer,
					 unsigned long data_size,
					 unsigned long *failed_index)
{
	return 0;
}

static inline long plpar_guest_set_state(unsigned long flags,
					 unsigned long guest_id,
					 unsigned long vcpu_id,
					 unsigned long data_buffer,
					 unsigned long data_size,
					 unsigned long *failed_index)
{
	return 0;
}

static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
					unsigned long vcpu_id, int *trap,
					unsigned long *failed_index)
{
	return 0;
}

static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
{
	return 0;
}

static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
{
	return 0;
}

static inline long plpar_guest_get_capabilities(unsigned long flags,
						unsigned long *capabilities)
{
	return 0;
}

static inline long plpar_guest_set_capabilities(unsigned long flags,
						unsigned long capabilities)
{
	return 0;
}
673
674 #endif /* CONFIG_PPC_PSERIES */
675
676 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
677