xref: /qemu/hw/ppc/spapr_hcall.c (revision 33c11879fd422b759483ed25fef133ea900ea8d7)
1 #include "qemu/osdep.h"
2 #include "qapi/error.h"
3 #include "sysemu/sysemu.h"
4 #include "cpu.h"
5 #include "helper_regs.h"
6 #include "hw/ppc/spapr.h"
7 #include "mmu-hash64.h"
8 #include "cpu-models.h"
9 #include "trace.h"
10 #include "sysemu/kvm.h"
11 #include "kvm_ppc.h"
12 
/* Argument bundle for a deferred SPR update run on the target vCPU's
 * thread via run_on_cpu() (see do_spr_sync()/set_spr() below). */
struct SPRSyncState {
    CPUState *cs;        /* vCPU whose SPR is modified */
    int spr;             /* SPR number, index into env->spr[] */
    target_ulong value;  /* bits ORed in after @mask bits are cleared */
    target_ulong mask;   /* bits cleared before @value is ORed in */
};
19 
/* run_on_cpu() callback: read-modify-write a single SPR on the vCPU
 * this runs on.  Clears the bits in @mask, then ORs in @value. */
static void do_spr_sync(void *arg)
{
    struct SPRSyncState *s = arg;
    PowerPCCPU *cpu = POWERPC_CPU(s->cs);
    CPUPPCState *env = &cpu->env;

    /* Make sure env->spr[] reflects current (e.g. in-kernel) state
     * before modifying it */
    cpu_synchronize_state(s->cs);
    env->spr[s->spr] &= ~s->mask;
    env->spr[s->spr] |= s->value;
}
30 
/* Update @spr on @cs from the vCPU's own thread: clear @mask bits, then
 * OR in @value.  NOTE(review): the state lives on this stack frame, so
 * this relies on run_on_cpu() completing the callback before returning
 * — confirm run_on_cpu() semantics. */
static void set_spr(CPUState *cs, int spr, target_ulong value,
                    target_ulong mask)
{
    struct SPRSyncState s = {
        .cs = cs,
        .spr = spr,
        .value = value,
        .mask = mask
    };
    run_on_cpu(cs, do_spr_sync, &s);
}
42 
43 static bool has_spr(PowerPCCPU *cpu, int spr)
44 {
45     /* We can test whether the SPR is defined by checking for a valid name */
46     return cpu->env.spr_cb[spr].name != NULL;
47 }
48 
49 static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index)
50 {
51     /*
52      * hash value/pteg group index is normalized by htab_mask
53      */
54     if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) {
55         return false;
56     }
57     return true;
58 }
59 
60 static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr)
61 {
62     MachineState *machine = MACHINE(spapr);
63     MemoryHotplugState *hpms = &spapr->hotplug_memory;
64 
65     if (addr < machine->ram_size) {
66         return true;
67     }
68     if ((addr >= hpms->base)
69         && ((addr - hpms->base) < memory_region_size(&hpms->mr))) {
70         return true;
71     }
72 
73     return false;
74 }
75 
/* H_ENTER hypercall: insert an HPTE into the guest hash page table.
 * args[0] = flags (H_EXACT selects an exact slot), args[1] = PTE index,
 * args[2] = pteh (first dword), args[3] = ptel (second dword).
 * On success returns H_SUCCESS with args[0] set to the slot used. */
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift, spshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    /* Reject HPTEs whose page size encoding is invalid for this CPU */
    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    /* Real address of the mapping, aligned down to the page size */
    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    /* Clear the software-use bits in the first dword */
    pteh &= ~0x60ULL;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        /* Caller doesn't care which slot: scan the 8-entry PTEG for a
         * free one */
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        /* H_EXACT: the requested slot itself must be free */
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
    }

    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    /* Tell the guest which slot was used */
    args[0] = pte_index + index;
    return H_SUCCESS;
}
146 
/* Outcome of remove_hpte(); the numeric values are also used directly
 * as the H_BULK_REMOVE response code (shifted into place by the
 * caller, see h_bulk_remove()). */
typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;
153 
/* Remove the HPTE at @ptex if it matches the conditions in @flags:
 * H_AVPN requires the first dword (sans the low 7 bits) to equal @avpn,
 * H_ANDCOND requires (v & avpn) == 0.  On success the removed dwords
 * are returned through @vp/@rp and the TLB entry is flushed. */
static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    /* Hand the old entry back before clobbering it */
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}
183 
184 static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
185                              target_ulong opcode, target_ulong *args)
186 {
187     target_ulong flags = args[0];
188     target_ulong pte_index = args[1];
189     target_ulong avpn = args[2];
190     RemoveResult ret;
191 
192     ret = remove_hpte(cpu, pte_index, avpn, flags,
193                       &args[0], &args[1]);
194 
195     switch (ret) {
196     case REMOVE_SUCCESS:
197         return H_SUCCESS;
198 
199     case REMOVE_NOT_FOUND:
200         return H_NOT_FOUND;
201 
202     case REMOVE_PARM:
203         return H_PARAMETER;
204 
205     case REMOVE_HW:
206         return H_HARDWARE;
207     }
208 
209     g_assert_not_reached();
210 }
211 
/* Field layout of one H_BULK_REMOVE request/response doubleword:
 * bits 63-62 type, 61-60 result code, 59-58 RC bits, 57-56 flags,
 * remaining low bits the PTE index. */
#define H_BULK_REMOVE_TYPE             0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST        0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE       0x8000000000000000ULL
#define   H_BULK_REMOVE_END            0xc000000000000000ULL
#define H_BULK_REMOVE_CODE             0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS        0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND      0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM           0x2000000000000000ULL
#define   H_BULK_REMOVE_HW             0x3000000000000000ULL
#define H_BULK_REMOVE_RC               0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS            0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE       0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND        0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN           0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX             0x00ffffffffffffffULL

/* Number of (tsh, tsl) pairs that fit in the hypercall argument list */
#define H_BULK_REMOVE_MAX_BATCH        4
229 
/* H_BULK_REMOVE hypercall: remove up to 4 HPTEs in one call.  Each
 * request is a (tsh, tsl) pair in args[]; tsh is rewritten in place
 * into a response carrying the result code and the entry's R/C bits. */
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        /* Turn the request into a response in place, keeping only the
         * index and flag fields */
        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        /* AVPN and ANDCOND are mutually exclusive */
        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        /* The >> 26 moves the bulk-remove flag bits down to the
         * H_AVPN/H_ANDCOND bit positions that remove_hpte() tests */
        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        /* RemoveResult values double as the response code field */
        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            /* Report the removed entry's Change/Reference bits */
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}
275 
/* H_PROTECT hypercall: update the protection/key bits of an existing
 * HPTE.  args[0] = flags (new PP/N/key bits, plus optional H_AVPN
 * match), args[1] = PTE index, args[2] = AVPN. */
static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    /* Rebuild the second dword: drop the old protection bits and
     * splice in the new ones from @flags */
    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    /* Invalidate first, flush the TLB, then rewrite the entry valid
     * again — the intermediate invalid state keeps the update atomic
     * from the guest's point of view */
    ppc_hash64_store_hpte(cpu, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}
312 
/* H_READ hypercall: read one HPTE (or, with H_READ_4, the aligned
 * group of four) back to the guest through args[].
 * NOTE(review): reads env->external_htab directly — assumes the HTAB
 * is allocated by QEMU (external_htab non-NULL); confirm callers. */
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    /* Copy both dwords of each entry into consecutive args[] slots */
    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}
342 
343 static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
344                                 target_ulong opcode, target_ulong *args)
345 {
346     cpu_synchronize_state(CPU(cpu));
347     cpu->env.spr[SPR_SPRG0] = args[0];
348 
349     return H_SUCCESS;
350 }
351 
/* H_SET_DABR hypercall: set the Data Address Breakpoint Register.
 * args[0] = new DABR value.  On CPUs that also have DABRX, it is
 * forced to 0x3 (problem + privileged state). */
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}
369 
370 static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
371                                 target_ulong opcode, target_ulong *args)
372 {
373     target_ulong dabrx = args[1];
374 
375     if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
376         return H_HARDWARE;
377     }
378 
379     if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
380         || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
381         return H_PARAMETER;
382     }
383 
384     cpu_synchronize_state(CPU(cpu));
385     cpu->env.spr[SPR_DABRX] = dabrx;
386     cpu->env.spr[SPR_DABR] = args[0];
387 
388     return H_SUCCESS;
389 }
390 
391 static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
392                                 target_ulong opcode, target_ulong *args)
393 {
394     target_ulong flags = args[0];
395     hwaddr dst = args[1];
396     hwaddr src = args[2];
397     hwaddr len = TARGET_PAGE_SIZE;
398     uint8_t *pdst, *psrc;
399     target_long ret = H_SUCCESS;
400 
401     if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
402                   | H_COPY_PAGE | H_ZERO_PAGE)) {
403         qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx "\n",
404                       flags);
405         return H_PARAMETER;
406     }
407 
408     /* Map-in destination */
409     if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
410         return H_PARAMETER;
411     }
412     pdst = cpu_physical_memory_map(dst, &len, 1);
413     if (!pdst || len != TARGET_PAGE_SIZE) {
414         return H_PARAMETER;
415     }
416 
417     if (flags & H_COPY_PAGE) {
418         /* Map-in source, copy to destination, and unmap source again */
419         if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
420             ret = H_PARAMETER;
421             goto unmap_out;
422         }
423         psrc = cpu_physical_memory_map(src, &len, 0);
424         if (!psrc || len != TARGET_PAGE_SIZE) {
425             ret = H_PARAMETER;
426             goto unmap_out;
427         }
428         memcpy(pdst, psrc, len);
429         cpu_physical_memory_unmap(psrc, len, 0, len);
430     } else if (flags & H_ZERO_PAGE) {
431         memset(pdst, 0, len);          /* Just clear the destination page */
432     }
433 
434     if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
435         kvmppc_dcbst_range(cpu, pdst, len);
436     }
437     if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
438         if (kvm_enabled()) {
439             kvmppc_icbi_range(cpu, pdst, len);
440         } else {
441             tb_flush(CPU(cpu));
442         }
443     }
444 
445 unmap_out:
446     cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
447     return ret;
448 }
449 
/* H_REGISTER_VPA flags values (args[0]) selecting which per-vCPU area
 * to (de)register */
#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

/* VPA layout constants: minimum size, and the offsets of the size
 * field and the shared-processor byte within the area */
#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2
461 
462 static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
463 {
464     CPUState *cs = CPU(ppc_env_get_cpu(env));
465     uint16_t size;
466     uint8_t tmp;
467 
468     if (vpa == 0) {
469         hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
470         return H_HARDWARE;
471     }
472 
473     if (vpa % env->dcache_line_size) {
474         return H_PARAMETER;
475     }
476     /* FIXME: bounds check the address */
477 
478     size = lduw_be_phys(cs->as, vpa + 0x4);
479 
480     if (size < VPA_MIN_SIZE) {
481         return H_PARAMETER;
482     }
483 
484     /* VPA is not allowed to cross a page boundary */
485     if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
486         return H_PARAMETER;
487     }
488 
489     env->vpa_addr = vpa;
490 
491     tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
492     tmp |= VPA_SHARED_PROC_VAL;
493     stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);
494 
495     return H_SUCCESS;
496 }
497 
498 static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
499 {
500     if (env->slb_shadow_addr) {
501         return H_RESOURCE;
502     }
503 
504     if (env->dtl_addr) {
505         return H_RESOURCE;
506     }
507 
508     env->vpa_addr = 0;
509     return H_SUCCESS;
510 }
511 
/* Register the SLB shadow buffer at guest address @addr.  Requires a
 * VPA to be registered already; the buffer must not cross a 4K page. */
static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    /* Size field lives at offset 0x4 in the buffer */
    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}
540 
541 static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
542 {
543     env->slb_shadow_addr = 0;
544     env->slb_shadow_size = 0;
545     return H_SUCCESS;
546 }
547 
/* Register the Dispatch Trace Log at guest address @addr.  Requires a
 * VPA to be registered already. */
static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    /* Size field lives at offset 0x4 in the buffer */
    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}
573 
574 static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
575 {
576     env->dtl_addr = 0;
577     env->dtl_size = 0;
578 
579     return H_SUCCESS;
580 }
581 
582 static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
583                                    target_ulong opcode, target_ulong *args)
584 {
585     target_ulong flags = args[0];
586     target_ulong procno = args[1];
587     target_ulong vpa = args[2];
588     target_ulong ret = H_PARAMETER;
589     CPUPPCState *tenv;
590     PowerPCCPU *tcpu;
591 
592     tcpu = ppc_get_vcpu_by_dt_id(procno);
593     if (!tcpu) {
594         return H_PARAMETER;
595     }
596     tenv = &tcpu->env;
597 
598     switch (flags) {
599     case FLAGS_REGISTER_VPA:
600         ret = register_vpa(tenv, vpa);
601         break;
602 
603     case FLAGS_DEREGISTER_VPA:
604         ret = deregister_vpa(tenv, vpa);
605         break;
606 
607     case FLAGS_REGISTER_SLBSHADOW:
608         ret = register_slb_shadow(tenv, vpa);
609         break;
610 
611     case FLAGS_DEREGISTER_SLBSHADOW:
612         ret = deregister_slb_shadow(tenv, vpa);
613         break;
614 
615     case FLAGS_REGISTER_DTL:
616         ret = register_dtl(tenv, vpa);
617         break;
618 
619     case FLAGS_DEREGISTER_DTL:
620         ret = deregister_dtl(tenv, vpa);
621         break;
622     }
623 
624     return ret;
625 }
626 
/* H_CEDE hypercall: the vCPU yields.  External interrupts are enabled
 * and, if there is no pending work, the vCPU is halted until one
 * arrives. */
static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    /* Enable external interrupts so the vCPU can be woken */
    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}
642 
643 static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
644                            target_ulong opcode, target_ulong *args)
645 {
646     target_ulong rtas_r3 = args[0];
647     uint32_t token = rtas_ld(rtas_r3, 0);
648     uint32_t nargs = rtas_ld(rtas_r3, 1);
649     uint32_t nret = rtas_ld(rtas_r3, 2);
650 
651     return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
652                            nret, rtas_r3 + 12 + 4*nargs);
653 }
654 
655 static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
656                                    target_ulong opcode, target_ulong *args)
657 {
658     CPUState *cs = CPU(cpu);
659     target_ulong size = args[0];
660     target_ulong addr = args[1];
661 
662     switch (size) {
663     case 1:
664         args[0] = ldub_phys(cs->as, addr);
665         return H_SUCCESS;
666     case 2:
667         args[0] = lduw_phys(cs->as, addr);
668         return H_SUCCESS;
669     case 4:
670         args[0] = ldl_phys(cs->as, addr);
671         return H_SUCCESS;
672     case 8:
673         args[0] = ldq_phys(cs->as, addr);
674         return H_SUCCESS;
675     }
676     return H_PARAMETER;
677 }
678 
679 static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
680                                     target_ulong opcode, target_ulong *args)
681 {
682     CPUState *cs = CPU(cpu);
683 
684     target_ulong size = args[0];
685     target_ulong addr = args[1];
686     target_ulong val  = args[2];
687 
688     switch (size) {
689     case 1:
690         stb_phys(cs->as, addr, val);
691         return H_SUCCESS;
692     case 2:
693         stw_phys(cs->as, addr, val);
694         return H_SUCCESS;
695     case 4:
696         stl_phys(cs->as, addr, val);
697         return H_SUCCESS;
698     case 8:
699         stq_phys(cs->as, addr, val);
700         return H_SUCCESS;
701     }
702     return H_PARAMETER;
703 }
704 
/* H_LOGICAL_MEMOP hypercall: copy (op=0) or copy-with-invert (op=1)
 * @count elements of 2^esize bytes from @src to @dst in guest physical
 * memory, handling overlapping regions. */
static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst   = args[0]; /* Destination address */
    target_ulong src   = args[1]; /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op    = args[4]; /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    /* Cap the amount of work a single hypercall can request */
    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    /* Addresses must be naturally aligned to the element size */
    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    /* If the destination overlaps the tail of the source, copy
     * backwards so elements aren't clobbered before they are read */
    if (dst >= src && dst < (src + (count << esize))) {
            dst = dst + ((count - 1) << esize);
            src = src + ((count - 1) << esize);
            step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}
773 
/* H_LOGICAL_ICBI hypercall: instruction cache block invalidate. */
static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
780 
/* H_LOGICAL_DCBF hypercall: data cache block flush. */
static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}
787 
/* H_SET_MODE resource LE: switch the interrupt-time endianness of all
 * vCPUs by setting/clearing LPCR[ILE], and flip VGA endianness to
 * match.  value1/value2 must be zero per PAPR. */
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}
820 
/* H_SET_MODE resource AIL: set the Alternate Interrupt Location mode
 * (LPCR[AIL]) on all vCPUs.  Only valid on ISA 2.07S CPUs; value1 and
 * value2 must be zero per PAPR. */
static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}
849 
850 static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
851                                target_ulong opcode, target_ulong *args)
852 {
853     target_ulong resource = args[1];
854     target_ulong ret = H_P2;
855 
856     switch (resource) {
857     case H_SET_MODE_RESOURCE_LE:
858         ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
859         break;
860     case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
861         ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
862                                                   args[2], args[3]);
863         break;
864     }
865 
866     return ret;
867 }
868 
869 /*
870  * Return the offset to the requested option vector @vector in the
871  * option vector table @table.
872  */
/*
 * Return the offset to the requested option vector @vector in the
 * option vector table @table.  Returns 0 if @table is NULL or @vector
 * is out of range.
 */
static target_ulong cas_get_option_vector(int vector, target_ulong table)
{
    int i;
    char nr_vectors, nr_entries;

    if (!table) {
        return 0;
    }

    /* First byte of the table holds (number of vectors - 1) */
    nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
    if (!vector || vector > nr_vectors) {
        return 0;
    }
    table++; /* skip nr option vectors */

    /* Walk the preceding vectors: each starts with its own length byte
     * and occupies (length + 2) bytes */
    for (i = 0; i < vector - 1; i++) {
        nr_entries = ldl_phys(&address_space_memory, table) >> 24;
        table += nr_entries + 2;
    }
    return table;
}
894 
/* Arguments for do_set_compat(), run on each vCPU via run_on_cpu() */
typedef struct {
    PowerPCCPU *cpu;       /* target vCPU */
    uint32_t cpu_version;  /* logical PVR to set */
    Error *err;            /* out: set on failure */
} SetCompatState;
900 
/* run_on_cpu() callback: set the compatibility (logical PVR) mode on
 * the vCPU this runs on; errors are reported through s->err. */
static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    ppc_set_compat(s->cpu, s->cpu_version, &s->err);
}
908 
/* Map a logical PVR to a comparable compatibility level number
 * (e.g. ISA 2.06 -> 2060); 0 means not a known logical PVR */
#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

/* Bit in option vector 5 byte 2 indicating dynamic-reconfiguration
 * memory support */
#define OV5_DRCONF_MEMORY 0x20
916 
/* H_CLIENT_ARCHITECTURE_SUPPORT (CAS) hypercall: negotiate the CPU
 * compatibility mode and feature set with the guest.  args[0] points
 * to the guest's list of supported (mask, PVR) pairs followed by the
 * option vector table; args[1]/args[2] locate the response buffer. */
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table, ov5;
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true, memory_update = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    char ov5_byte2;

    /* Parse PVR list (bounded at 512 pairs as a sanity limit) */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, list);
        list += 4;
        pvr = ldl_be_phys(&address_space_memory, list);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            /* Guest supports our real PVR and no compat cap is set:
             * run without a compatibility mode */
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            /* Guest supports the compat mode we are already in */
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                     (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                    ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                    (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs: apply the negotiated compat mode to every vCPU on
     * its own thread */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .err = NULL,
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.err) {
                error_report_err(s.err);
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For the future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov5 = cas_get_option_vector(5, ov_table);
    if (!ov5) {
        return H_SUCCESS;
    }

    /* @list now points to OV 5 */
    ov5_byte2 = ldub_phys(&address_space_memory, ov5 + 2);
    if (ov5_byte2 & OV5_DRCONF_MEMORY) {
        memory_update = true;
    }

    /* Rebuild the device tree response; reset if the guest needs to
     * pick up the changes */
    if (spapr_h_cas_compose_response(spapr, args[1], args[2],
                                     cpu_update, memory_update)) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}
1028 
/*
 * Dispatch tables for registered hypercall handlers, filled in by
 * spapr_register_hypercall() and consulted by spapr_hypercall().
 * PAPR opcodes are multiples of 4, so the first table is indexed by
 * opcode / 4; qemu/KVM-private hcalls live in a contiguous range
 * [KVMPPC_HCALL_BASE, KVMPPC_HCALL_MAX] and index the second table
 * by offset from the base. Unregistered entries stay NULL.
 */
static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
1031 
1032 void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
1033 {
1034     spapr_hcall_fn *slot;
1035 
1036     if (opcode <= MAX_HCALL_OPCODE) {
1037         assert((opcode & 0x3) == 0);
1038 
1039         slot = &papr_hypercall_table[opcode / 4];
1040     } else {
1041         assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));
1042 
1043         slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
1044     }
1045 
1046     assert(!(*slot));
1047     *slot = fn;
1048 }
1049 
1050 target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
1051                              target_ulong *args)
1052 {
1053     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
1054 
1055     if ((opcode <= MAX_HCALL_OPCODE)
1056         && ((opcode & 0x3) == 0)) {
1057         spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];
1058 
1059         if (fn) {
1060             return fn(cpu, spapr, opcode, args);
1061         }
1062     } else if ((opcode >= KVMPPC_HCALL_BASE) &&
1063                (opcode <= KVMPPC_HCALL_MAX)) {
1064         spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
1065 
1066         if (fn) {
1067             return fn(cpu, spapr, opcode, args);
1068         }
1069     }
1070 
1071     qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
1072                   opcode);
1073     return H_FUNCTION;
1074 }
1075 
1076 static void hypercall_register_types(void)
1077 {
1078     /* hcall-pft */
1079     spapr_register_hypercall(H_ENTER, h_enter);
1080     spapr_register_hypercall(H_REMOVE, h_remove);
1081     spapr_register_hypercall(H_PROTECT, h_protect);
1082     spapr_register_hypercall(H_READ, h_read);
1083 
1084     /* hcall-bulk */
1085     spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);
1086 
1087     /* hcall-splpar */
1088     spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
1089     spapr_register_hypercall(H_CEDE, h_cede);
1090 
1091     /* processor register resource access h-calls */
1092     spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
1093     spapr_register_hypercall(H_SET_DABR, h_set_dabr);
1094     spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
1095     spapr_register_hypercall(H_PAGE_INIT, h_page_init);
1096     spapr_register_hypercall(H_SET_MODE, h_set_mode);
1097 
1098     /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate
1099      * here between the "CI" and the "CACHE" variants, they will use whatever
1100      * mapping attributes qemu is using. When using KVM, the kernel will
1101      * enforce the attributes more strongly
1102      */
1103     spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
1104     spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
1105     spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
1106     spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
1107     spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
1108     spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
1109     spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);
1110 
1111     /* qemu/KVM-PPC specific hcalls */
1112     spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);
1113 
1114     /* ibm,client-architecture-support support */
1115     spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
1116 }
1117 
/* Run hypercall_register_types() during QEMU's module init phase */
type_init(hypercall_register_types)
1119