1 #include "qemu/osdep.h" 2 #include "qapi/error.h" 3 #include "sysemu/sysemu.h" 4 #include "qemu/log.h" 5 #include "cpu.h" 6 #include "exec/exec-all.h" 7 #include "helper_regs.h" 8 #include "hw/ppc/spapr.h" 9 #include "mmu-hash64.h" 10 #include "cpu-models.h" 11 #include "trace.h" 12 #include "sysemu/kvm.h" 13 #include "kvm_ppc.h" 14 15 struct SPRSyncState { 16 CPUState *cs; 17 int spr; 18 target_ulong value; 19 target_ulong mask; 20 }; 21 22 static void do_spr_sync(void *arg) 23 { 24 struct SPRSyncState *s = arg; 25 PowerPCCPU *cpu = POWERPC_CPU(s->cs); 26 CPUPPCState *env = &cpu->env; 27 28 cpu_synchronize_state(s->cs); 29 env->spr[s->spr] &= ~s->mask; 30 env->spr[s->spr] |= s->value; 31 } 32 33 static void set_spr(CPUState *cs, int spr, target_ulong value, 34 target_ulong mask) 35 { 36 struct SPRSyncState s = { 37 .cs = cs, 38 .spr = spr, 39 .value = value, 40 .mask = mask 41 }; 42 run_on_cpu(cs, do_spr_sync, &s); 43 } 44 45 static bool has_spr(PowerPCCPU *cpu, int spr) 46 { 47 /* We can test whether the SPR is defined by checking for a valid name */ 48 return cpu->env.spr_cb[spr].name != NULL; 49 } 50 51 static inline bool valid_pte_index(CPUPPCState *env, target_ulong pte_index) 52 { 53 /* 54 * hash value/pteg group index is normalized by htab_mask 55 */ 56 if (((pte_index & ~7ULL) / HPTES_PER_GROUP) & ~env->htab_mask) { 57 return false; 58 } 59 return true; 60 } 61 62 static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr) 63 { 64 MachineState *machine = MACHINE(spapr); 65 MemoryHotplugState *hpms = &spapr->hotplug_memory; 66 67 if (addr < machine->ram_size) { 68 return true; 69 } 70 if ((addr >= hpms->base) 71 && ((addr - hpms->base) < memory_region_size(&hpms->mr))) { 72 return true; 73 } 74 75 return false; 76 } 77 78 static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr, 79 target_ulong opcode, target_ulong *args) 80 { 81 CPUPPCState *env = &cpu->env; 82 target_ulong flags = args[0]; 83 target_ulong pte_index = args[1]; 84 target_ulong pteh = args[2]; 85 target_ulong ptel = args[3]; 86 unsigned apshift, spshift; 87 target_ulong raddr; 88 target_ulong index; 89 uint64_t token; 90 91 apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift); 92 if (!apshift) { 93 /* Bad page size encoding */ 94 return H_PARAMETER; 95 } 96 97 raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1); 98 99 if (is_ram_address(spapr, raddr)) { 100 /* Regular RAM - should have WIMG=0010 */ 101 if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) { 102 return H_PARAMETER; 103 } 104 } else { 105 /* Looks like an IO address */ 106 /* FIXME: What WIMG combinations could be sensible for IO? 107 * For now we allow WIMG=010x, but are there others? */ 108 /* FIXME: Should we check against registered IO addresses? 
static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong pteh = args[2];
    target_ulong ptel = args[3];
    unsigned apshift, spshift;
    target_ulong raddr;
    target_ulong index;
    uint64_t token;

    apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel, &spshift);
    if (!apshift) {
        /* Bad page size encoding */
        return H_PARAMETER;
    }

    raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1);

    if (is_ram_address(spapr, raddr)) {
        /* Regular RAM - should have WIMG=0010 */
        if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) {
            return H_PARAMETER;
        }
    } else {
        /* Looks like an IO address */
        /* FIXME: What WIMG combinations could be sensible for IO?
         * For now we allow WIMG=010x, but are there others? */
        /* FIXME: Should we check against registered IO addresses? */
        if ((ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)) != HPTE64_R_I) {
            return H_PARAMETER;
        }
    }

    pteh &= ~0x60ULL; /* ignore the software-use bits in the PTE */

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    index = 0;
    if (likely((flags & H_EXACT) == 0)) {
        /* Scan the whole PTEG for a free slot */
        pte_index &= ~7ULL;
        token = ppc_hash64_start_access(cpu, pte_index);
        for (; index < 8; index++) {
            if (!(ppc_hash64_load_hpte0(cpu, token, index) & HPTE64_V_VALID)) {
                break;
            }
        }
        ppc_hash64_stop_access(cpu, token);
        if (index == 8) {
            return H_PTEG_FULL;
        }
    } else {
        token = ppc_hash64_start_access(cpu, pte_index);
        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
            ppc_hash64_stop_access(cpu, token);
            return H_PTEG_FULL;
        }
        ppc_hash64_stop_access(cpu, token);
    }

    ppc_hash64_store_hpte(cpu, pte_index + index,
                          pteh | HPTE64_V_HPTE_DIRTY, ptel);

    args[0] = pte_index + index;
    return H_SUCCESS;
}

typedef enum {
    REMOVE_SUCCESS = 0,
    REMOVE_NOT_FOUND = 1,
    REMOVE_PARM = 2,
    REMOVE_HW = 3,
} RemoveResult;

static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong avpn,
                                target_ulong flags,
                                target_ulong *vp, target_ulong *rp)
{
    CPUPPCState *env = &cpu->env;
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, ptex)) {
        return REMOVE_PARM;
    }

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
        ((flags & H_ANDCOND) && (v & avpn) != 0)) {
        return REMOVE_NOT_FOUND;
    }
    *vp = v;
    *rp = r;
    ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r);
    return REMOVE_SUCCESS;
}

static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    RemoveResult ret;

    ret = remove_hpte(cpu, pte_index, avpn, flags,
                      &args[0], &args[1]);

    switch (ret) {
    case REMOVE_SUCCESS:
        return H_SUCCESS;

    case REMOVE_NOT_FOUND:
        return H_NOT_FOUND;

    case REMOVE_PARM:
        return H_PARAMETER;

    case REMOVE_HW:
        return H_HARDWARE;
    }

    g_assert_not_reached();
}

#define H_BULK_REMOVE_TYPE         0xc000000000000000ULL
#define   H_BULK_REMOVE_REQUEST    0x4000000000000000ULL
#define   H_BULK_REMOVE_RESPONSE   0x8000000000000000ULL
#define   H_BULK_REMOVE_END        0xc000000000000000ULL
#define H_BULK_REMOVE_CODE         0x3000000000000000ULL
#define   H_BULK_REMOVE_SUCCESS    0x0000000000000000ULL
#define   H_BULK_REMOVE_NOT_FOUND  0x1000000000000000ULL
#define   H_BULK_REMOVE_PARM       0x2000000000000000ULL
#define   H_BULK_REMOVE_HW         0x3000000000000000ULL
#define H_BULK_REMOVE_RC           0x0c00000000000000ULL
#define H_BULK_REMOVE_FLAGS        0x0300000000000000ULL
#define   H_BULK_REMOVE_ABSOLUTE   0x0000000000000000ULL
#define   H_BULK_REMOVE_ANDCOND    0x0100000000000000ULL
#define   H_BULK_REMOVE_AVPN       0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX         0x00ffffffffffffffULL

#define H_BULK_REMOVE_MAX_BATCH    4

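/*
 * H_BULK_REMOVE: remove up to four HPT entries per call. Each request
 * is a register pair: the first ("tsh") packs a type field, the
 * AVPN/ANDCOND flags and the PTE index, while the second ("tsl")
 * carries the AVPN value to match. The type field is rewritten as a
 * response, with the per-entry RemoveResult code and the removed
 * entry's R/C bits folded back into the high word.
 */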
static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                  target_ulong opcode, target_ulong *args)
{
    int i;

    for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
        target_ulong *tsh = &args[i*2];
        target_ulong tsl = args[i*2 + 1];
        target_ulong v, r, ret;

        if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
            break;
        } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) {
            return H_PARAMETER;
        }

        *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
        *tsh |= H_BULK_REMOVE_RESPONSE;

        if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) {
            *tsh |= H_BULK_REMOVE_PARM;
            return H_PARAMETER;
        }

        ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl,
                          (*tsh & H_BULK_REMOVE_FLAGS) >> 26,
                          &v, &r);

        *tsh |= ret << 60;

        switch (ret) {
        case REMOVE_SUCCESS:
            *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43;
            break;

        case REMOVE_PARM:
            return H_PARAMETER;

        case REMOVE_HW:
            return H_HARDWARE;
        }
    }

    return H_SUCCESS;
}

static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    target_ulong avpn = args[2];
    uint64_t token;
    target_ulong v, r;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    token = ppc_hash64_start_access(cpu, pte_index);
    v = ppc_hash64_load_hpte0(cpu, token, 0);
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);

    if ((v & HPTE64_V_VALID) == 0 ||
        ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {
        return H_NOT_FOUND;
    }

    r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N |
           HPTE64_R_KEY_HI | HPTE64_R_KEY_LO);
    r |= (flags << 55) & HPTE64_R_PP0;
    r |= (flags << 48) & HPTE64_R_KEY_HI;
    r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO);
    /* Invalidate the entry while we update it, then flush the TLB */
    ppc_hash64_store_hpte(cpu, pte_index,
                          (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0);
    ppc_hash64_tlb_flush_hpte(cpu, pte_index, v, r);
    /* Don't need a memory barrier, due to qemu's global lock */
    ppc_hash64_store_hpte(cpu, pte_index, v | HPTE64_V_HPTE_DIRTY, r);
    return H_SUCCESS;
}

static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    target_ulong flags = args[0];
    target_ulong pte_index = args[1];
    uint8_t *hpte;
    int i, ridx, n_entries = 1;

    if (!valid_pte_index(env, pte_index)) {
        return H_PARAMETER;
    }

    if (flags & H_READ_4) {
        /* Clear the two low order bits */
        pte_index &= ~(3ULL);
        n_entries = 4;
    }

    hpte = env->external_htab + (pte_index * HASH_PTE_SIZE_64);

    for (i = 0, ridx = 0; i < n_entries; i++) {
        args[ridx++] = ldq_p(hpte);
        args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2));
        hpte += HASH_PTE_SIZE_64;
    }

    return H_SUCCESS;
}

static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_SPRG0] = args[0];

    return H_SUCCESS;
}

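/*
 * H_SET_DABR / H_SET_XDABR: program the Data Address Breakpoint
 * Register (and its extension DABRX, where the CPU has one). Without
 * a DABRX, the DABR's Breakpoint Translation bit must be set in the
 * request, otherwise we fail with H_RESERVED_DABR.
 */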
static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    if (!has_spr(cpu, SPR_DABR)) {
        return H_HARDWARE;              /* DABR register not available */
    }
    cpu_synchronize_state(CPU(cpu));

    if (has_spr(cpu, SPR_DABRX)) {
        cpu->env.spr[SPR_DABRX] = 0x3;  /* Use Problem and Privileged state */
    } else if (!(args[0] & 0x4)) {      /* Breakpoint Translation set? */
        return H_RESERVED_DABR;
    }

    cpu->env.spr[SPR_DABR] = args[0];
    return H_SUCCESS;
}

static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong dabrx = args[1];

    if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) {
        return H_HARDWARE;
    }

    if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0
        || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) {
        return H_PARAMETER;
    }

    cpu_synchronize_state(CPU(cpu));
    cpu->env.spr[SPR_DABRX] = dabrx;
    cpu->env.spr[SPR_DABR] = args[0];

    return H_SUCCESS;
}

static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    hwaddr dst = args[1];
    hwaddr src = args[2];
    hwaddr len = TARGET_PAGE_SIZE;
    uint8_t *pdst, *psrc;
    target_long ret = H_SUCCESS;

    if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE
                  | H_COPY_PAGE | H_ZERO_PAGE)) {
        qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n",
                      flags);
        return H_PARAMETER;
    }

    /* Map-in destination */
    if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) {
        return H_PARAMETER;
    }
    pdst = cpu_physical_memory_map(dst, &len, 1);
    if (!pdst || len != TARGET_PAGE_SIZE) {
        return H_PARAMETER;
    }

    if (flags & H_COPY_PAGE) {
        /* Map-in source, copy to destination, and unmap source again */
        if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        psrc = cpu_physical_memory_map(src, &len, 0);
        if (!psrc || len != TARGET_PAGE_SIZE) {
            ret = H_PARAMETER;
            goto unmap_out;
        }
        memcpy(pdst, psrc, len);
        cpu_physical_memory_unmap(psrc, len, 0, len);
    } else if (flags & H_ZERO_PAGE) {
        memset(pdst, 0, len);           /* Just clear the destination page */
    }

    if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) {
        kvmppc_dcbst_range(cpu, pdst, len);
    }
    if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) {
        if (kvm_enabled()) {
            kvmppc_icbi_range(cpu, pdst, len);
        } else {
            tb_flush(CPU(cpu));
        }
    }

unmap_out:
    cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len);
    return ret;
}

#define FLAGS_REGISTER_VPA         0x0000200000000000ULL
#define FLAGS_REGISTER_DTL         0x0000400000000000ULL
#define FLAGS_REGISTER_SLBSHADOW   0x0000600000000000ULL
#define FLAGS_DEREGISTER_VPA       0x0000a00000000000ULL
#define FLAGS_DEREGISTER_DTL       0x0000c00000000000ULL
#define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL

#define VPA_MIN_SIZE           640
#define VPA_SIZE_OFFSET        0x4
#define VPA_SHARED_PROC_OFFSET 0x9
#define VPA_SHARED_PROC_VAL    0x2

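/*
 * The VPA (Virtual Processor Area) is a per-vCPU region of guest
 * memory shared with the hypervisor. The helpers below validate and
 * record the guest-supplied addresses for the VPA itself, the SLB
 * shadow buffer and the dispatch trace log (DTL); the latter two may
 * only be registered while a VPA exists, and the VPA may not be
 * deregistered while either of them is still registered.
 */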
static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint16_t size;
    uint8_t tmp;

    if (vpa == 0) {
        hcall_dprintf("Can't cope with registering a VPA at logical 0\n");
        return H_HARDWARE;
    }

    if (vpa % env->dcache_line_size) {
        return H_PARAMETER;
    }
    /* FIXME: bounds check the address */

    size = lduw_be_phys(cs->as, vpa + 0x4);

    if (size < VPA_MIN_SIZE) {
        return H_PARAMETER;
    }

    /* VPA is not allowed to cross a page boundary */
    if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    env->vpa_addr = vpa;

    tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET);
    tmp |= VPA_SHARED_PROC_VAL;
    stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp);

    return H_SUCCESS;
}

static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa)
{
    if (env->slb_shadow_addr) {
        return H_RESOURCE;
    }

    if (env->dtl_addr) {
        return H_RESOURCE;
    }

    env->vpa_addr = 0;
    return H_SUCCESS;
}

static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with SLB shadow at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);
    if (size < 0x8) {
        return H_PARAMETER;
    }

    if ((addr / 4096) != ((addr + size - 1) / 4096)) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->slb_shadow_addr = addr;
    env->slb_shadow_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr)
{
    env->slb_shadow_addr = 0;
    env->slb_shadow_size = 0;
    return H_SUCCESS;
}

static target_ulong register_dtl(CPUPPCState *env, target_ulong addr)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    uint32_t size;

    if (addr == 0) {
        hcall_dprintf("Can't cope with DTL at logical 0\n");
        return H_HARDWARE;
    }

    size = ldl_be_phys(cs->as, addr + 0x4);

    if (size < 48) {
        return H_PARAMETER;
    }

    if (!env->vpa_addr) {
        return H_RESOURCE;
    }

    env->dtl_addr = addr;
    env->dtl_size = size;

    return H_SUCCESS;
}

static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr)
{
    env->dtl_addr = 0;
    env->dtl_size = 0;

    return H_SUCCESS;
}

static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    target_ulong flags = args[0];
    target_ulong procno = args[1];
    target_ulong vpa = args[2];
    target_ulong ret = H_PARAMETER;
    CPUPPCState *tenv;
    PowerPCCPU *tcpu;

    tcpu = ppc_get_vcpu_by_dt_id(procno);
    if (!tcpu) {
        return H_PARAMETER;
    }
    tenv = &tcpu->env;

    switch (flags) {
    case FLAGS_REGISTER_VPA:
        ret = register_vpa(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_VPA:
        ret = deregister_vpa(tenv, vpa);
        break;

    case FLAGS_REGISTER_SLBSHADOW:
        ret = register_slb_shadow(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_SLBSHADOW:
        ret = deregister_slb_shadow(tenv, vpa);
        break;

    case FLAGS_REGISTER_DTL:
        ret = register_dtl(tenv, vpa);
        break;

    case FLAGS_DEREGISTER_DTL:
        ret = deregister_dtl(tenv, vpa);
        break;
    }

    return ret;
}

static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    CPUPPCState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    env->msr |= (1ULL << MSR_EE);
    hreg_compute_hflags(env);
    if (!cpu_has_work(cs)) {
        cs->halted = 1;
        cs->exception_index = EXCP_HLT;
        cs->exit_request = 1;
    }
    return H_SUCCESS;
}

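/*
 * KVMPPC_H_RTAS: private hcall that routes guest RTAS calls into
 * QEMU's RTAS emulation. args[0] is the guest real address of the
 * RTAS argument buffer: token, nargs and nret words, followed by
 * nargs argument words and then the nret return slots.
 */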
static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong rtas_r3 = args[0];
    uint32_t token = rtas_ld(rtas_r3, 0);
    uint32_t nargs = rtas_ld(rtas_r3, 1);
    uint32_t nret = rtas_ld(rtas_r3, 2);

    return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12,
                           nret, rtas_r3 + 12 + 4*nargs);
}

static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);
    target_ulong size = args[0];
    target_ulong addr = args[1];

    switch (size) {
    case 1:
        args[0] = ldub_phys(cs->as, addr);
        return H_SUCCESS;
    case 2:
        args[0] = lduw_phys(cs->as, addr);
        return H_SUCCESS;
    case 4:
        args[0] = ldl_phys(cs->as, addr);
        return H_SUCCESS;
    case 8:
        args[0] = ldq_phys(cs->as, addr);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong size = args[0];
    target_ulong addr = args[1];
    target_ulong val = args[2];

    switch (size) {
    case 1:
        stb_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 2:
        stw_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 4:
        stl_phys(cs->as, addr, val);
        return H_SUCCESS;
    case 8:
        stq_phys(cs->as, addr, val);
        return H_SUCCESS;
    }
    return H_PARAMETER;
}

static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                    target_ulong opcode, target_ulong *args)
{
    CPUState *cs = CPU(cpu);

    target_ulong dst = args[0];   /* Destination address */
    target_ulong src = args[1];   /* Source address */
    target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */
    target_ulong count = args[3]; /* Element count */
    target_ulong op = args[4];    /* 0 = copy, 1 = invert */
    uint64_t tmp;
    unsigned int mask = (1 << esize) - 1;
    int step = 1 << esize;

    if (count > 0x80000000) {
        return H_PARAMETER;
    }

    if ((dst & mask) || (src & mask) || (op > 1)) {
        return H_PARAMETER;
    }

    if (dst >= src && dst < (src + (count << esize))) {
        /* Overlapping regions: copy backwards so the source survives */
        dst = dst + ((count - 1) << esize);
        src = src + ((count - 1) << esize);
        step = -step;
    }

    while (count--) {
        switch (esize) {
        case 0:
            tmp = ldub_phys(cs->as, src);
            break;
        case 1:
            tmp = lduw_phys(cs->as, src);
            break;
        case 2:
            tmp = ldl_phys(cs->as, src);
            break;
        case 3:
            tmp = ldq_phys(cs->as, src);
            break;
        default:
            return H_PARAMETER;
        }
        if (op == 1) {
            tmp = ~tmp;
        }
        switch (esize) {
        case 0:
            stb_phys(cs->as, dst, tmp);
            break;
        case 1:
            stw_phys(cs->as, dst, tmp);
            break;
        case 2:
            stl_phys(cs->as, dst, tmp);
            break;
        case 3:
            stq_phys(cs->as, dst, tmp);
            break;
        }
        dst = dst + step;
        src = src + step;
    }

    return H_SUCCESS;
}

static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                   target_ulong opcode, target_ulong *args)
{
    /* Nothing to do on emulation, KVM will trap this in the kernel */
    return H_SUCCESS;
}

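/*
 * H_SET_MODE helpers. The LE resource flips the guest's interrupt
 * endianness by rewriting LPCR[ILE] on every vCPU; the address
 * translation mode resource sets LPCR[AIL] (on ISA 2.07S CPUs only)
 * to control where interrupts are delivered.
 */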
static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
                                           target_ulong mflags,
                                           target_ulong value1,
                                           target_ulong value2)
{
    CPUState *cs;

    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    switch (mflags) {
    case H_SET_MODE_ENDIAN_BIG:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
        }
        spapr_pci_switch_vga(true);
        return H_SUCCESS;

    case H_SET_MODE_ENDIAN_LITTLE:
        CPU_FOREACH(cs) {
            set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
        }
        spapr_pci_switch_vga(false);
        return H_SUCCESS;
    }

    return H_UNSUPPORTED_FLAG;
}

static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
                                                        target_ulong mflags,
                                                        target_ulong value1,
                                                        target_ulong value2)
{
    CPUState *cs;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
        return H_P2;
    }
    if (value1) {
        return H_P3;
    }
    if (value2) {
        return H_P4;
    }

    if (mflags == AIL_RESERVED) {
        return H_UNSUPPORTED_FLAG;
    }

    CPU_FOREACH(cs) {
        set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
    }

    return H_SUCCESS;
}

static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                               target_ulong opcode, target_ulong *args)
{
    target_ulong resource = args[1];
    target_ulong ret = H_P2;

    switch (resource) {
    case H_SET_MODE_RESOURCE_LE:
        ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]);
        break;
    case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
        ret = h_set_mode_resource_addr_trans_mode(cpu, args[0],
                                                  args[2], args[3]);
        break;
    }

    return ret;
}

/*
 * Return the offset to the requested option vector @vector in the
 * option vector table @table.
 */
static target_ulong cas_get_option_vector(int vector, target_ulong table)
{
    int i;
    char nr_vectors, nr_entries;

    if (!table) {
        return 0;
    }

    nr_vectors = (ldl_phys(&address_space_memory, table) >> 24) + 1;
    if (!vector || vector > nr_vectors) {
        return 0;
    }
    table++; /* skip nr option vectors */

    for (i = 0; i < vector - 1; i++) {
        nr_entries = ldl_phys(&address_space_memory, table) >> 24;
        table += nr_entries + 2;
    }
    return table;
}

typedef struct {
    PowerPCCPU *cpu;
    uint32_t cpu_version;
    Error *err;
} SetCompatState;

static void do_set_compat(void *arg)
{
    SetCompatState *s = arg;

    cpu_synchronize_state(CPU(s->cpu));
    ppc_set_compat(s->cpu, s->cpu_version, &s->err);
}

#define get_compat_level(cpuver) ( \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_05) ? 2050 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06) ? 2060 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_06_PLUS) ? 2061 : \
    ((cpuver) == CPU_POWERPC_LOGICAL_2_07) ? 2070 : 0)

#define OV5_DRCONF_MEMORY 0x20

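/*
 * KVMPPC_H_CAS: ibm,client-architecture-support. The guest supplies a
 * list of (PVR mask, PVR value) pairs it can run with, terminated by
 * an entry whose value has bits set outside its mask, followed by the
 * option vector table. We pick the best matching compatibility mode,
 * apply it to all vCPUs and, if the device tree needs regenerating,
 * trigger a system reset.
 */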
static target_ulong h_client_architecture_support(PowerPCCPU *cpu_,
                                                  sPAPRMachineState *spapr,
                                                  target_ulong opcode,
                                                  target_ulong *args)
{
    target_ulong list = ppc64_phys_to_real(args[0]);
    target_ulong ov_table, ov5;
    PowerPCCPUClass *pcc_ = POWERPC_CPU_GET_CLASS(cpu_);
    CPUState *cs;
    bool cpu_match = false, cpu_update = true, memory_update = false;
    unsigned old_cpu_version = cpu_->cpu_version;
    unsigned compat_lvl = 0, cpu_version = 0;
    unsigned max_lvl = get_compat_level(cpu_->max_compat);
    int counter;
    char ov5_byte2;

    /* Parse PVR list */
    for (counter = 0; counter < 512; ++counter) {
        uint32_t pvr, pvr_mask;

        pvr_mask = ldl_be_phys(&address_space_memory, list);
        list += 4;
        pvr = ldl_be_phys(&address_space_memory, list);
        list += 4;

        trace_spapr_cas_pvr_try(pvr);
        if (!max_lvl &&
            ((cpu_->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask))) {
            cpu_match = true;
            cpu_version = 0;
        } else if (pvr == cpu_->cpu_version) {
            cpu_match = true;
            cpu_version = cpu_->cpu_version;
        } else if (!cpu_match) {
            /* If it is a logical PVR, try to determine the highest level */
            unsigned lvl = get_compat_level(pvr);
            if (lvl) {
                bool is205 = (pcc_->pcr_mask & PCR_COMPAT_2_05) &&
                             (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_05));
                bool is206 = (pcc_->pcr_mask & PCR_COMPAT_2_06) &&
                             ((lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06)) ||
                              (lvl == get_compat_level(CPU_POWERPC_LOGICAL_2_06_PLUS)));

                if (is205 || is206) {
                    if (!max_lvl) {
                        /* User did not set the level, choose the highest */
                        if (compat_lvl <= lvl) {
                            compat_lvl = lvl;
                            cpu_version = pvr;
                        }
                    } else if (max_lvl >= lvl) {
                        /* User chose the level, don't set higher than this */
                        compat_lvl = lvl;
                        cpu_version = pvr;
                    }
                }
            }
        }
        /* Terminator record */
        if (~pvr_mask & pvr) {
            break;
        }
    }

    /* Parsing finished */
    trace_spapr_cas_pvr(cpu_->cpu_version, cpu_match,
                        cpu_version, pcc_->pcr_mask);

    /* Update CPUs */
    if (old_cpu_version != cpu_version) {
        CPU_FOREACH(cs) {
            SetCompatState s = {
                .cpu = POWERPC_CPU(cs),
                .cpu_version = cpu_version,
                .err = NULL,
            };

            run_on_cpu(cs, do_set_compat, &s);

            if (s.err) {
                error_report_err(s.err);
                return H_HARDWARE;
            }
        }
    }

    if (!cpu_version) {
        cpu_update = false;
    }

    /* For future use: here @ov_table points to the first option vector */
    ov_table = list;

    ov5 = cas_get_option_vector(5, ov_table);
    if (!ov5) {
        return H_SUCCESS;
    }

    /* @ov5 now points to option vector 5 */
    ov5_byte2 = ldub_phys(&address_space_memory, ov5 + 2);
    if (ov5_byte2 & OV5_DRCONF_MEMORY) {
        memory_update = true;
    }

    if (spapr_h_cas_compose_response(spapr, args[1], args[2],
                                     cpu_update, memory_update)) {
        qemu_system_reset_request();
    }

    return H_SUCCESS;
}

static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1];
static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];

void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn)
{
    spapr_hcall_fn *slot;

    if (opcode <= MAX_HCALL_OPCODE) {
        assert((opcode & 0x3) == 0);

        slot = &papr_hypercall_table[opcode / 4];
    } else {
        assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX));

        slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];
    }

    assert(!(*slot));
    *slot = fn;
}

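/*
 * Hypercall dispatch. PAPR opcodes are multiples of 4, so the main
 * table is indexed by opcode / 4; the QEMU/KVM private KVMPPC_H_*
 * opcodes live in a separate, densely indexed table.
 */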
target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                             target_ulong *args)
{
    sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());

    if ((opcode <= MAX_HCALL_OPCODE)
        && ((opcode & 0x3) == 0)) {
        spapr_hcall_fn fn = papr_hypercall_table[opcode / 4];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    } else if ((opcode >= KVMPPC_HCALL_BASE) &&
               (opcode <= KVMPPC_HCALL_MAX)) {
        spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE];

        if (fn) {
            return fn(cpu, spapr, opcode, args);
        }
    }

    qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n",
                  opcode);
    return H_FUNCTION;
}

static void hypercall_register_types(void)
{
    /* hcall-pft */
    spapr_register_hypercall(H_ENTER, h_enter);
    spapr_register_hypercall(H_REMOVE, h_remove);
    spapr_register_hypercall(H_PROTECT, h_protect);
    spapr_register_hypercall(H_READ, h_read);

    /* hcall-bulk */
    spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove);

    /* hcall-splpar */
    spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa);
    spapr_register_hypercall(H_CEDE, h_cede);

    /* processor register resource access h-calls */
    spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0);
    spapr_register_hypercall(H_SET_DABR, h_set_dabr);
    spapr_register_hypercall(H_SET_XDABR, h_set_xdabr);
    spapr_register_hypercall(H_PAGE_INIT, h_page_init);
    spapr_register_hypercall(H_SET_MODE, h_set_mode);

    /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate
     * here between the "CI" and the "CACHE" variants, they will use whatever
     * mapping attributes qemu is using. When using KVM, the kernel will
     * enforce the attributes more strongly
     */
    spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load);
    spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store);
    spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi);
    spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf);
    spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop);

    /* qemu/KVM-PPC specific hcalls */
    spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas);

    /* ibm,client-architecture-support support */
    spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
}

type_init(hypercall_register_types)