Lines matching the search query: +full:- +full:- +full:enable +full:- +full:fdt

8 #include "qemu/main-loop.h"
10 #include "qemu/error-report.h"
11 #include "exec/tb-flush.h"
18 #include "mmu-hash64.h"
19 #include "cpu-models.h"
22 #include "hw/ppc/fdt.h"
25 #include "mmu-book3s-v3.h"
26 #include "hw/mem/memory-device.h"
31 DeviceMemoryState *dms = machine->device_memory; in is_ram_address()
33 if (addr < machine->ram_size) { in is_ram_address()
36 if (dms && (addr >= dms->base) in is_ram_address()
37 && ((addr - dms->base) < memory_region_size(&dms->mr))) { in is_ram_address()
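
The is_ram_address() fragments above accept an address if it falls below base RAM or inside the device-memory (hotplug) window. A minimal standalone sketch of that check, with plain integers standing in for machine->ram_size and the dms->base/memory_region_size(&dms->mr) pair:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch only: ram_size, dms_base and dms_size stand in for
     * machine->ram_size and the device-memory region above it. */
    static bool is_ram_address_sketch(uint64_t addr, uint64_t ram_size,
                                      bool has_dms, uint64_t dms_base,
                                      uint64_t dms_size)
    {
        if (addr < ram_size) {
            return true;    /* inside base RAM */
        }
        if (has_dms && addr >= dms_base && (addr - dms_base) < dms_size) {
            return true;    /* inside the hotplug window */
        }
        return false;
    }
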
65 case -EPERM: in resize_hpt_convert_rc()
67 case -EINVAL: in resize_hpt_convert_rc()
69 case -ENXIO: in resize_hpt_convert_rc()
71 case -ENOSPC: in resize_hpt_convert_rc()
73 case -EBUSY: in resize_hpt_convert_rc()
75 case -ENOMEM: in resize_hpt_convert_rc()
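
resize_hpt_convert_rc() evidently maps the errno values returned by KVM's HPT-resize ioctls onto PAPR hcall status codes. A hedged sketch of that shape; the H_* names and values below are illustrative stand-ins, not necessarily the exact codes QEMU returns for each errno:

    #include <errno.h>

    /* Illustrative status values; the real ones live in hw/ppc/spapr.h. */
    enum { H_SUCCESS = 0, H_AUTHORITY = -9, H_PARAMETER = -4,
           H_CLOSED = -5, H_RESOURCE = -16, H_BUSY = 1,
           H_NO_MEM = -2, H_HARDWARE = -1 };

    static long resize_hpt_convert_rc_sketch(int rc)
    {
        if (rc >= 0) {
            return H_SUCCESS;
        }
        switch (rc) {
        case -EPERM:  return H_AUTHORITY;  /* resizing not permitted */
        case -EINVAL: return H_PARAMETER;  /* bad shift argument */
        case -ENXIO:  return H_CLOSED;     /* no matching prepared resize */
        case -ENOSPC: return H_RESOURCE;
        case -EBUSY:  return H_BUSY;       /* retry later */
        case -ENOMEM: return H_NO_MEM;
        default:      return H_HARDWARE;
        }
    }
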
92 if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { in h_resize_hpt_prepare()
96 if (!spapr->htab_shift) { in h_resize_hpt_prepare()
111 current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size(); in h_resize_hpt_prepare()
121 if (rc != -ENOSYS) { in h_resize_hpt_prepare()
142 error_report("failed to push sregs to KVM: %s", strerror(-ret)); in do_push_sregs_to_kvm_pr()
152 * This is a hack for the benefit of KVM PR - it abuses the SDR1 in push_sregs_to_kvm_pr()
156 if (!kvm_enabled() || !spapr->htab) { in push_sregs_to_kvm_pr()
174 if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { in h_resize_hpt_commit()
178 if (!spapr->htab_shift) { in h_resize_hpt_commit()
186 if (rc != -ENOSYS) { in h_resize_hpt_commit()
190 spapr->htab_shift = shift; in h_resize_hpt_commit()
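
Both h_resize_hpt_prepare() and h_resize_hpt_commit() open with the same two gates visible above: resizing administratively disabled, and no HPT at all (a radix guest, htab_shift == 0). A sketch of the shared pattern; the specific failure codes are assumptions:

    enum { PROCEED = 0, H_AUTHORITY = -9, H_NOT_AVAILABLE = -10 };  /* illustrative */
    enum { RESIZE_HPT_DISABLED, RESIZE_HPT_ENABLED, RESIZE_HPT_REQUIRED };

    static long hpt_resize_gates_sketch(int resize_hpt_policy,
                                        unsigned htab_shift)
    {
        if (resize_hpt_policy == RESIZE_HPT_DISABLED) {
            return H_AUTHORITY;      /* admin forbade resizing */
        }
        if (!htab_shift) {
            return H_NOT_AVAILABLE;  /* radix guest: no HPT to resize */
        }
        return PROCEED;              /* go on to prepare/commit */
    }
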
210 cpu->env.spr[SPR_SPRG0] = args[0]; in h_set_sprg0()
224 cpu->env.spr[SPR_DABRX] = 0x3; /* Use Problem and Privileged state */ in h_set_dabr()
229 cpu->env.spr[SPR_DABR] = args[0]; in h_set_dabr()
248 cpu->env.spr[SPR_DABRX] = dabrx; in h_set_xdabr()
249 cpu->env.spr[SPR_DABR] = args[0]; in h_set_xdabr()
271 /* Map-in destination */ in h_page_init()
281 /* Map-in source, copy to destination, and unmap source again */ in h_page_init()
325 CPUPPCState *env = &cpu->env; in register_vpa()
335 if (vpa % env->dcache_line_size) { in register_vpa()
340 size = lduw_be_phys(cs->as, vpa + 0x4); in register_vpa()
347 if ((vpa / 4096) != ((vpa + size - 1) / 4096)) { in register_vpa()
351 spapr_cpu->vpa_addr = vpa; in register_vpa()
353 tmp = ldub_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET); in register_vpa()
355 stb_phys(cs->as, spapr_cpu->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp); in register_vpa()
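
register_vpa() validates the area before accepting it: cache-line alignment, a 16-bit size field read at offset 0x4 (presumably checked against a minimum), and the PAPR rule that the VPA must not straddle a 4096-byte page; only then is the shared-processor flag ORed in. The checks as a standalone sketch, with VPA_MIN_SIZE an assumed constant for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    #define VPA_MIN_SIZE 640   /* assumption for the sketch; PAPR defines the real minimum */

    static bool vpa_args_ok_sketch(uint64_t vpa, uint16_t size,
                                   unsigned dcache_line_size)
    {
        if (vpa % dcache_line_size) {
            return false;      /* must be cache-line aligned */
        }
        if (size < VPA_MIN_SIZE) {
            return false;      /* 16-bit size field at vpa + 0x4 too small */
        }
        if ((vpa / 4096) != ((vpa + size - 1) / 4096)) {
            return false;      /* must fit within one 4 KiB page */
        }
        return true;
    }
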
364 if (spapr_cpu->slb_shadow_addr) { in deregister_vpa()
368 if (spapr_cpu->dtl_addr) { in deregister_vpa()
372 spapr_cpu->vpa_addr = 0; in deregister_vpa()
386 size = ldl_be_phys(CPU(cpu)->as, addr + 0x4); in register_slb_shadow()
391 if ((addr / 4096) != ((addr + size - 1) / 4096)) { in register_slb_shadow()
395 if (!spapr_cpu->vpa_addr) { in register_slb_shadow()
399 spapr_cpu->slb_shadow_addr = addr; in register_slb_shadow()
400 spapr_cpu->slb_shadow_size = size; in register_slb_shadow()
409 spapr_cpu->slb_shadow_addr = 0; in deregister_slb_shadow()
410 spapr_cpu->slb_shadow_size = 0; in deregister_slb_shadow()
424 size = ldl_be_phys(CPU(cpu)->as, addr + 0x4); in register_dtl()
430 if (!spapr_cpu->vpa_addr) { in register_dtl()
434 spapr_cpu->dtl_addr = addr; in register_dtl()
435 spapr_cpu->dtl_size = size; in register_dtl()
444 spapr_cpu->dtl_addr = 0; in deregister_dtl()
445 spapr_cpu->dtl_size = 0; in deregister_dtl()
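
Taken together, these fragments suggest an ordering invariant among the three per-vCPU areas: the SLB shadow and dispatch trace log can only be registered while a VPA exists (the !vpa_addr checks fail early), and deregister_vpa() inspects both dependent addresses before clearing anything. A sketch of that invariant, with H_RESOURCE as the assumed failure code:

    #include <stdint.h>

    enum { H_SUCCESS = 0, H_RESOURCE = -16 };   /* illustrative values */

    struct vpa_state {
        uint64_t vpa_addr, slb_shadow_addr, dtl_addr;
        uint32_t dtl_size;
    };

    static long register_dtl_sketch(struct vpa_state *s, uint64_t addr,
                                    uint32_t size)
    {
        if (!s->vpa_addr) {
            return H_RESOURCE;      /* a VPA must be registered first */
        }
        s->dtl_addr = addr;
        s->dtl_size = size;
        return H_SUCCESS;
    }

    static long deregister_vpa_sketch(struct vpa_state *s)
    {
        if (s->slb_shadow_addr || s->dtl_addr) {
            return H_RESOURCE;      /* tear down dependents first */
        }
        s->vpa_addr = 0;
        return H_SUCCESS;
    }
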
496 CPUPPCState *env = &cpu->env; in h_cede()
500 env->msr |= (1ULL << MSR_EE); in h_cede()
504 if (spapr_cpu->prod) { in h_cede()
505 spapr_cpu->prod = false; in h_cede()
510 cs->halted = 1; in h_cede()
511 cs->exception_index = EXCP_HLT; in h_cede()
512 cs->exit_request = 1; in h_cede()
528 if (spapr_cpu->prod) { in h_confer_self()
529 spapr_cpu->prod = false; in h_confer_self()
532 cs->halted = 1; in h_confer_self()
533 cs->exception_index = EXCP_HALTED; in h_confer_self()
534 cs->exit_request = 1; in h_confer_self()
535 ppc_maybe_interrupt(&cpu->env); in h_confer_self()
543 CPUPPCState *env = &cpu->env; in h_join()
547 if (env->msr & (1ULL << MSR_EE)) { in h_join()
553 * for H_CONFER-to-self, but that is probably not intended to be used in h_join()
558 CPUPPCState *e = &c->env; in h_join()
564 if (!cs->halted || (e->msr & (1ULL << MSR_EE))) { in h_join()
587 * -1 means confer to all other CPUs without dispatch counter check, in h_confer()
590 if (target != -1) { in h_confer()
607 if (!spapr_cpu->vpa_addr || ((dispatch & 1) == 0)) { in h_confer()
611 target_dispatch = ldl_be_phys(cs->as, in h_confer()
612 spapr_cpu->vpa_addr + VPA_DISPATCH_COUNTER); in h_confer()
620 * At least for single-threaded tcg, it gives the target a chance to in h_confer()
621 * run before we run again. Multi-threaded tcg does not really do in h_confer()
626 cs->exception_index = EXCP_YIELD; in h_confer()
627 cs->exit_request = 1; in h_confer()
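
The dispatch-counter test in h_confer() leans on a PAPR convention also used by Linux's splpar_spin_yield(): the counter in the target's VPA is even while the vCPU is dispatched and odd while it is preempted. If the caller saw an even count, or the count has changed by the time the hypercall runs, the target no longer needs the cycles and the confer degenerates to a no-op. A sketch of that decision:

    #include <stdbool.h>
    #include <stdint.h>

    /* Parity convention (even = currently dispatched), as both this code
     * and Linux's shared-processor spinlock code appear to read it. */
    static bool confer_is_noop_sketch(bool target_has_vpa,
                                      uint32_t caller_seen_count,
                                      uint32_t current_count)
    {
        if (!target_has_vpa) {
            return true;    /* no VPA: no dispatch counter to consult */
        }
        if ((caller_seen_count & 1) == 0) {
            return true;    /* target was dispatched when the caller looked */
        }
        if (current_count != caller_seen_count) {
            return true;    /* target has run since: confer is stale */
        }
        return false;       /* target still preempted: yield to it */
    }
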
648 spapr_cpu->prod = true; in h_prod()
649 cs->halted = 0; in h_prod()
650 ppc_maybe_interrupt(&cpu->env); in h_prod()
677 args[0] = ldub_phys(cs->as, addr); in h_logical_load()
680 args[0] = lduw_phys(cs->as, addr); in h_logical_load()
683 args[0] = ldl_phys(cs->as, addr); in h_logical_load()
686 args[0] = ldq_phys(cs->as, addr); in h_logical_load()
703 stb_phys(cs->as, addr, val); in h_logical_store()
706 stw_phys(cs->as, addr, val); in h_logical_store()
709 stl_phys(cs->as, addr, val); in h_logical_store()
712 stq_phys(cs->as, addr, val); in h_logical_store()
729 unsigned int mask = (1 << esize) - 1; in h_logical_memop()
741 dst = dst + ((count - 1) << esize); in h_logical_memop()
742 src = src + ((count - 1) << esize); in h_logical_memop()
743 step = -step; in h_logical_memop()
746 while (count--) { in h_logical_memop()
749 tmp = ldub_phys(cs->as, src); in h_logical_memop()
752 tmp = lduw_phys(cs->as, src); in h_logical_memop()
755 tmp = ldl_phys(cs->as, src); in h_logical_memop()
758 tmp = ldq_phys(cs->as, src); in h_logical_memop()
768 stb_phys(cs->as, dst, tmp); in h_logical_memop()
771 stw_phys(cs->as, dst, tmp); in h_logical_memop()
774 stl_phys(cs->as, dst, tmp); in h_logical_memop()
777 stq_phys(cs->as, dst, tmp); in h_logical_memop()
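
h_logical_memop() copies count elements of (1 << esize) bytes each and, for the destructive-overlap case, rewinds both pointers to the last element and negates the step so the copy runs high-to-low. The condition that selects backward copying is not visible in these fragments, so this standalone sketch derives it from pointer overlap, which is one plausible policy:

    #include <stdint.h>
    #include <string.h>

    /* count elements of (1 << esize) bytes, copied forwards or backwards
     * so an overlapping destination does not clobber unread source data. */
    static void memop_copy_sketch(uint8_t *dst, const uint8_t *src,
                                  uint64_t count, unsigned esize)
    {
        const uint64_t ebytes = UINT64_C(1) << esize;
        int64_t step = (int64_t)ebytes;

        if (dst > src && dst < src + count * ebytes) {
            /* forward copy would overwrite the source tail: go backwards */
            dst += (count - 1) << esize;
            src += (count - 1) << esize;
            step = -step;
        }
        while (count--) {
            memcpy(dst, src, ebytes);   /* one element at a time */
            dst += step;
            src += step;
        }
    }
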
807 CPUPPCState *env = &cpu->env; in h_set_mode_resource_set_ciabr()
833 CPUPPCState *env = &cpu->env; in h_set_mode_resource_set_dawr()
900 * AIL-1 is not architected, and AIL-2 is not supported by QEMU spapr. in h_set_mode_resource_addr_trans_mode()
968 * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing in spapr_check_setup_free_hpt()
969 * HASH->RADIX : Free HPT in spapr_check_setup_free_hpt()
970 * RADIX->HASH : Allocate HPT in spapr_check_setup_free_hpt()
971 * NOTHING->HASH : Allocate HPT in spapr_check_setup_free_hpt()
979 /* HASH->RADIX : Free HPT */ in spapr_check_setup_free_hpt()
982 /* RADIX->HASH || NOTHING->HASH : Allocate HPT */ in spapr_check_setup_free_hpt()
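
The transition table in the comments collapses to two actions, which the sketch below transcribes directly; hpt_free_sketch() and hpt_alloc_sketch() are hypothetical stand-ins for QEMU's HPT teardown and allocation paths:

    /* Hypothetical stand-ins for the real HPT management calls. */
    static void hpt_free_sketch(void)  { /* free the hashed page table */ }
    static void hpt_alloc_sketch(void) { /* allocate a hashed page table */ }

    enum mmu_mode { MODE_NOTHING, MODE_HASH, MODE_RADIX };

    static void check_setup_free_hpt_sketch(enum mmu_mode cur,
                                            enum mmu_mode next)
    {
        if (cur == MODE_HASH && next == MODE_RADIX) {
            hpt_free_sketch();       /* HASH->RADIX: free the HPT */
        } else if (next == MODE_HASH && cur != MODE_HASH) {
            hpt_alloc_sketch();      /* RADIX->HASH or NOTHING->HASH */
        }
        /* HASH->HASH, RADIX->RADIX, NOTHING->RADIX: nothing to do */
    }
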
1014 if (proc_tbl & (table_byte_size - 1)) { in h_register_process_table()
1031 /* TODO - Not Supported */ in h_register_process_table()
1052 cproc = spapr->patb_entry & PATE1_GR; in h_register_process_table()
1055 if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATE1_GR)) { in h_register_process_table()
1059 cproc = spapr->patb_entry; in h_register_process_table()
1063 spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc); in h_register_process_table()
1065 spapr->patb_entry = cproc; /* Save new process table */ in h_register_process_table()
1072 if (flags & FLAG_GTSE) /* Guest translation shootdown enable */ in h_register_process_table()
1084 #define H_SIGNAL_SYS_RESET_ALL -1
1085 #define H_SIGNAL_SYS_RESET_ALLBUTSELF -2
1147 if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) { in cas_check_pvr()
1159 trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat); in cas_check_pvr()
1177 void *fdt; in do_client_architecture_support() local
1178 uint32_t max_compat = spapr->max_compat_pvr; in do_client_architecture_support()
1185 if (!cs->halted) { in do_client_architecture_support()
1203 if (cpu->compat_pvr != cas_pvr) { in do_client_architecture_support()
1257 int maxshift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); in do_client_architecture_support()
1259 if (spapr->resize_hpt == SPAPR_RESIZE_HPT_REQUIRED) { in do_client_architecture_support()
1261 … "h_client_architecture_support: Guest doesn't support HPT resizing, but resize-hpt=required"); in do_client_architecture_support()
1265 if (spapr->htab_shift < maxshift) { in do_client_architecture_support()
1267 * pre-emptively resize for the maximum permitted RAM. At in do_client_architecture_support()
1284 spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest); in do_client_architecture_support()
1289 spapr->cas_pre_isa3_guest = !spapr_ovec_test(ov1_guest, OV1_PPC_3_00); in do_client_architecture_support()
1303 if (!spapr->irq->xive) { in do_client_architecture_support()
1305 "Guest requested unavailable interrupt mode (XIVE), try the ic-mode=xive or ic-mode=dual machine pr… in do_client_architecture_support()
1309 if (!spapr->irq->xics) { in do_client_architecture_support()
1311 …lable interrupt mode (XICS), either don't set the ic-mode machine property or try ic-mode=xics or … in do_client_architecture_support()
1319 * Process all pending hot-plug/unplug requests now. An updated full in do_client_architecture_support()
1320 * rendered FDT will be returned to the guest. in do_client_architecture_support()
1329 if ((spapr->patb_entry & PATE1_GR) && !guest_radix) { in do_client_architecture_support()
1334 fdt = spapr_build_fdt(spapr, spapr->vof != NULL, fdt_bufsize); in do_client_architecture_support()
1335 g_free(spapr->fdt_blob); in do_client_architecture_support()
1336 spapr->fdt_size = fdt_totalsize(fdt); in do_client_architecture_support()
1337 spapr->fdt_initial_size = spapr->fdt_size; in do_client_architecture_support()
1338 spapr->fdt_blob = fdt; in do_client_architecture_support()
1341 * Set the machine->fdt pointer again since we just freed in do_client_architecture_support()
1342 * it above (by freeing spapr->fdt_blob). We set this in do_client_architecture_support()
1343 * pointer to enable support for the 'dumpdtb' QMP/HMP in do_client_architecture_support()
1346 MACHINE(spapr)->fdt = fdt; in do_client_architecture_support()
1368 fdt_bufsize -= sizeof(hdr); in h_client_architecture_support()
1372 _FDT((fdt_pack(spapr->fdt_blob))); in h_client_architecture_support()
1373 spapr->fdt_size = fdt_totalsize(spapr->fdt_blob); in h_client_architecture_support()
1374 spapr->fdt_initial_size = spapr->fdt_size; in h_client_architecture_support()
1377 cpu_physical_memory_write(fdt_buf + sizeof(hdr), spapr->fdt_blob, in h_client_architecture_support()
1378 spapr->fdt_size); in h_client_architecture_support()
1379 trace_spapr_cas_continue(spapr->fdt_size + sizeof(hdr)); in h_client_architecture_support()
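
On the success path the rebuilt tree is packed, its size recorded, and the blob copied into guest memory just past a small header, with the working buffer size first reduced by sizeof(hdr). A standalone sketch of the buffer layout; cas_hdr_sketch is a hypothetical header type, since the real one is not visible in these fragments:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical header written ahead of the FDT in the guest buffer. */
    struct cas_hdr_sketch { uint32_t a, b; };

    /* Returns bytes used, or 0 if header + blob do not fit in bufsize. */
    static size_t cas_fill_buffer_sketch(uint8_t *buf, size_t bufsize,
                                         const void *fdt_blob, size_t fdt_size,
                                         const struct cas_hdr_sketch *hdr)
    {
        if (bufsize < sizeof(*hdr)) {
            return 0;
        }
        bufsize -= sizeof(*hdr);          /* space left for the FDT itself */
        if (fdt_size > bufsize) {
            return 0;
        }
        memcpy(buf, hdr, sizeof(*hdr));
        memcpy(buf + sizeof(*hdr), fdt_blob, fdt_size);
        return sizeof(*hdr) + fdt_size;
    }
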
1396 * It is alright to update the FDT here as do_client_architecture_support() in spapr_vof_client_architecture_support()
1399 spapr_vof_client_dt_finalize(spapr, spapr->fdt_blob); in spapr_vof_client_architecture_support()
1480 void *fdt; in h_update_dt() local
1485 if (!smc->update_dt_enabled) { in h_update_dt()
1489 /* Check that the fdt did not grow out of proportion */ in h_update_dt()
1490 if (cb > spapr->fdt_initial_size * 2) { in h_update_dt()
1491 trace_spapr_update_dt_failed_size(spapr->fdt_initial_size, cb, in h_update_dt()
1496 fdt = g_malloc0(cb); in h_update_dt()
1497 cpu_physical_memory_read(dt, fdt, cb); in h_update_dt()
1499 /* Check the fdt consistency */ in h_update_dt()
1500 if (fdt_check_full(fdt, cb)) { in h_update_dt()
1501 trace_spapr_update_dt_failed_check(spapr->fdt_initial_size, cb, in h_update_dt()
1506 g_free(spapr->fdt_blob); in h_update_dt()
1507 spapr->fdt_size = cb; in h_update_dt()
1508 spapr->fdt_blob = fdt; in h_update_dt()
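
h_update_dt() gates a guest-supplied tree behind two checks before adopting it: the blob may not exceed twice the initial FDT size, and it must satisfy libfdt's fdt_check_full(), a real libfdt entry point that returns 0 for a structurally valid tree fitting in the given buffer. The gate as a sketch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <libfdt.h>   /* for fdt_check_full() */

    /* Accept a guest-supplied FDT only if it is not grossly oversized
     * and passes libfdt's full consistency check. */
    static bool update_dt_acceptable_sketch(const void *fdt, size_t cb,
                                            size_t initial_size)
    {
        if (cb > initial_size * 2) {
            return false;             /* grew out of proportion */
        }
        if (fdt_check_full(fdt, cb) != 0) {
            return false;             /* malformed or truncated tree */
        }
        return true;
    }
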
1515 static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1];
1516 static spapr_hcall_fn svm_hypercall_table[(SVM_HCALL_MAX - SVM_HCALL_BASE) / 4 + 1];
1527 /* we only have SVM-related hcall numbers assigned in multiples of 4 */ in spapr_register_hypercall()
1530 slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4]; in spapr_register_hypercall()
1534 slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; in spapr_register_hypercall()
1550 /* we only have SVM-related hcall numbers assigned in multiples of 4 */ in spapr_unregister_hypercall()
1553 slot = &svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4]; in spapr_unregister_hypercall()
1557 slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; in spapr_unregister_hypercall()
1577 spapr_hcall_fn fn = svm_hypercall_table[(opcode - SVM_HCALL_BASE) / 4]; in spapr_hypercall()
1584 spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; in spapr_hypercall()
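
The two dispatch tables are sized and indexed differently, as the fragments show: KVM-specific hcall numbers are dense from their base, while SVM hcall numbers are assigned only in multiples of 4, so the SVM table stores one slot per four opcodes. A sketch of the slot computation; the ranges below are illustrative, with the real constants living in the spapr headers:

    #include <assert.h>
    #include <stdint.h>

    typedef long (*hcall_fn_sketch)(uint64_t opcode, uint64_t *args);

    /* Illustrative opcode ranges, not QEMU's actual values. */
    #define KVMPPC_BASE_SK 0xf000
    #define KVMPPC_MAX_SK  0xf05c
    #define SVM_BASE_SK    0xef00
    #define SVM_MAX_SK     0xef1c

    static hcall_fn_sketch kvmppc_table_sk[KVMPPC_MAX_SK - KVMPPC_BASE_SK + 1];
    static hcall_fn_sketch svm_table_sk[(SVM_MAX_SK - SVM_BASE_SK) / 4 + 1];

    static hcall_fn_sketch *hcall_slot_sketch(uint64_t opcode)
    {
        if (opcode >= SVM_BASE_SK && opcode <= SVM_MAX_SK) {
            /* SVM hcall numbers come only in multiples of 4 */
            assert((opcode & 0x3) == 0);
            return &svm_table_sk[(opcode - SVM_BASE_SK) / 4];
        }
        assert(opcode >= KVMPPC_BASE_SK && opcode <= KVMPPC_MAX_SK);
        return &kvmppc_table_sk[opcode - KVMPPC_BASE_SK];
    }
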
1610 /* hcall-pft */ in hypercall_register_softmmu()
1616 /* hcall-bulk */ in hypercall_register_softmmu()
1625 /* hcall-hpt-resize */ in hypercall_register_types()
1629 /* hcall-splpar */ in hypercall_register_types()
1635 /* hcall-join */ in hypercall_register_types()
1640 /* processor register resource access h-calls */ in hypercall_register_types()
1647 /* In Memory Table MMU h-calls */ in hypercall_register_types()
1652 /* hcall-get-cpu-characteristics */ in hypercall_register_types()
1656 /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate in hypercall_register_types()
1669 /* qemu/KVM-PPC specific hcalls */ in hypercall_register_types()
1672 /* ibm,client-architecture-support support */ in hypercall_register_types()