/*
 * QEMU HPPA CPU
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "qemu/module.h"
#include "exec/exec-all.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
#include "hw/hppa/hppa_hardware.h"

static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
    HPPACPU *cpu = HPPA_CPU(cs);

#ifdef CONFIG_USER_ONLY
    value |= PRIV_USER;
#endif
    cpu->env.iaoq_f = value;
    cpu->env.iaoq_b = value + 4;
}

static vaddr hppa_cpu_get_pc(CPUState *cs)
{
    CPUHPPAState *env = cpu_env(cs);

    return hppa_form_gva_mask(env->gva_offset_mask,
                              (env->psw & PSW_C ? env->iasq_f : 0),
                              env->iaoq_f & -4);
}

void cpu_get_tb_cpu_state(CPUHPPAState *env, vaddr *pc,
                          uint64_t *pcsbase, uint32_t *pflags)
{
    uint32_t flags = 0;
    uint64_t cs_base = 0;

    /*
     * TB lookup assumes that PC contains the complete virtual address.
     * If we leave space+offset separate, we'll get ITLB misses to an
     * incomplete virtual address.  This also means that we must separate
     * out current cpu privilege from the low bits of IAOQ_F.
     */
    *pc = hppa_cpu_get_pc(env_cpu(env));
    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;

    /*
     * The only really interesting case is if IAQ_Back is on the same page
     * as IAQ_Front, so that we can use goto_tb between the blocks.  In all
     * other cases, we'll be ending the TranslationBlock with one insn and
     * not linking between them.
     */
    if (env->iasq_f != env->iasq_b) {
        cs_base |= CS_BASE_DIFFSPACE;
    } else if ((env->iaoq_f ^ env->iaoq_b) & TARGET_PAGE_MASK) {
        cs_base |= CS_BASE_DIFFPAGE;
    } else {
        cs_base |= env->iaoq_b & ~TARGET_PAGE_MASK;
    }

    /* ??? E, T, H, L bits need to be here, when implemented.  */
    flags |= env->psw_n * PSW_N;
    flags |= env->psw_xb;
    flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
    if ((env->sr[4] == env->sr[5])
        & (env->sr[4] == env->sr[6])
        & (env->sr[4] == env->sr[7])) {
        flags |= TB_FLAG_SR_SAME;
    }
    if ((env->psw & PSW_W) &&
        (env->dr[2] & HPPA64_DIAG_SPHASH_ENABLE)) {
        flags |= TB_FLAG_SPHASH;
    }
#endif

    *pcsbase = cs_base;
    *pflags = flags;
}

static void hppa_cpu_synchronize_from_tb(CPUState *cs,
                                         const TranslationBlock *tb)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* IAQ is always up-to-date before goto_tb. */
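    /*
     * Only the PSW bits tracked in separate env fields need recovery
     * from tb->flags: the nullify flag (psw_n) and the X/B bits (psw_xb).
     */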
    cpu->env.psw_n = (tb->flags & PSW_N) != 0;
    cpu->env.psw_xb = tb->flags & (PSW_X | PSW_B);
}

static void hppa_restore_state_to_opc(CPUState *cs,
                                      const TranslationBlock *tb,
                                      const uint64_t *data)
{
    CPUHPPAState *env = cpu_env(cs);

    env->iaoq_f = (env->iaoq_f & TARGET_PAGE_MASK) | data[0];
    if (data[1] != INT32_MIN) {
        env->iaoq_b = env->iaoq_f + data[1];
    }
    env->unwind_breg = data[2];
    /*
     * Since we were executing the instruction at IAOQ_F, and took some
     * sort of action that provoked the cpu_restore_state, we can infer
     * that the instruction was not nullified.
     */
    env->psw_n = 0;
}

#ifndef CONFIG_USER_ONLY
static bool hppa_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
#endif /* !CONFIG_USER_ONLY */

static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPUHPPAState *env = cpu_env(cs);

    if (env->psw & (ifetch ? PSW_C : PSW_D)) {
        return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
    }
    /* mmu disabled */
    return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
}

static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
{
    info->mach = bfd_mach_hppa20;
    info->endian = BFD_ENDIAN_BIG;
    info->print_insn = print_insn_hppa;
}

#ifndef CONFIG_USER_ONLY
static G_NORETURN
void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                  MMUAccessType access_type, int mmu_idx,
                                  uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;

    cs->exception_index = EXCP_UNALIGN;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));

    cpu_loop_exit(cs);
}
#endif /* !CONFIG_USER_ONLY */

static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    HPPACPUClass *acc = HPPA_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_init_vcpu(cs);
    acc->parent_realize(dev, errp);

#ifndef CONFIG_USER_ONLY
    {
        HPPACPU *cpu = HPPA_CPU(cs);

        cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        hppa_cpu_alarm_timer, cpu);
        hppa_ptlbe(&cpu->env);
    }
#endif

    /* Use pc-relative instructions always to simplify the translator. */
    tcg_cflags_set(cs, CF_PCREL);
}

static void hppa_cpu_initfn(Object *obj)
{
    CPUHPPAState *env = cpu_env(CPU(obj));

    env->is_pa20 = !!object_dynamic_cast(obj, TYPE_HPPA64_CPU);
}

static void hppa_cpu_reset_hold(Object *obj, ResetType type)
{
    HPPACPUClass *scc = HPPA_CPU_GET_CLASS(obj);
    CPUState *cs = CPU(obj);
    HPPACPU *cpu = HPPA_CPU(obj);
    CPUHPPAState *env = &cpu->env;

    if (scc->parent_phases.hold) {
        scc->parent_phases.hold(obj, type);
    }
    cs->exception_index = -1;
    cs->halted = 0;
    cpu_set_pc(cs, 0xf0000004);

    memset(env, 0, offsetof(CPUHPPAState, end_reset_fields));

    cpu_hppa_loaded_fr0(env);

    /* 64-bit machines start with space-register hashing enabled in %dr2 */
    env->dr[2] = hppa_is_pa20(env) ? HPPA64_DIAG_SPHASH_ENABLE : 0;

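    /*
     * Reset with only PSW_M set: high-priority machine checks remain
     * masked until software re-enables them.
     */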
    cpu_hppa_put_psw(env, PSW_M);
}

static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
{
    g_autofree char *typename = g_strconcat(cpu_model, "-cpu", NULL);

    return object_class_by_name(typename);
}

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps hppa_sysemu_ops = {
    .has_work = hppa_cpu_has_work,
    .get_phys_page_debug = hppa_cpu_get_phys_page_debug,
};
#endif

#include "accel/tcg/cpu-ops.h"

static const TCGCPUOps hppa_tcg_ops = {
    /* PA-RISC 1.x processors have a strong memory model.  */
    /*
     * ??? While we do not yet implement PA-RISC 2.0, those processors have
     * a weak memory model, but with TLB bits that force ordering on a
     * per-page basis.  It's probably easier to fall back to a strong memory
     * model.
     */
    .guest_default_memory_order = TCG_MO_ALL,

    .initialize = hppa_translate_init,
    .translate_code = hppa_translate_code,
    .synchronize_from_tb = hppa_cpu_synchronize_from_tb,
    .restore_state_to_opc = hppa_restore_state_to_opc,
    .mmu_index = hppa_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill_align = hppa_cpu_tlb_fill_align,
    .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
    .cpu_exec_halt = hppa_cpu_has_work,
    .do_interrupt = hppa_cpu_do_interrupt,
    .do_unaligned_access = hppa_cpu_do_unaligned_access,
    .do_transaction_failed = hppa_cpu_do_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
};

static void hppa_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    HPPACPUClass *acc = HPPA_CPU_CLASS(oc);
    ResettableClass *rc = RESETTABLE_CLASS(oc);

    device_class_set_parent_realize(dc, hppa_cpu_realizefn,
                                    &acc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, hppa_cpu_reset_hold, NULL,
                                       &acc->parent_phases);

    cc->class_by_name = hppa_cpu_class_by_name;
    cc->dump_state = hppa_cpu_dump_state;
    cc->set_pc = hppa_cpu_set_pc;
    cc->get_pc = hppa_cpu_get_pc;
    cc->gdb_read_register = hppa_cpu_gdb_read_register;
    cc->gdb_write_register = hppa_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
    dc->vmsd = &vmstate_hppa_cpu;
    cc->sysemu_ops = &hppa_sysemu_ops;
#endif
    cc->disas_set_info = hppa_cpu_disas_set_info;
    cc->gdb_num_core_regs = 128;
    cc->tcg_ops = &hppa_tcg_ops;
}

static const TypeInfo hppa_cpu_type_infos[] = {
    {
        .name = TYPE_HPPA_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(HPPACPU),
        .instance_align = __alignof(HPPACPU),
        .instance_init = hppa_cpu_initfn,
        .abstract = false,
        .class_size = sizeof(HPPACPUClass),
        .class_init = hppa_cpu_class_init,
    },
    {
        .name = TYPE_HPPA64_CPU,
        .parent = TYPE_HPPA_CPU,
    },
};

DEFINE_TYPES(hppa_cpu_type_infos)