/*
 * QEMU HPPA CPU
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/lgpl-2.1.html>
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "qemu/timer.h"
#include "cpu.h"
#include "qemu/module.h"
#include "exec/translation-block.h"
#include "exec/target_page.h"
#include "fpu/softfloat.h"
#include "tcg/tcg.h"
#include "hw/hppa/hppa_hardware.h"
#include "accel/tcg/cpu-ops.h"
static void hppa_cpu_set_pc(CPUState *cs, vaddr value)
{
    HPPACPU *cpu = HPPA_CPU(cs);

#ifdef CONFIG_USER_ONLY
    value |= PRIV_USER;
#endif
    cpu->env.iaoq_f = value;
    cpu->env.iaoq_b = value + 4;
}
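
/*
 * Illustrative sketch (comment only, not part of the build): PA-RISC
 * exposes the PC as a two-entry instruction address queue, so setting
 * the PC primes both the front and back offsets.  For example, after
 *
 *     cpu_set_pc(cs, 0xf0000004);
 *
 * we expect iaoq_f == 0xf0000004 and iaoq_b == 0xf0000008 (with the
 * PRIV_USER low bits also set in a user-only build).
 */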

static vaddr hppa_cpu_get_pc(CPUState *cs)
{
    CPUHPPAState *env = cpu_env(cs);

    return hppa_form_gva_mask(env->gva_offset_mask,
                              (env->psw & PSW_C ? env->iasq_f : 0),
                              env->iaoq_f & -4);
}
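
/*
 * Illustrative sketch: the value reported as "the PC" is a global
 * virtual address combining the front instruction space and offset
 * queue entries.  Assuming code translation is enabled (PSW_C set):
 *
 *     space  = iasq_f          (contributes the high bits of the GVA)
 *     offset = iaoq_f & -4     ("& -4" strips the 2-bit privilege level)
 *     pc     = hppa_form_gva_mask(env->gva_offset_mask, space, offset)
 *
 * With PSW_C clear, the space contribution is zero and the PC is a
 * plain absolute offset.
 */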

static TCGTBCPUState hppa_get_tb_cpu_state(CPUState *cs)
{
    CPUHPPAState *env = cpu_env(cs);
    uint32_t flags = 0;
    uint64_t cs_base = 0;
    vaddr pc;

    /*
     * TB lookup assumes that PC contains the complete virtual address.
     * If we leave space+offset separate, we'll get ITLB misses to an
     * incomplete virtual address.  This also means that we must separate
     * out current cpu privilege from the low bits of IAOQ_F.
     */
    pc = hppa_cpu_get_pc(env_cpu(env));
    flags |= (env->iaoq_f & 3) << TB_FLAG_PRIV_SHIFT;

    /*
     * The only really interesting case is if IAQ_Back is on the same page
     * as IAQ_Front, so that we can use goto_tb between the blocks.  In all
     * other cases, we'll be ending the TranslationBlock with one insn and
     * not linking between them.
     */
    if (env->iasq_f != env->iasq_b) {
        cs_base |= CS_BASE_DIFFSPACE;
    } else if ((env->iaoq_f ^ env->iaoq_b) & TARGET_PAGE_MASK) {
        cs_base |= CS_BASE_DIFFPAGE;
    } else {
        cs_base |= env->iaoq_b & ~TARGET_PAGE_MASK;
    }

    /* ??? E, T, H, L bits need to be here, when implemented.  */
    flags |= env->psw_n * PSW_N;
    flags |= env->psw_xb;
    flags |= env->psw & (PSW_W | PSW_C | PSW_D | PSW_P);

#ifdef CONFIG_USER_ONLY
    flags |= TB_FLAG_UNALIGN * !env_cpu(env)->prctl_unalign_sigbus;
#else
    if ((env->sr[4] == env->sr[5])
        & (env->sr[4] == env->sr[6])
        & (env->sr[4] == env->sr[7])) {
        flags |= TB_FLAG_SR_SAME;
    }
    if ((env->psw & PSW_W) &&
        (env->dr[2] & HPPA64_DIAG_SPHASH_ENABLE)) {
        flags |= TB_FLAG_SPHASH;
    }
#endif

    return (TCGTBCPUState){ .pc = pc, .flags = flags, .cs_base = cs_base };
}
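
/*
 * Illustrative sketch of the cs_base encoding chosen above, assuming a
 * 4 KiB TARGET_PAGE_SIZE:
 *
 *     iasq_f != iasq_b                     -> cs_base = CS_BASE_DIFFSPACE
 *     iaoq_f = 0x1000, iaoq_b = 0x2000     -> cs_base = CS_BASE_DIFFPAGE
 *     iaoq_f = 0x1000, iaoq_b = 0x1004     -> cs_base = 0x004
 *
 * Only the last case (both queue entries on the same page) lets the
 * translator chain blocks with goto_tb; the other two end the TB after
 * a single insn.
 */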

static void hppa_cpu_synchronize_from_tb(CPUState *cs,
                                         const TranslationBlock *tb)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* IAQ is always up-to-date before goto_tb. */
    cpu->env.psw_n = (tb->flags & PSW_N) != 0;
    cpu->env.psw_xb = tb->flags & (PSW_X | PSW_B);
}

static void hppa_restore_state_to_opc(CPUState *cs,
                                      const TranslationBlock *tb,
                                      const uint64_t *data)
{
    CPUHPPAState *env = cpu_env(cs);

    env->iaoq_f = (env->iaoq_f & TARGET_PAGE_MASK) | data[0];
    if (data[1] != INT32_MIN) {
        env->iaoq_b = env->iaoq_f + data[1];
    }
    env->unwind_breg = data[2];
    /*
     * Since we were executing the instruction at IAOQ_F, and took some
     * sort of action that provoked the cpu_restore_state, we can infer
     * that the instruction was not nullified.
     */
    env->psw_n = 0;
}
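
/*
 * Illustrative sketch of the per-insn data[] record consumed above, as
 * recorded by the translator at each insn_start:
 *
 *     data[0]  in-page offset of IAOQ_F for the insn being restored
 *     data[1]  displacement IAOQ_B - IAOQ_F, or the INT32_MIN sentinel
 *              when the back of the queue cannot be recomputed from
 *              IAOQ_F (in which case iaoq_b is left untouched)
 *     data[2]  base register number used for unwinding (unwind_breg)
 */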

#ifndef CONFIG_USER_ONLY
static bool hppa_cpu_has_work(CPUState *cs)
{
    return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI);
}
#endif /* !CONFIG_USER_ONLY */

static int hppa_cpu_mmu_index(CPUState *cs, bool ifetch)
{
    CPUHPPAState *env = cpu_env(cs);

    if (env->psw & (ifetch ? PSW_C : PSW_D)) {
        return PRIV_P_TO_MMU_IDX(env->iaoq_f & 3, env->psw & PSW_P);
    }
    /* mmu disabled */
    return env->psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
}
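
/*
 * Illustrative sketch: PSW_C gates instruction address translation and
 * PSW_D gates data address translation, so fetch and load/store can
 * resolve to different MMU indexes from the same state.  E.g. with
 * PSW_D set but PSW_C clear, at privilege level 0:
 *
 *     hppa_cpu_mmu_index(cs, true)   -> MMU_ABS_IDX or MMU_ABS_W_IDX
 *     hppa_cpu_mmu_index(cs, false)  -> PRIV_P_TO_MMU_IDX(0, env->psw & PSW_P)
 */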

static void hppa_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
{
    info->mach = bfd_mach_hppa20;
    info->endian = BFD_ENDIAN_BIG;
    info->print_insn = print_insn_hppa;
}

#ifndef CONFIG_USER_ONLY
static G_NORETURN
void hppa_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                  MMUAccessType access_type, int mmu_idx,
                                  uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;

    cs->exception_index = EXCP_UNALIGN;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, MMU_IDX_MMU_DISABLED(mmu_idx));

    cpu_loop_exit(cs);
}
#endif /* !CONFIG_USER_ONLY */

static void hppa_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    HPPACPUClass *acc = HPPA_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_init_vcpu(cs);
    acc->parent_realize(dev, errp);

#ifndef CONFIG_USER_ONLY
    {
        HPPACPU *cpu = HPPA_CPU(cs);

        cpu->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                        hppa_cpu_alarm_timer, cpu);
        hppa_ptlbe(&cpu->env);
    }
#endif

    /* Use pc-relative instructions always to simplify the translator. */
    tcg_cflags_set(cs, CF_PCREL);
}

static void hppa_cpu_initfn(Object *obj)
{
    CPUHPPAState *env = cpu_env(CPU(obj));

    env->is_pa20 = !!object_dynamic_cast(obj, TYPE_HPPA64_CPU);
}

static void hppa_cpu_reset_hold(Object *obj, ResetType type)
{
    HPPACPUClass *scc = HPPA_CPU_GET_CLASS(obj);
    CPUState *cs = CPU(obj);
    HPPACPU *cpu = HPPA_CPU(obj);
    CPUHPPAState *env = &cpu->env;

    if (scc->parent_phases.hold) {
        scc->parent_phases.hold(obj, type);
    }
    cs->exception_index = -1;
    cs->halted = 0;
    cpu_set_pc(cs, 0xf0000004);

    memset(env, 0, offsetof(CPUHPPAState, end_reset_fields));

    cpu_hppa_loaded_fr0(env);

    /* 64-bit machines start with space-register hashing enabled in %dr2 */
    env->dr[2] = hppa_is_pa20(env) ? HPPA64_DIAG_SPHASH_ENABLE : 0;

    cpu_hppa_put_psw(env, PSW_M);
}
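
/*
 * Illustrative sketch of the post-reset state established above:
 *
 *     iaoq_f = 0xf0000004, iaoq_b = 0xf0000008   (firmware entry)
 *     psw    = PSW_M only, so PSW_C/PSW_D (translation) are clear
 *     dr[2]  = HPPA64_DIAG_SPHASH_ENABLE         (pa2.0 CPUs only)
 *
 * With both translation bits clear, the first fetches go through the
 * absolute (MMU-disabled) index from hppa_cpu_mmu_index() until
 * firmware enables translation.
 */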

static ObjectClass *hppa_cpu_class_by_name(const char *cpu_model)
{
    g_autofree char *typename = g_strconcat(cpu_model, "-cpu", NULL);

    return object_class_by_name(typename);
}
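
/*
 * Illustrative sketch: the model name from the command line is mapped
 * to a QOM type name by appending "-cpu":
 *
 *     -cpu hppa    -> "hppa-cpu"    (TYPE_HPPA_CPU)
 *     -cpu hppa64  -> "hppa64-cpu"  (TYPE_HPPA64_CPU)
 *
 * Unknown models simply fail the object_class_by_name() lookup and
 * return NULL.
 */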

#ifndef CONFIG_USER_ONLY
#include "hw/core/sysemu-cpu-ops.h"

static const struct SysemuCPUOps hppa_sysemu_ops = {
    .has_work = hppa_cpu_has_work,
    .get_phys_page_debug = hppa_cpu_get_phys_page_debug,
};
#endif

static const TCGCPUOps hppa_tcg_ops = {
    /* PA-RISC 1.x processors have a strong memory model.  */
    /*
     * ??? While we do not yet implement PA-RISC 2.0, those processors have
     * a weak memory model, but with TLB bits that force ordering on a
     * per-page basis.  It's probably easier to fall back to a strong memory
     * model.
     */
    .guest_default_memory_order = TCG_MO_ALL,
    .mttcg_supported = true,

    .initialize = hppa_translate_init,
    .translate_code = hppa_translate_code,
    .get_tb_cpu_state = hppa_get_tb_cpu_state,
    .synchronize_from_tb = hppa_cpu_synchronize_from_tb,
    .restore_state_to_opc = hppa_restore_state_to_opc,
    .mmu_index = hppa_cpu_mmu_index,

#ifndef CONFIG_USER_ONLY
    .tlb_fill_align = hppa_cpu_tlb_fill_align,
    .pointer_wrap = cpu_pointer_wrap_notreached,
    .cpu_exec_interrupt = hppa_cpu_exec_interrupt,
    .cpu_exec_halt = hppa_cpu_has_work,
    .cpu_exec_reset = cpu_reset,
    .do_interrupt = hppa_cpu_do_interrupt,
    .do_unaligned_access = hppa_cpu_do_unaligned_access,
    .do_transaction_failed = hppa_cpu_do_transaction_failed,
#endif /* !CONFIG_USER_ONLY */
};

static void hppa_cpu_class_init(ObjectClass *oc, const void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    HPPACPUClass *acc = HPPA_CPU_CLASS(oc);
    ResettableClass *rc = RESETTABLE_CLASS(oc);

    device_class_set_parent_realize(dc, hppa_cpu_realizefn,
                                    &acc->parent_realize);

    resettable_class_set_parent_phases(rc, NULL, hppa_cpu_reset_hold, NULL,
                                       &acc->parent_phases);

    cc->class_by_name = hppa_cpu_class_by_name;
    cc->dump_state = hppa_cpu_dump_state;
    cc->set_pc = hppa_cpu_set_pc;
    cc->get_pc = hppa_cpu_get_pc;
    cc->gdb_read_register = hppa_cpu_gdb_read_register;
    cc->gdb_write_register = hppa_cpu_gdb_write_register;
#ifndef CONFIG_USER_ONLY
    dc->vmsd = &vmstate_hppa_cpu;
    cc->sysemu_ops = &hppa_sysemu_ops;
#endif
    cc->disas_set_info = hppa_cpu_disas_set_info;
    cc->gdb_num_core_regs = 128;
    cc->tcg_ops = &hppa_tcg_ops;
}

static const TypeInfo hppa_cpu_type_infos[] = {
    {
        .name = TYPE_HPPA_CPU,
        .parent = TYPE_CPU,
        .instance_size = sizeof(HPPACPU),
        .instance_align = __alignof(HPPACPU),
        .instance_init = hppa_cpu_initfn,
        .abstract = false,
        .class_size = sizeof(HPPACPUClass),
        .class_init = hppa_cpu_class_init,
    },
    {
        .name = TYPE_HPPA64_CPU,
        .parent = TYPE_HPPA_CPU,
    },
};

DEFINE_TYPES(hppa_cpu_type_infos)
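
/*
 * Illustrative usage sketch (board code, not part of this file):
 *
 *     HPPACPU *cpu = HPPA_CPU(cpu_create(TYPE_HPPA64_CPU));
 *
 * cpu_create() runs hppa_cpu_initfn() for both registered types
 * (setting env.is_pa20 for the 64-bit one via the dynamic cast check)
 * and then realizes the device, which ends in hppa_cpu_realizefn().
 */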