xref: /qemu/target/hppa/translate.c (revision 7cef6d686309e2792186504ae17cf4f3eb57ef68)
1 /*
2  * HPPA emulation cpu translation for qemu.
3  *
4  * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 
20 #include "qemu/osdep.h"
21 #include "cpu.h"
22 #include "qemu/host-utils.h"
23 #include "exec/page-protection.h"
24 #include "tcg/tcg-op.h"
25 #include "tcg/tcg-op-gvec.h"
26 #include "exec/helper-proto.h"
27 #include "exec/helper-gen.h"
28 #include "exec/translator.h"
29 #include "exec/translation-block.h"
30 #include "exec/target_page.h"
31 #include "exec/log.h"
32 
33 #define HELPER_H "helper.h"
34 #include "exec/helper-info.c.inc"
35 #undef  HELPER_H
36 
37 /* Choose to use explicit sizes within this file. */
38 #undef tcg_temp_new
39 
/*
 * A run-time condition: true when (a0 <c> a1) holds.
 * When c is TCG_COND_NEVER or TCG_COND_ALWAYS, a0/a1 are unused (NULL).
 */
typedef struct DisasCond {
    TCGCond c;
    TCGv_i64 a0, a1;
} DisasCond;
44 
/* One element of the instruction address queue: space + offset. */
typedef struct DisasIAQE {
    /* IASQ; may be null for no change from TB. */
    TCGv_i64 space;
    /* IAOQ base; may be null for relative address. */
    TCGv_i64 base;
    /* IAOQ addend; if base is null, relative to cpu_iaoq_f. */
    int64_t disp;
} DisasIAQE;
53 
/*
 * A deferred exception: code at @lab raises @excp with the state
 * captured below.  Entries are chained on ctx->delay_excp_list.
 */
typedef struct DisasDelayException {
    struct DisasDelayException *next;
    TCGLabel *lab;
    uint32_t insn;
    bool set_iir;      /* store @insn into CR[IIR] before raising */
    int8_t set_n;      /* new PSW[N]: 0, 1, or -1 for no change */
    uint8_t excp;
    /* Saved state at parent insn. */
    DisasIAQE iaq_f, iaq_b;
} DisasDelayException;
64 
/* Per-translation-block state for the HPPA front end. */
typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    /* IAQ_Front, IAQ_Back. */
    DisasIAQE iaq_f, iaq_b;
    /* IAQ_Next, for jumps, otherwise null for simple advance. */
    DisasIAQE iaq_j, *iaq_n;

    /* IAOQ_Front at entry to TB. */
    uint64_t iaoq_first;
    uint64_t gva_offset_mask;

    /* Condition under which the *next* insn is nullified. */
    DisasCond null_cond;
    /* Branch target for skipping the current (nullified) insn. */
    TCGLabel *null_lab;

    DisasDelayException *delay_excp_list;
    TCGv_i64 zero;               /* constant zero temporary */

    uint32_t insn;               /* current instruction word */
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    uint32_t psw_xb;             /* known value of PSW[X,B] */
    bool psw_n_nonzero;          /* PSW[N] global may be nonzero */
    bool psw_b_next;
    bool is_pa20;
    bool insn_start_updated;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;
98 
/*
 * User-only emulation honours the per-context alignment setting and
 * never has the MMU disabled; system emulation always requires natural
 * alignment, and the MMU state is derived from the mmu_idx.
 */
#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)       (C)->unalign
#define MMU_DISABLED(C)  false
#else
#define UNALIGN(C)       MO_ALIGN
#define MMU_DISABLED(C)  MMU_IDX_MMU_DISABLED((C)->mmu_idx)
#endif
106 
107 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
expand_sm_imm(DisasContext * ctx,int val)108 static int expand_sm_imm(DisasContext *ctx, int val)
109 {
110     /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
111     if (ctx->is_pa20) {
112         if (val & PSW_SM_W) {
113             val |= PSW_W;
114         }
115         val &= ~(PSW_SM_W | PSW_SM_E | PSW_G);
116     } else {
117         val &= ~(PSW_SM_W | PSW_SM_E | PSW_O);
118     }
119     return val;
120 }
121 
/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    /* ctx is unused; the signature is fixed by the decodetree expander. */
    return ~val;
}
127 
128 /* Convert the M:A bits within a memory insn to the tri-state value
129    we use for the final M.  */
ma_to_m(DisasContext * ctx,int val)130 static int ma_to_m(DisasContext *ctx, int val)
131 {
132     return val & 2 ? (val & 1 ? -1 : 1) : 0;
133 }
134 
135 /* Convert the sign of the displacement to a pre or post-modify.  */
pos_to_m(DisasContext * ctx,int val)136 static int pos_to_m(DisasContext *ctx, int val)
137 {
138     return val ? 1 : -1;
139 }
140 
/* As pos_to_m, but with the opposite sense.  */
static int neg_to_m(DisasContext *ctx, int val)
{
    if (val) {
        return -1;
    }
    return 1;
}
145 
/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    /* Scale a word displacement to bytes (insns are 4 bytes). */
    return val << 2;
}
151 
/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    /* Place the field at bit 11, the low part of assemble_21. */
    return val << 11;
}
157 
static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
168 
/* Expander for assemble_16a(s,cat(im10a,0),i). */
static int expand_11a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [4:15].
     * Swizzle thing around depending on PSW.W.
     */
    int im10a = extract32(val, 1, 10);
    int s = extract32(val, 11, 2);
    /* Bit 0 is the sign: -(val & 1) is all-ones if set, sign-extending
       from bit 13; im10a fills bits [12:3]. */
    int i = (-(val & 1) << 13) | (im10a << 3);

    /* In wide mode, the space field folds into the displacement. */
    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
185 
/* Expander for assemble_16a(s,im11a,i). */
static int expand_12a(DisasContext *ctx, int val)
{
    /*
     * @val is bit 0 and bits [3:15].
     * Swizzle thing around depending on PSW.W.
     */
    int im11a = extract32(val, 1, 11);
    int s = extract32(val, 12, 2);
    /* Bit 0 is the sign, sign-extended from bit 13; im11a at [12:2]. */
    int i = (-(val & 1) << 13) | (im11a << 2);

    /* In wide mode, the space field folds into the displacement. */
    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
202 
/* Expander for assemble_16(s,im14). */
static int expand_16(DisasContext *ctx, int val)
{
    /*
     * @val is bits [0:15], containing both im14 and s.
     * Swizzle thing around depending on PSW.W.
     */
    int s = extract32(val, 14, 2);
    /* Bit 0 is the sign, sign-extended from bit 13. */
    int i = (-(val & 1) << 13) | extract32(val, 1, 13);

    /* In wide mode, the space field folds into the displacement. */
    if (ctx->tb_flags & PSW_W) {
        i ^= s << 13;
    }
    return i;
}
218 
219 /* The sp field is only present with !PSW_W. */
sp0_if_wide(DisasContext * ctx,int sp)220 static int sp0_if_wide(DisasContext *ctx, int sp)
221 {
222     return ctx->tb_flags & PSW_W ? 0 : sp;
223 }
224 
/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    /* The encoding reuses 0 for "*<<" (unsigned <), i.e. condition 4. */
    return val ? val : 4; /* 0 == "*<<" */
}
230 
/*
 * In many places pa1.x did not decode the bit that later became
 * the pa2.0 D bit.  Suppress D unless the cpu is pa2.0.
 */
static int pa20_d(DisasContext *ctx, int val)
{
    /* Bitwise & suffices: is_pa20 is bool, and val is the single-bit
       D field from decode.  Yields val for pa2.0, else 0. */
    return ctx->is_pa20 & val;
}
239 
240 /* Include the auto-generated decoder.  */
241 #include "decode-insns.c.inc"
242 
/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT      DISAS_TARGET_2
#define DISAS_EXIT                  DISAS_TARGET_3

/* global register indexes */
static TCGv_i64 cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;        /* alias of env sr[4]; see translate_init */
static TCGv_i64 cpu_iaoq_f;     /* IA offset queue, front */
static TCGv_i64 cpu_iaoq_b;     /* IA offset queue, back */
static TCGv_i64 cpu_iasq_f;     /* IA space queue, front */
static TCGv_i64 cpu_iasq_b;     /* IA space queue, back */
static TCGv_i64 cpu_sar;
static TCGv_i64 cpu_psw_n;
static TCGv_i64 cpu_psw_v;
static TCGv_i64 cpu_psw_cb;
static TCGv_i64 cpu_psw_cb_msb;
static TCGv_i32 cpu_psw_xb;
270 
/* Create the TCG globals that mirror CPUHPPAState fields.
   Called once, at translator initialization. */
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_i64 *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        /* SAR lives in the control register file, hence no DEF_VAR. */
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    /* GR0 always reads as zero; no backing global. */
    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    /* srH aliases sr[4], used when SR4-7 are known identical. */
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_psw_xb = tcg_global_mem_new_i32(tcg_env,
                                        offsetof(CPUHPPAState, psw_xb),
                                        "psw_xb");
    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
332 
/* Record @breg in slot 2 of the current insn_start op.
   NOTE(review): presumably consumed on restore/unwind — confirm against
   the target's insn-start parameter usage.  May run at most once per insn. */
static void set_insn_breg(DisasContext *ctx, int breg)
{
    assert(!ctx->insn_start_updated);
    ctx->insn_start_updated = true;
    tcg_set_insn_start_param(ctx->base.insn_start, 2, breg);
}
339 
cond_make_f(void)340 static DisasCond cond_make_f(void)
341 {
342     return (DisasCond){
343         .c = TCG_COND_NEVER,
344         .a0 = NULL,
345         .a1 = NULL,
346     };
347 }
348 
cond_make_t(void)349 static DisasCond cond_make_t(void)
350 {
351     return (DisasCond){
352         .c = TCG_COND_ALWAYS,
353         .a0 = NULL,
354         .a1 = NULL,
355     };
356 }
357 
/* Condition that is true when PSW[N] is set. */
static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_i64(0)
    };
}
366 
cond_make_tt(TCGCond c,TCGv_i64 a0,TCGv_i64 a1)367 static DisasCond cond_make_tt(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
368 {
369     assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
370     return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
371 }
372 
/* As cond_make_tt, but comparing against an immediate. */
static DisasCond cond_make_ti(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    return cond_make_tt(c, a0, tcg_constant_i64(imm));
}
377 
/* As cond_make_ti, but snapshot @a0 into a fresh temp so that later
   writes to @a0 do not change the condition. */
static DisasCond cond_make_vi(TCGCond c, TCGv_i64 a0, uint64_t imm)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_ti(c, tmp, imm);
}
384 
/* As cond_make_tt, but snapshot both operands into fresh temps. */
static DisasCond cond_make_vv(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tt(c, t0, t1);
}
394 
load_gpr(DisasContext * ctx,unsigned reg)395 static TCGv_i64 load_gpr(DisasContext *ctx, unsigned reg)
396 {
397     if (reg == 0) {
398         return ctx->zero;
399     } else {
400         return cpu_gr[reg];
401     }
402 }
403 
dest_gpr(DisasContext * ctx,unsigned reg)404 static TCGv_i64 dest_gpr(DisasContext *ctx, unsigned reg)
405 {
406     if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
407         return tcg_temp_new_i64();
408     } else {
409         return cpu_gr[reg];
410     }
411 }
412 
/* Store @t into @dest, unless the current insn is being nullified,
   in which case @dest retains its previous value. */
static void save_or_nullify(DisasContext *ctx, TCGv_i64 dest, TCGv_i64 t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* dest = null_cond ? dest (unchanged) : t */
        tcg_gen_movcond_i64(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_i64(dest, t);
    }
}
422 
/* Write @t to general register @reg, honoring nullification;
   writes to GR0 are discarded. */
static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_i64 t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
429 
430 #if HOST_BIG_ENDIAN
431 # define HI_OFS  0
432 # define LO_OFS  4
433 #else
434 # define HI_OFS  4
435 # define LO_OFS  0
436 #endif
437 
/* Load one 32-bit half of double FR[rt & 31] into a new temp.
   Bit 5 of @rt selects the half (LO_OFS when set, else HI_OFS);
   the HI/LO_OFS defines account for host endianness. */
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}
446 
load_frw0_i32(unsigned rt)447 static TCGv_i32 load_frw0_i32(unsigned rt)
448 {
449     if (rt == 0) {
450         TCGv_i32 ret = tcg_temp_new_i32();
451         tcg_gen_movi_i32(ret, 0);
452         return ret;
453     } else {
454         return load_frw_i32(rt);
455     }
456 }
457 
/* As load_frw0_i32, but zero-extend the selected word into an i64. */
static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        /* FR0 reads as zero. */
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}
470 
/* Store @val to the 32-bit half of FR[rt & 31] selected by bit 5 of @rt. */
static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}
477 
478 #undef HI_OFS
479 #undef LO_OFS
480 
/* Load double FR[rt] into a new temp. */
static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}
487 
load_frd0(unsigned rt)488 static TCGv_i64 load_frd0(unsigned rt)
489 {
490     if (rt == 0) {
491         TCGv_i64 ret = tcg_temp_new_i64();
492         tcg_gen_movi_i64(ret, 0);
493         return ret;
494     } else {
495         return load_frd(rt);
496     }
497 }
498 
/* Store @val to double FR[rt]. */
static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}
503 
/* Load space register @reg into @dest. */
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    /* User emulation: all spaces are zero. */
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        /* SR0-SR3 are TCG globals. */
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        /* SR4-SR7 are known to hold the same value; use the srH alias. */
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
518 
/*
 * Write a value to psw_xb, bearing in mind the known value.
 * To be used just before exiting the TB, so do not update the known value.
 */
static void store_psw_xb(DisasContext *ctx, uint32_t xb)
{
    tcg_debug_assert(xb == 0 || xb == PSW_B);
    /* Skip the store when the global already holds this value. */
    if (ctx->psw_xb != xb) {
        tcg_gen_movi_i32(cpu_psw_xb, xb);
    }
}
530 
/* Write a value to psw_xb, and update the known value. */
static void set_psw_xb(DisasContext *ctx, uint32_t xb)
{
    store_psw_xb(ctx, xb);
    ctx->psw_xb = xb;
}
537 
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.
   Emits a branch to ctx->null_lab; pair with nullify_end.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop.  */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new_i64();
            tcg_gen_mov_i64(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }

        /* Branch past the implementation when the insn is nullified. */
        tcg_gen_brcond_i64(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        ctx->null_cond = cond_make_f();
    }
}
566 
/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        /* Unconditional: ensure PSW[N] is clear if it might not be. */
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_i64(cpu_psw_n, 0);
        }
        return;
    }
    /* If the condition already tests cpu_psw_n itself, the global
       already holds the right value; otherwise materialize it. */
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_i64(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    ctx->null_cond = cond_make_f();
}
583 
/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    /* Elide the store when PSW[N] is known clear and X is false. */
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_i64(cpu_psw_n, x);
    }
}
593 
/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);
    /* Taken branches are handled manually. */
    assert(!ctx->psw_b_next);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    /* The nullified path falls through, so translation continues. */
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
634 
iaqe_variable(const DisasIAQE * e)635 static bool iaqe_variable(const DisasIAQE *e)
636 {
637     return e->base || e->space;
638 }
639 
/* Return a copy of @e advanced by @disp bytes. */
static DisasIAQE iaqe_incr(const DisasIAQE *e, int64_t disp)
{
    return (DisasIAQE){
        .space = e->space,
        .base = e->base,
        .disp = e->disp + disp,
    };
}
648 
/* IAQ entry for a direct branch: iaq_f + 8 + disp, in the current
   back space. */
static DisasIAQE iaqe_branchi(DisasContext *ctx, int64_t disp)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .disp = ctx->iaq_f.disp + 8 + disp,
    };
}
656 
/* IAQ entry for an indirect branch: absolute offset in @var,
   in the current back space. */
static DisasIAQE iaqe_next_absv(DisasContext *ctx, TCGv_i64 var)
{
    return (DisasIAQE){
        .space = ctx->iaq_b.space,
        .base = var,
    };
}
664 
/* Compute the IAOQ value of @src into @dest: base plus displacement,
   where a null base means relative to cpu_iaoq_f. */
static void copy_iaoq_entry(DisasContext *ctx, TCGv_i64 dest,
                            const DisasIAQE *src)
{
    tcg_gen_addi_i64(dest, src->base ? : cpu_iaoq_f, src->disp);
}
670 
/* Commit front/back IAQ entries to the cpu_iaoq/iasq globals.
   @b may be NULL, meaning front + 4.  Ordering matters because the
   sources may themselves be the globals being overwritten. */
static void install_iaq_entries(DisasContext *ctx, const DisasIAQE *f,
                                const DisasIAQE *b)
{
    DisasIAQE b_next;

    if (b == NULL) {
        b_next = iaqe_incr(f, 4);
        b = &b_next;
    }

    /*
     * There is an edge case
     *    bv   r0(rN)
     *    b,l  disp,r0
     * for which F will use cpu_iaoq_b (from the indirect branch),
     * and B will use cpu_iaoq_f (from the direct branch).
     * In this case we need an extra temporary.
     */
    if (f->base != cpu_iaoq_b) {
        /* Safe to write back before front. */
        copy_iaoq_entry(ctx, cpu_iaoq_b, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
    } else if (f->base == b->base) {
        /* Both relative to the same base: derive back from new front. */
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_addi_i64(cpu_iaoq_b, cpu_iaoq_f, b->disp - f->disp);
    } else {
        /* The edge case described above: stage back in a temp. */
        TCGv_i64 tmp = tcg_temp_new_i64();
        copy_iaoq_entry(ctx, tmp, b);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f);
        tcg_gen_mov_i64(cpu_iaoq_b, tmp);
    }

    if (f->space) {
        tcg_gen_mov_i64(cpu_iasq_f, f->space);
    }
    if (b->space || f->space) {
        tcg_gen_mov_i64(cpu_iasq_b, b->space ? : f->space);
    }
}
709 
/* Write the return address (iaq_b + 4) into GR[link]; when @with_sr0,
   also copy IASQ_Back into SR0.  @link == 0 means no link register. */
static void install_link(DisasContext *ctx, unsigned link, bool with_sr0)
{
    /* Caller must have resolved any pending nullification. */
    tcg_debug_assert(ctx->null_cond.c == TCG_COND_NEVER);
    if (!link) {
        return;
    }
    DisasIAQE next = iaqe_incr(&ctx->iaq_b, 4);
    copy_iaoq_entry(ctx, cpu_gr[link], &next);
#ifndef CONFIG_USER_ONLY
    if (with_sr0) {
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_b);
    }
#endif
}
724 
/* Emit a call to the exception helper for @exception. */
static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}
729 
/* Raise @exception now: commit the IAQ and PSW[N] state first,
   then end the TB. */
static void gen_excp(DisasContext *ctx, int exception)
{
    install_iaq_entries(ctx, &ctx->iaq_f, &ctx->iaq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}
737 
/* Allocate a deferred-exception record for @excp, capturing the
   current insn and IAQ state, and chain it on ctx->delay_excp_list
   for later emission.  Returns the record; branch to e->lab to raise. */
static DisasDelayException *delay_excp(DisasContext *ctx, uint8_t excp)
{
    DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException));

    memset(e, 0, sizeof(*e));
    e->next = ctx->delay_excp_list;
    ctx->delay_excp_list = e;

    e->lab = gen_new_label();
    e->insn = ctx->insn;
    e->set_iir = true;
    /* Only touch PSW[N] if it might currently be nonzero. */
    e->set_n = ctx->psw_n_nonzero ? 0 : -1;
    e->excp = excp;
    e->iaq_f = ctx->iaq_f;
    e->iaq_b = ctx->iaq_b;

    return e;
}
756 
/* Raise @exc with CR[IIR] set to the current insn.  If the insn may be
   nullified, route through a deferred exception taken only on the
   path where the insn actually executes. */
static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        tcg_gen_st_i64(tcg_constant_i64(ctx->insn),
                       tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
        gen_excp(ctx, exc);
    } else {
        /* Branch to the exception when the insn is NOT nullified. */
        DisasDelayException *e = delay_excp(ctx, exc);
        tcg_gen_brcond_i64(tcg_invert_cond(ctx->null_cond.c),
                           ctx->null_cond.a0, ctx->null_cond.a1, e->lab);
        ctx->null_cond = cond_make_f();
    }
    return true;
}
771 
/* Raise an illegal-instruction exception for the current insn. */
static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
776 
/* Raise EXCP unless running at the most privileged level (0).
   User emulation is never privileged, so always raise there. */
#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif
788 
/* A direct goto_tb is possible only when both IAQ entries are
   TB-relative constants and the translator allows chaining to the
   front target. */
static bool use_goto_tb(DisasContext *ctx, const DisasIAQE *f,
                        const DisasIAQE *b)
{
    return (!iaqe_variable(f) &&
            (b == NULL || !iaqe_variable(b)) &&
            translator_use_goto_tb(&ctx->base, ctx->iaoq_first + f->disp));
}
796 
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    /* The back address must be a constant on the same page as the
       start of the TB. */
    return (!(tb_cflags(ctx->base.tb) & CF_BP_PAGE)
            && !iaqe_variable(&ctx->iaq_b)
            && (((ctx->iaoq_first + ctx->iaq_b.disp) ^ ctx->iaoq_first)
                & TARGET_PAGE_MASK) == 0);
}
808 
/* End the TB jumping to IAQ (f, b): chain via goto_tb slot @which
   when possible, otherwise fall back to an indirect TB lookup. */
static void gen_goto_tb(DisasContext *ctx, int which,
                        const DisasIAQE *f, const DisasIAQE *b)
{
    install_iaq_entries(ctx, f, b);
    if (use_goto_tb(ctx, f, b)) {
        tcg_gen_goto_tb(which);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        tcg_gen_lookup_and_goto_ptr();
    }
}
820 
/* True if arithmetic condition @c requires the signed-overflow value. */
static bool cond_need_sv(int c)
{
    switch (c) {
    case 2:
    case 3:
    case 6:
        return true;
    default:
        return false;
    }
}
825 
/* True if arithmetic condition @c requires the carry/borrow value. */
static bool cond_need_cb(int c)
{
    switch (c) {
    case 4:
    case 5:
        return true;
    default:
        return false;
    }
}
830 
/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 *
 * @cf: the 4-bit c:f field; bit 0 (f) inverts the sense of the test.
 * @d: true for a 64-bit (doubleword) condition, false for 32-bit.
 * @res: the operation result; @uv: value tested by the unsigned
 * overflow conditions (4/5); @sv: value tested by the signed
 * overflow conditions (2/3/6).
 */

static DisasCond do_cond(DisasContext *ctx, unsigned cf, bool d,
                         TCGv_i64 res, TCGv_i64 uv, TCGv_i64 sv)
{
    TCGCond sign_cond, zero_cond;
    uint64_t sign_imm, zero_imm;
    DisasCond cond;
    TCGv_i64 tmp;

    if (d) {
        /* 64-bit condition. */
        sign_imm = 0;
        sign_cond = TCG_COND_LT;
        zero_imm = 0;
        zero_cond = TCG_COND_EQ;
    } else {
        /* 32-bit condition: test sign/zero within the low 32 bits
           via TSTNE/TSTEQ against a mask. */
        sign_imm = 1ull << 31;
        sign_cond = TCG_COND_TSTNE;
        zero_imm = UINT32_MAX;
        zero_cond = TCG_COND_TSTEQ;
    }

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_vi(zero_cond, res, zero_imm);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        cond = cond_make_ti(sign_cond, tmp, sign_imm);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   ((res ^ sv) < 0 ? 1 : !res)
         *   !((res ^ sv) < 0 ? 0 : res)
         */
        tmp = tcg_temp_new_i64();
        tcg_gen_xor_i64(tmp, res, sv);
        tcg_gen_movcond_i64(sign_cond, tmp,
                            tmp, tcg_constant_i64(sign_imm),
                            ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 4: /* NUV / UV      (!UV / UV) */
        cond = cond_make_vi(TCG_COND_EQ, uv, 0);
        break;
    case 5: /* ZNV / VNZ     (!UV | Z / UV & !Z) */
        tmp = tcg_temp_new_i64();
        tcg_gen_movcond_i64(TCG_COND_EQ, tmp, uv, ctx->zero, ctx->zero, res);
        cond = cond_make_ti(zero_cond, tmp, zero_imm);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_vi(sign_cond, sv, sign_imm);
        break;
    case 7: /* OD / EV */
        cond = cond_make_vi(TCG_COND_TSTNE, res, 1);
        break;
    default:
        g_assert_not_reached();
    }
    /* Bit 0 of cf selects the negated sense of the condition. */
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
909 
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(DisasContext *ctx, unsigned cf, bool d,
                             TCGv_i64 res, TCGv_i64 in1,
                             TCGv_i64 in2, TCGv_i64 sv)
{
    TCGCond tc;
    bool ext_uns;   /* for !d: zero-extend (vs sign-extend) the inputs */

    switch (cf >> 1) {
    case 1: /* = / <> */
        tc = TCG_COND_EQ;
        ext_uns = true;
        break;
    case 2: /* < / >= */
        tc = TCG_COND_LT;
        ext_uns = false;
        break;
    case 3: /* <= / > */
        tc = TCG_COND_LE;
        ext_uns = false;
        break;
    case 4: /* << / >>= */
        tc = TCG_COND_LTU;
        ext_uns = true;
        break;
    case 5: /* <<= / >> */
        tc = TCG_COND_LEU;
        ext_uns = true;
        break;
    default:
        /* Conditions not expressible as in1 <op> in2: fall back. */
        return do_cond(ctx, cf, d, res, NULL, sv);
    }

    /* Bit 0 of cf selects the negated sense of the condition. */
    if (cf & 1) {
        tc = tcg_invert_cond(tc);
    }
    if (!d) {
        /* 32-bit comparison: extend both inputs to 64 bits first. */
        TCGv_i64 t1 = tcg_temp_new_i64();
        TCGv_i64 t2 = tcg_temp_new_i64();

        if (ext_uns) {
            tcg_gen_ext32u_i64(t1, in1);
            tcg_gen_ext32u_i64(t2, in2);
        } else {
            tcg_gen_ext32s_i64(t1, in1);
            tcg_gen_ext32s_i64(t2, in2);
        }
        return cond_make_tt(tc, t1, t2);
    }
    return cond_make_vv(tc, in1, in2);
}
964 
965 /*
966  * Similar, but for logicals, where the carry and overflow bits are not
967  * computed, and use of them is undefined.
968  *
969  * Undefined or not, hardware does not trap.  It seems reasonable to
970  * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
971  * how cases c={2,3} are treated.
972  */
973 
do_log_cond(DisasContext * ctx,unsigned cf,bool d,TCGv_i64 res)974 static DisasCond do_log_cond(DisasContext *ctx, unsigned cf, bool d,
975                              TCGv_i64 res)
976 {
977     TCGCond tc;
978     uint64_t imm;
979 
980     switch (cf >> 1) {
981     case 0:  /* never / always */
982     case 4:  /* undef, C */
983     case 5:  /* undef, C & !Z */
984     case 6:  /* undef, V */
985         return cf & 1 ? cond_make_t() : cond_make_f();
986     case 1:  /* == / <> */
987         tc = d ? TCG_COND_EQ : TCG_COND_TSTEQ;
988         imm = d ? 0 : UINT32_MAX;
989         break;
990     case 2:  /* < / >= */
991         tc = d ? TCG_COND_LT : TCG_COND_TSTNE;
992         imm = d ? 0 : 1ull << 31;
993         break;
994     case 3:  /* <= / > */
995         tc = cf & 1 ? TCG_COND_GT : TCG_COND_LE;
996         if (!d) {
997             TCGv_i64 tmp = tcg_temp_new_i64();
998             tcg_gen_ext32s_i64(tmp, res);
999             return cond_make_ti(tc, tmp, 0);
1000         }
1001         return cond_make_vi(tc, res, 0);
1002     case 7: /* OD / EV */
1003         tc = TCG_COND_TSTNE;
1004         imm = 1;
1005         break;
1006     default:
1007         g_assert_not_reached();
1008     }
1009     if (cf & 1) {
1010         tc = tcg_invert_cond(tc);
1011     }
1012     return cond_make_vi(tc, res, imm);
1013 }
1014 
1015 /* Similar, but for shift/extract/deposit conditions.  */
1016 
do_sed_cond(DisasContext * ctx,unsigned orig,bool d,TCGv_i64 res)1017 static DisasCond do_sed_cond(DisasContext *ctx, unsigned orig, bool d,
1018                              TCGv_i64 res)
1019 {
1020     unsigned c, f;
1021 
1022     /* Convert the compressed condition codes to standard.
1023        0-2 are the same as logicals (nv,<,<=), while 3 is OD.
1024        4-7 are the reverse of 0-3.  */
1025     c = orig & 3;
1026     if (c == 3) {
1027         c = 7;
1028     }
1029     f = (orig & 4) / 4;
1030 
1031     return do_log_cond(ctx, c * 2 + f, d, res);
1032 }
1033 
1034 /* Similar, but for unit zero conditions.  */
do_unit_zero_cond(unsigned cf,bool d,TCGv_i64 res)1035 static DisasCond do_unit_zero_cond(unsigned cf, bool d, TCGv_i64 res)
1036 {
1037     TCGv_i64 tmp;
1038     uint64_t d_repl = d ? 0x0000000100000001ull : 1;
1039     uint64_t ones = 0, sgns = 0;
1040 
1041     switch (cf >> 1) {
1042     case 1: /* SBW / NBW */
1043         if (d) {
1044             ones = d_repl;
1045             sgns = d_repl << 31;
1046         }
1047         break;
1048     case 2: /* SBZ / NBZ */
1049         ones = d_repl * 0x01010101u;
1050         sgns = ones << 7;
1051         break;
1052     case 3: /* SHZ / NHZ */
1053         ones = d_repl * 0x00010001u;
1054         sgns = ones << 15;
1055         break;
1056     }
1057     if (ones == 0) {
1058         /* Undefined, or 0/1 (never/always). */
1059         return cf & 1 ? cond_make_t() : cond_make_f();
1060     }
1061 
1062     /*
1063      * See hasless(v,1) from
1064      * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1065      */
1066     tmp = tcg_temp_new_i64();
1067     tcg_gen_subi_i64(tmp, res, ones);
1068     tcg_gen_andc_i64(tmp, tmp, res);
1069 
1070     return cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE, tmp, sgns);
1071 }
1072 
static TCGv_i64 get_carry(DisasContext *ctx, bool d,
                          TCGv_i64 cb, TCGv_i64 cb_msb)
{
    /* Doubleword carry-out is already isolated in CB_MSB; the word
       carry-out lives at bit 32 of the carry vector.  */
    if (d) {
        return cb_msb;
    } else {
        TCGv_i64 c32 = tcg_temp_new_i64();
        tcg_gen_extract_i64(c32, cb, 32, 1);
        return c32;
    }
}
1083 
/* Return the architectural PSW carry/borrow bit for word (!d) or
   doubleword (d) operations, from the cached psw_cb/psw_cb_msb state. */
static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}
1088 
1089 /* Compute signed overflow for addition.  */
static TCGv_i64 do_add_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2,
                          TCGv_i64 orig_in1, int shift, bool d)
{
    TCGv_i64 sv = tcg_temp_new_i64();
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Basic two's-complement overflow: the result sign differs from
       IN1 while the operand signs agree: sv = (res ^ in1) & ~(in1 ^ in2). */
    tcg_gen_xor_i64(sv, res, in1);
    tcg_gen_xor_i64(tmp, in1, in2);
    tcg_gen_andc_i64(sv, sv, tmp);

    /* For shift-and-add forms, the pre-shift of ORIG_IN1 may itself
       overflow; fold that into SV as well. */
    switch (shift) {
    case 0:
        break;
    case 1:
        /* Shift left by one and compare the sign. */
        tcg_gen_add_i64(tmp, orig_in1, orig_in1);
        tcg_gen_xor_i64(tmp, tmp, orig_in1);
        /* Incorporate into the overflow. */
        tcg_gen_or_i64(sv, sv, tmp);
        break;
    default:
        {
            int sign_bit = d ? 63 : 31;

            /* Compare the sign against all lower bits. */
            tcg_gen_sextract_i64(tmp, orig_in1, sign_bit, 1);
            tcg_gen_xor_i64(tmp, tmp, orig_in1);
            /*
             * If one of the bits shifting into or through the sign
             * differs, then we have overflow.
             */
            tcg_gen_extract_i64(tmp, tmp, sign_bit - shift, shift);
            tcg_gen_movcond_i64(TCG_COND_NE, sv, tmp, ctx->zero,
                                tcg_constant_i64(-1), sv);
        }
    }
    return sv;
}
1129 
1130 /* Compute unsigned overflow for addition.  */
static TCGv_i64 do_add_uv(DisasContext *ctx, TCGv_i64 cb, TCGv_i64 cb_msb,
                          TCGv_i64 in1, int shift, bool d)
{
    TCGv_i64 uv;

    if (shift == 0) {
        /* Plain add: unsigned overflow is exactly the carry-out.  */
        return get_carry(ctx, d, cb, cb_msb);
    }

    /* Shift-and-add: bits shifted out the top also signal overflow.  */
    uv = tcg_temp_new_i64();
    tcg_gen_extract_i64(uv, in1, (d ? 63 : 31) - shift, shift);
    tcg_gen_or_i64(uv, uv, get_carry(ctx, d, cb, cb_msb));
    return uv;
}
1143 
1144 /* Compute signed overflow for subtraction.  */
static TCGv_i64 do_sub_sv(DisasContext *ctx, TCGv_i64 res,
                          TCGv_i64 in1, TCGv_i64 in2)
{
    TCGv_i64 ov = tcg_temp_new_i64();
    TCGv_i64 t = tcg_temp_new_i64();

    /* Overflow iff the operand signs differ and the result sign
       differs from IN1: ov = (res ^ in1) & (in1 ^ in2).  */
    tcg_gen_xor_i64(ov, res, in1);
    tcg_gen_xor_i64(t, in1, in2);
    tcg_gen_and_i64(ov, ov, t);

    return ov;
}
1157 
static void gen_tc(DisasContext *ctx, DisasCond *cond)
{
    DisasDelayException *excp;

    switch (cond->c) {
    case TCG_COND_NEVER:
        /* Trap cannot fire: nothing to emit.  */
        break;
    case TCG_COND_ALWAYS:
        /* Trap fires unconditionally.  */
        gen_excp_iir(ctx, EXCP_COND);
        break;
    default:
        /* Branch to a delayed exception when the condition holds.  */
        excp = delay_excp(ctx, EXCP_COND);
        tcg_gen_brcond_i64(cond->c, cond->a0, cond->a1, excp->lab);
        /* In the non-trap path, the condition is known false. */
        *cond = cond_make_f();
        break;
    }
}
1176 
static void gen_tsv(DisasContext *ctx, TCGv_i64 *sv, bool d)
{
    /* Condition 12 is SV: trap when signed overflow occurred.  */
    DisasCond sv_cond = do_cond(ctx, /* SV */ 12, d, NULL, NULL, *sv);
    DisasDelayException *excp = delay_excp(ctx, EXCP_OVERFLOW);

    tcg_gen_brcond_i64(sv_cond.c, sv_cond.a0, sv_cond.a1, excp->lab);

    /* In the non-trap path, V is known zero. */
    *sv = tcg_constant_i64(0);
}
1187 
/*
 * Emit code for a general add: optional pre-shift of IN1 (shift),
 * L-form without carry writeback (is_l), trap on signed overflow
 * (is_tsv), trap on condition (is_tc), and add-with-carry (is_c).
 * Writes GR[rt] and installs the nullification condition for CF/D.
 */
static void do_add(DisasContext *ctx, unsigned rt, TCGv_i64 orig_in1,
                   TCGv_i64 in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf, bool d)
{
    TCGv_i64 dest, cb, cb_msb, in1, uv, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = NULL;
    cb_msb = NULL;

    in1 = orig_in1;
    if (shift) {
        /* Shift-and-add: the effective first addend is ORIG_IN1 << shift. */
        tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        /* Carries are needed for PSW writeback (non-L) and/or for
           carry-based conditions. */
        cb_msb = tcg_temp_new_i64();
        cb = tcg_temp_new_i64();

        if (is_c) {
            tcg_gen_addcio_i64(dest, cb_msb, in1, in2, get_psw_carry(ctx, d));
        } else {
            tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        }
        /* Per-bit carry vector: cb = in1 ^ in2 ^ dest. */
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /* No carries required: a plain addition suffices. */
        tcg_gen_add_i64(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_i64(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, orig_in1, shift, d);
        if (is_tsv) {
            /* Trap on signed overflow before any writeback. */
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute unsigned overflow if required.  */
    uv = NULL;
    if (cond_need_cb(c)) {
        uv = do_add_uv(ctx, cb, cb_msb, orig_in1, shift, d);
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(ctx, cf, d, dest, uv, sv);
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}
1256 
static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_d_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_i64 r1, r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* cf == 1 with TC means the trap always fires. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, r1, r2, a->sh, is_l, is_tsv, is_tc, is_c,
           a->cf, a->d);
    return nullify_end(ctx);
}
1275 
static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_i64 imm, r2;

    if (unlikely(is_tc && a->cf == 1)) {
        /* cf == 1 with TC means the trap always fires. */
        return gen_excp_iir(ctx, EXCP_COND);
    }
    if (a->cf) {
        nullify_over(ctx);
    }
    imm = tcg_constant_i64(a->i);
    r2 = load_gpr(ctx, a->r);
    /* All ADDI conditions are 32-bit. */
    do_add(ctx, a->t, imm, r2, 0, 0, is_tsv, is_tc, 0, a->cf, false);
    return nullify_end(ctx);
}
1294 
/*
 * Emit code for a general subtract: optional trap on signed overflow
 * (is_tsv), subtract-with-borrow (is_b), trap on condition (is_tc).
 * Writes GR[rt], updates the PSW carry/borrow state, and installs
 * the nullification condition for CF/D.
 */
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf, bool d)
{
    TCGv_i64 dest, sv, cb, cb_msb;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new_i64();
    cb = tcg_temp_new_i64();
    cb_msb = tcg_temp_new_i64();

    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_i64(cb, in2);
        tcg_gen_addcio_i64(dest, cb_msb, in1, cb, get_psw_carry(ctx, d));
        /* Per-bit carry vector: cb = ~in2 ^ in1 ^ dest. */
        tcg_gen_xor_i64(cb, cb, in1);
        tcg_gen_xor_i64(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_i64 one = tcg_constant_i64(1);
        tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
        /* eqv(in1, in2) == in1 ^ ~in2, matching the branch above. */
        tcg_gen_eqv_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* Trap on signed overflow before any writeback. */
            gen_tsv(ctx, &sv, d);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(ctx, cf, d, dest, in1, in2, sv);
    } else {
        cond = do_cond(ctx, cf, d, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        gen_tc(ctx, &cond);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}
1353 
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf_d *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_i64 r1, r2;

    /* A non-zero condition field nullifies over the operation. */
    if (a->cf) {
        nullify_over(ctx);
    }
    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, r1, r2, is_tsv, is_b, is_tc, a->cf, a->d);
    return nullify_end(ctx);
}
1367 
static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_i64 imm, r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    imm = tcg_constant_i64(a->i);
    r2 = load_gpr(ctx, a->r);
    /* All SUBI conditions are 32-bit. */
    do_sub(ctx, a->t, imm, r2, is_tsv, 0, 0, a->cf, false);
    return nullify_end(ctx);
}
1381 
static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                      TCGv_i64 in2, unsigned cf, bool d)
{
    TCGv_i64 diff, sv;
    DisasCond cond;

    /* Compare via subtraction; the difference itself is discarded.  */
    diff = tcg_temp_new_i64();
    tcg_gen_sub_i64(diff, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, diff, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(ctx, cf, d, diff, in1, in2, sv);

    /* Clear the target register.  */
    tcg_gen_movi_i64(diff, 0);
    save_gpr(ctx, rt, diff);

    /* Install the new nullification.  */
    ctx->null_cond = cond;
}
1407 
static void do_log(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                   TCGv_i64 in2, unsigned cf, bool d,
                   void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t = dest_gpr(ctx, rt);

    /* Apply the logical operation and write back.  */
    fn(t, in1, in2);
    save_gpr(ctx, rt, t);

    /* Install the nullification condition on the result.  */
    ctx->null_cond = do_log_cond(ctx, cf, d, t);
}
1421 
do_log_reg(DisasContext * ctx,arg_rrr_cf_d * a,void (* fn)(TCGv_i64,TCGv_i64,TCGv_i64))1422 static bool do_log_reg(DisasContext *ctx, arg_rrr_cf_d *a,
1423                        void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
1424 {
1425     TCGv_i64 tcg_r1, tcg_r2;
1426 
1427     if (a->cf) {
1428         nullify_over(ctx);
1429     }
1430     tcg_r1 = load_gpr(ctx, a->r1);
1431     tcg_r2 = load_gpr(ctx, a->r2);
1432     do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d, fn);
1433     return nullify_end(ctx);
1434 }
1435 
/*
 * Add or subtract with the condition evaluated on per-unit
 * (nibble/byte/halfword/word) carry-outs or zeros, rather than
 * on the full-width result.
 */
static void do_unit_addsub(DisasContext *ctx, unsigned rt, TCGv_i64 in1,
                           TCGv_i64 in2, unsigned cf, bool d,
                           bool is_tc, bool is_add)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    uint64_t test_cb = 0;
    DisasCond cond;

    /* Select which carry-out bits to test. */
    switch (cf >> 1) {
    case 4: /* NDC / SDC -- 4-bit carries */
        test_cb = dup_const(MO_8, 0x88);
        break;
    case 5: /* NWC / SWC -- 32-bit carries */
        if (d) {
            test_cb = dup_const(MO_32, INT32_MIN);
        } else {
            cf &= 1; /* undefined -- map to never/always */
        }
        break;
    case 6: /* NBC / SBC -- 8-bit carries */
        test_cb = dup_const(MO_8, INT8_MIN);
        break;
    case 7: /* NHC / SHC -- 16-bit carries */
        test_cb = dup_const(MO_16, INT16_MIN);
        break;
    }
    if (!d) {
        /* Word operations only examine carries in the low 32 bits. */
        test_cb = (uint32_t)test_cb;
    }

    if (!test_cb) {
        /* No need to compute carries if we don't need to test them. */
        if (is_add) {
            tcg_gen_add_i64(dest, in1, in2);
        } else {
            tcg_gen_sub_i64(dest, in1, in2);
        }
        cond = do_unit_zero_cond(cf, d, dest);
    } else {
        TCGv_i64 cb = tcg_temp_new_i64();

        if (d) {
            TCGv_i64 cb_msb = tcg_temp_new_i64();
            if (is_add) {
                tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                /* See do_sub, !is_b. */
                TCGv_i64 one = tcg_constant_i64(1);
                tcg_gen_sub2_i64(dest, cb_msb, in1, one, in2, ctx->zero);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            /* Per-bit carry vector, shifted down one with the msb
               carry-out inserted at the top. */
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_extract2_i64(cb, cb, cb_msb, 1);
        } else {
            if (is_add) {
                tcg_gen_add_i64(dest, in1, in2);
                tcg_gen_xor_i64(cb, in1, in2);
            } else {
                tcg_gen_sub_i64(dest, in1, in2);
                tcg_gen_eqv_i64(cb, in1, in2);
            }
            /* Per-bit carry vector, shifted down one. */
            tcg_gen_xor_i64(cb, cb, dest);
            tcg_gen_shri_i64(cb, cb, 1);
        }

        /* Test the selected carry bits; the low cf bit inverts. */
        cond = cond_make_ti(cf & 1 ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                            cb, test_cb);
    }

    if (is_tc) {
        gen_tc(ctx, &cond);
    }
    save_gpr(ctx, rt, dest);

    ctx->null_cond = cond;
}
1514 
1515 #ifndef CONFIG_USER_ONLY
1516 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1517    from the top 2 bits of the base register.  There are a few system
1518    instructions that have a 3-bit space specifier, for which SR0 is
1519    not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_i64 base)
{
    TCGv_ptr p;
    TCGv_i64 t, space;

    if (sp != 0) {
        /* Explicit space register; ~SP encodes a 3-bit specifier.  */
        if (sp < 0) {
            sp = ~sp;
        }
        space = tcg_temp_new_i64();
        load_spr(ctx, space, sp);
        return space;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        /* The SR_SAME flag lets us use the cached copy directly.  */
        return cpu_srH;
    }

    p = tcg_temp_new_ptr();
    t = tcg_temp_new_i64();
    space = tcg_temp_new_i64();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_i64(t, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_i64(t, t, 030);
    tcg_gen_trunc_i64_ptr(p, t);

    /* Index into sr[4..7] within the CPU state.  */
    tcg_gen_add_ptr(p, p, tcg_env);
    tcg_gen_ld_i64(space, p, offsetof(CPUHPPAState, sr[4]));

    return space;
}
1552 #endif
1553 
/*
 * Form the global virtual address for a memory access: compute the
 * offset from base/index/displacement, mask it to the current address
 * width, and (system mode, unless is_phys) merge in the space.
 * On return *pofs is the unmasked offset (used for base-register
 * modification) and *pgva the masked global virtual address.
 */
static void form_gva(DisasContext *ctx, TCGv_i64 *pgva, TCGv_i64 *pofs,
                     unsigned rb, unsigned rx, int scale, int64_t disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_i64 base = load_gpr(ctx, rb);
    TCGv_i64 ofs;
    TCGv_i64 addr;

    set_insn_breg(ctx, rb);

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        /* Indexed: ofs = (GR[rx] << scale) + base. */
        ofs = tcg_temp_new_i64();
        tcg_gen_shli_i64(ofs, cpu_gr[rx], scale);
        tcg_gen_add_i64(ofs, ofs, base);
    } else if (disp || modify) {
        /* Displacement: ofs = base + disp. */
        ofs = tcg_temp_new_i64();
        tcg_gen_addi_i64(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_i64();
    /* Pre-modify (modify < 0) addresses with the updated offset;
       post-modify (modify > 0) addresses with the original base. */
    tcg_gen_andi_i64(addr, modify <= 0 ? ofs : base,
                     ctx->gva_offset_mask);
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_i64(addr, addr, space_select(ctx, sp, base));
    }
#endif
}
1586 
1587 /* Emit a memory load.  The modify parameter should be
1588  * < 0 for pre-modify,
1589  * > 0 for post-modify,
1590  * = 0 for no base register update.
1591  */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 gva, ofs;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &gva, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i32(dest, gva, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        /* Base register update for pre/post-modify forms.  */
        save_gpr(ctx, rb, ofs);
    }
}
1609 
static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, int64_t disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 gva, ofs;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &gva, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_ld_i64(dest, gva, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        /* Base register update for pre/post-modify forms.  */
        save_gpr(ctx, rb, ofs);
    }
}
1627 
static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 gva, ofs;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &gva, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i32(src, gva, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        /* Base register update for pre/post-modify forms.  */
        save_gpr(ctx, rb, ofs);
    }
}
1645 
static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, int64_t disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_i64 gva, ofs;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &gva, &ofs, rb, rx, scale, disp, sp, modify,
             MMU_DISABLED(ctx));
    tcg_gen_qemu_st_i64(src, gva, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        /* Base register update for pre/post-modify forms.  */
        save_gpr(ctx, rb, ofs);
    }
}
1663 
do_load(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify,MemOp mop)1664 static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
1665                     unsigned rx, int scale, int64_t disp,
1666                     unsigned sp, int modify, MemOp mop)
1667 {
1668     TCGv_i64 dest;
1669 
1670     nullify_over(ctx);
1671 
1672     if (modify == 0) {
1673         /* No base register update.  */
1674         dest = dest_gpr(ctx, rt);
1675     } else {
1676         /* Make sure if RT == RB, we see the result of the load.  */
1677         dest = tcg_temp_new_i64();
1678     }
1679     do_load_64(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
1680     save_gpr(ctx, rt, dest);
1681 
1682     return nullify_end(ctx);
1683 }
1684 
do_floadw(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify)1685 static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
1686                       unsigned rx, int scale, int64_t disp,
1687                       unsigned sp, int modify)
1688 {
1689     TCGv_i32 tmp;
1690 
1691     nullify_over(ctx);
1692 
1693     tmp = tcg_temp_new_i32();
1694     do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1695     save_frw_i32(rt, tmp);
1696 
1697     if (rt == 0) {
1698         gen_helper_loaded_fr0(tcg_env);
1699     }
1700 
1701     return nullify_end(ctx);
1702 }
1703 
trans_fldw(DisasContext * ctx,arg_ldst * a)1704 static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1705 {
1706     return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1707                      a->disp, a->sp, a->m);
1708 }
1709 
do_floadd(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify)1710 static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
1711                       unsigned rx, int scale, int64_t disp,
1712                       unsigned sp, int modify)
1713 {
1714     TCGv_i64 tmp;
1715 
1716     nullify_over(ctx);
1717 
1718     tmp = tcg_temp_new_i64();
1719     do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1720     save_frd(rt, tmp);
1721 
1722     if (rt == 0) {
1723         gen_helper_loaded_fr0(tcg_env);
1724     }
1725 
1726     return nullify_end(ctx);
1727 }
1728 
trans_fldd(DisasContext * ctx,arg_ldst * a)1729 static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1730 {
1731     return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1732                      a->disp, a->sp, a->m);
1733 }
1734 
do_store(DisasContext * ctx,unsigned rt,unsigned rb,int64_t disp,unsigned sp,int modify,MemOp mop)1735 static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
1736                      int64_t disp, unsigned sp,
1737                      int modify, MemOp mop)
1738 {
1739     nullify_over(ctx);
1740     do_store_64(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1741     return nullify_end(ctx);
1742 }
1743 
do_fstorew(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify)1744 static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
1745                        unsigned rx, int scale, int64_t disp,
1746                        unsigned sp, int modify)
1747 {
1748     TCGv_i32 tmp;
1749 
1750     nullify_over(ctx);
1751 
1752     tmp = load_frw_i32(rt);
1753     do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
1754 
1755     return nullify_end(ctx);
1756 }
1757 
trans_fstw(DisasContext * ctx,arg_ldst * a)1758 static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1759 {
1760     return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1761                       a->disp, a->sp, a->m);
1762 }
1763 
do_fstored(DisasContext * ctx,unsigned rt,unsigned rb,unsigned rx,int scale,int64_t disp,unsigned sp,int modify)1764 static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
1765                        unsigned rx, int scale, int64_t disp,
1766                        unsigned sp, int modify)
1767 {
1768     TCGv_i64 tmp;
1769 
1770     nullify_over(ctx);
1771 
1772     tmp = load_frd(rt);
1773     do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
1774 
1775     return nullify_end(ctx);
1776 }
1777 
trans_fstd(DisasContext * ctx,arg_ldst * a)1778 static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1779 {
1780     return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1781                       a->disp, a->sp, a->m);
1782 }
1783 
do_fop_wew(DisasContext * ctx,unsigned rt,unsigned ra,void (* func)(TCGv_i32,TCGv_env,TCGv_i32))1784 static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
1785                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
1786 {
1787     TCGv_i32 tmp;
1788 
1789     nullify_over(ctx);
1790     tmp = load_frw0_i32(ra);
1791 
1792     func(tmp, tcg_env, tmp);
1793 
1794     save_frw_i32(rt, tmp);
1795     return nullify_end(ctx);
1796 }
1797 
do_fop_wed(DisasContext * ctx,unsigned rt,unsigned ra,void (* func)(TCGv_i32,TCGv_env,TCGv_i64))1798 static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
1799                        void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
1800 {
1801     TCGv_i32 dst;
1802     TCGv_i64 src;
1803 
1804     nullify_over(ctx);
1805     src = load_frd(ra);
1806     dst = tcg_temp_new_i32();
1807 
1808     func(dst, tcg_env, src);
1809 
1810     save_frw_i32(rt, dst);
1811     return nullify_end(ctx);
1812 }
1813 
do_fop_ded(DisasContext * ctx,unsigned rt,unsigned ra,void (* func)(TCGv_i64,TCGv_env,TCGv_i64))1814 static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
1815                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
1816 {
1817     TCGv_i64 tmp;
1818 
1819     nullify_over(ctx);
1820     tmp = load_frd0(ra);
1821 
1822     func(tmp, tcg_env, tmp);
1823 
1824     save_frd(rt, tmp);
1825     return nullify_end(ctx);
1826 }
1827 
do_fop_dew(DisasContext * ctx,unsigned rt,unsigned ra,void (* func)(TCGv_i64,TCGv_env,TCGv_i32))1828 static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
1829                        void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
1830 {
1831     TCGv_i32 src;
1832     TCGv_i64 dst;
1833 
1834     nullify_over(ctx);
1835     src = load_frw0_i32(ra);
1836     dst = tcg_temp_new_i64();
1837 
1838     func(dst, tcg_env, src);
1839 
1840     save_frd(rt, dst);
1841     return nullify_end(ctx);
1842 }
1843 
do_fop_weww(DisasContext * ctx,unsigned rt,unsigned ra,unsigned rb,void (* func)(TCGv_i32,TCGv_env,TCGv_i32,TCGv_i32))1844 static bool do_fop_weww(DisasContext *ctx, unsigned rt,
1845                         unsigned ra, unsigned rb,
1846                         void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
1847 {
1848     TCGv_i32 a, b;
1849 
1850     nullify_over(ctx);
1851     a = load_frw0_i32(ra);
1852     b = load_frw0_i32(rb);
1853 
1854     func(a, tcg_env, a, b);
1855 
1856     save_frw_i32(rt, a);
1857     return nullify_end(ctx);
1858 }
1859 
do_fop_dedd(DisasContext * ctx,unsigned rt,unsigned ra,unsigned rb,void (* func)(TCGv_i64,TCGv_env,TCGv_i64,TCGv_i64))1860 static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
1861                         unsigned ra, unsigned rb,
1862                         void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
1863 {
1864     TCGv_i64 a, b;
1865 
1866     nullify_over(ctx);
1867     a = load_frd0(ra);
1868     b = load_frd0(rb);
1869 
1870     func(a, tcg_env, a, b);
1871 
1872     save_frd(rt, a);
1873     return nullify_end(ctx);
1874 }
1875 
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, int64_t disp,
                       unsigned link, bool is_n)
{
    ctx->iaq_j = iaqe_branchi(ctx, disp);

    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* The branch itself is not subject to nullification. */
        install_link(ctx, link, false);
        if (is_n) {
            /* is_n: the delay-slot insn is to be nullified. */
            if (use_nullify_skip(ctx)) {
                /* Skip the delay slot entirely: jump straight to the
                   branch target with PSW[N] and PSW[B] clear. */
                nullify_set(ctx, 0);
                store_psw_xb(ctx, 0);
                gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
                ctx->base.is_jmp = DISAS_NORETURN;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        /* Continue translating into the delay slot, with the branch
           target queued as the address that follows it. */
        ctx->iaq_n = &ctx->iaq_j;
        ctx->psw_b_next = true;
    } else {
        /* The branch may itself be nullified: emit the taken path
           under nullify_over, then the branch-nullified fallthrough. */
        nullify_over(ctx);

        install_link(ctx, link, false);
        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            store_psw_xb(ctx, 0);
            gen_goto_tb(ctx, 0, &ctx->iaq_j, NULL);
        } else {
            nullify_set(ctx, is_n);
            store_psw_xb(ctx, PSW_B);
            gen_goto_tb(ctx, 0, &ctx->iaq_b, &ctx->iaq_j);
        }
        nullify_end(ctx);

        /* Branch nullified: execution continues at iaq_b. */
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 1, &ctx->iaq_b, NULL);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1919 
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, int64_t disp, bool is_n,
                       DisasCond *cond)
{
    DisasIAQE next;
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, disp, 0, is_n && disp >= 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_i64(c, cond->a0, cond->a1, taken);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        /* Skip the nullified delay-slot insn: continue at iaq_b + 4. */
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        next = iaqe_incr(&ctx->iaq_b, 4);
        gen_goto_tb(ctx, 0, &next, NULL);
    } else {
        if (!n && ctx->null_lab) {
            /* Fold the branch-nullified path into the not-taken path,
               since both continue at the same address with PSW[N]=0. */
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 0, &ctx->iaq_b, NULL);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;

    next = iaqe_branchi(ctx, disp);
    if (n && use_nullify_skip(ctx)) {
        /* Skip the nullified delay slot; go straight to the target. */
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
        gen_goto_tb(ctx, 1, &next, NULL);
    } else {
        nullify_set(ctx, n);
        store_psw_xb(ctx, PSW_B);
        gen_goto_tb(ctx, 1, &ctx->iaq_b, &next);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
1983 
/*
 * Emit an unconditional branch to an indirect target, in ctx->iaq_j.
 * This handles nullification of the branch itself.
 */
static bool do_ibranch(DisasContext *ctx, unsigned link,
                       bool with_sr0, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        /* The branch itself is not subject to nullification. */
        install_link(ctx, link, with_sr0);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                /* Skip the delay-slot insn entirely. */
                install_iaq_entries(ctx, &ctx->iaq_j, NULL);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        /* Continue translating into the delay slot, with the branch
           target queued as the address that follows it. */
        ctx->iaq_n = &ctx->iaq_j;
        ctx->psw_b_next = true;
        return true;
    }

    /* The branch may itself be nullified; wrap the taken path. */
    nullify_over(ctx);

    install_link(ctx, link, with_sr0);
    if (is_n && use_nullify_skip(ctx)) {
        install_iaq_entries(ctx, &ctx->iaq_j, NULL);
        nullify_set(ctx, 0);
        store_psw_xb(ctx, 0);
    } else {
        install_iaq_entries(ctx, &ctx->iaq_b, &ctx->iaq_j);
        nullify_set(ctx, is_n);
        store_psw_xb(ctx, PSW_B);
    }

    /* Indirect target: a direct TB link is not possible. */
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
}
2024 
/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *      IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *      IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_i64 do_ibranch_priv(DisasContext *ctx, TCGv_i64 offset)
{
    TCGv_i64 dest = tcg_temp_new_i64();
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        tcg_gen_mov_i64(dest, offset);
        break;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        tcg_gen_ori_i64(dest, offset, 3);
        break;
    default:
        /* Replace the low two bits with the current privilege, then
           take the numeric maximum with the original offset, so a
           numerically smaller (more privileged) value is rejected. */
        tcg_gen_andi_i64(dest, offset, -4);
        tcg_gen_ori_i64(dest, dest, ctx->privilege);
        tcg_gen_umax_i64(dest, dest, offset);
        break;
    }
    return dest;
}
2052 
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   in than the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    assert(ctx->iaq_f.disp == 0);

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* If PSW[B] is set, the B,GATE insn would trap. */
    if (ctx->psw_xb & PSW_B) {
        goto do_sigill;
    }

    /* Dispatch on the gateway-page entry point offset. */
    switch (ctx->base.pc_first) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        {
            DisasIAQE next = { .base = tcg_temp_new_i64() };

            /* Store GR26 into CR27 and return to GR31|PRIV_USER. */
            tcg_gen_st_i64(cpu_gr[26], tcg_env,
                           offsetof(CPUHPPAState, cr[27]));
            tcg_gen_ori_i64(next.base, cpu_gr[31], PRIV_USER);
            install_iaq_entries(ctx, &next, NULL);
            ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        }
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
2121 
/* NOP: no operation; only clears any pending nullification condition. */
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    ctx->null_cond = cond_make_f();
    return true;
}
2127 
/* BREAK: raise a break-instruction trap, recording the insn in IIR. */
static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}
2132 
/* SYNC/SYNCDMA: emit a full memory barrier. */
static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    ctx->null_cond = cond_make_f();
    return true;
}
2141 
trans_mfia(DisasContext * ctx,arg_mfia * a)2142 static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
2143 {
2144     TCGv_i64 dest = dest_gpr(ctx, a->t);
2145 
2146     copy_iaoq_entry(ctx, dest, &ctx->iaq_f);
2147     tcg_gen_andi_i64(dest, dest, -4);
2148 
2149     save_gpr(ctx, a->t, dest);
2150     ctx->null_cond = cond_make_f();
2151     return true;
2152 }
2153 
trans_mfsp(DisasContext * ctx,arg_mfsp * a)2154 static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
2155 {
2156     unsigned rt = a->t;
2157     unsigned rs = a->sp;
2158     TCGv_i64 t0 = tcg_temp_new_i64();
2159 
2160     load_spr(ctx, t0, rs);
2161     tcg_gen_shri_i64(t0, t0, 32);
2162 
2163     save_gpr(ctx, rt, t0);
2164 
2165     ctx->null_cond = cond_make_f();
2166     return true;
2167 }
2168 
/* MFCTL: move a control register to a general register.
   SAR, CR26 and CR27 are readable at any privilege level; the interval
   timer has special I/O handling; everything else is privileged. */
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_i64 tmp;

    switch (ctl) {
    case CR_SAR:
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (translator_io_start(&ctx->base)) {
            /* Icount mode: end the TB after this I/O access. */
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        }
        gen_helper_read_interval_timer(tmp);
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    /* Generic path: load the register straight from env. */
    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    ctx->null_cond = cond_make_f();
    return true;
}
2213 
/* MTSP: move a general register into a space register. */
static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 tmp;

    /* SR5-SR7 may only be written at the most privileged level. */
    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    /* Space registers live in the high 32 bits of their slot. */
    tmp = tcg_temp_new_i64();
    tcg_gen_shli_i64(tmp, load_gpr(ctx, rr), 32);

    if (rs >= 4) {
        /* SR4-SR7 are stored in env, not as TCG globals. */
        tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, sr[rs]));
        /* NOTE(review): presumably TB_FLAG_SR_SAME caches "all SRs
           equal"; any high-SR write invalidates it — confirm. */
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], tmp);
    }

    return nullify_end(ctx);
}
2237 
trans_mtctl(DisasContext * ctx,arg_mtctl * a)2238 static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
2239 {
2240     unsigned ctl = a->t;
2241     TCGv_i64 reg;
2242     TCGv_i64 tmp;
2243 
2244     if (ctl == CR_SAR) {
2245         reg = load_gpr(ctx, a->r);
2246         tmp = tcg_temp_new_i64();
2247         tcg_gen_andi_i64(tmp, reg, ctx->is_pa20 ? 63 : 31);
2248         save_or_nullify(ctx, cpu_sar, tmp);
2249 
2250         ctx->null_cond = cond_make_f();
2251         return true;
2252     }
2253 
2254     /* All other control registers are privileged or read-only.  */
2255     CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2256 
2257 #ifndef CONFIG_USER_ONLY
2258     nullify_over(ctx);
2259 
2260     if (ctx->is_pa20) {
2261         reg = load_gpr(ctx, a->r);
2262     } else {
2263         reg = tcg_temp_new_i64();
2264         tcg_gen_ext32u_i64(reg, load_gpr(ctx, a->r));
2265     }
2266 
2267     switch (ctl) {
2268     case CR_IT:
2269         if (translator_io_start(&ctx->base)) {
2270             ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2271         }
2272         gen_helper_write_interval_timer(tcg_env, reg);
2273         break;
2274     case CR_EIRR:
2275         /* Helper modifies interrupt lines and is therefore IO. */
2276         translator_io_start(&ctx->base);
2277         gen_helper_write_eirr(tcg_env, reg);
2278         /* Exit to re-evaluate interrupts in the main loop. */
2279         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2280         break;
2281 
2282     case CR_IIASQ:
2283     case CR_IIAOQ:
2284         /* FIXME: Respect PSW_Q bit */
2285         /* The write advances the queue and stores to the back element.  */
2286         tmp = tcg_temp_new_i64();
2287         tcg_gen_ld_i64(tmp, tcg_env,
2288                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2289         tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2290         tcg_gen_st_i64(reg, tcg_env,
2291                        offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2292         break;
2293 
2294     case CR_PID1:
2295     case CR_PID2:
2296     case CR_PID3:
2297     case CR_PID4:
2298         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2299 #ifndef CONFIG_USER_ONLY
2300         gen_helper_change_prot_id(tcg_env);
2301 #endif
2302         break;
2303 
2304     case CR_EIEM:
2305         /* Exit to re-evaluate interrupts in the main loop. */
2306         ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2307         /* FALLTHRU */
2308     default:
2309         tcg_gen_st_i64(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2310         break;
2311     }
2312     return nullify_end(ctx);
2313 #endif
2314 }
2315 
trans_mtsarcm(DisasContext * ctx,arg_mtsarcm * a)2316 static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
2317 {
2318     TCGv_i64 tmp = tcg_temp_new_i64();
2319 
2320     tcg_gen_not_i64(tmp, load_gpr(ctx, a->r));
2321     tcg_gen_andi_i64(tmp, tmp, ctx->is_pa20 ? 63 : 31);
2322     save_or_nullify(ctx, cpu_sar, tmp);
2323 
2324     ctx->null_cond = cond_make_f();
2325     return true;
2326 }
2327 
/* LDSID: load the space identifier selected by (sp, GR[b]). */
static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_i64 dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_i64(dest, 0);
#else
    /* The space value lives in the high 32 bits; shift it down. */
    tcg_gen_mov_i64(dest, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(dest, dest, 32);
#endif
    save_gpr(ctx, a->t, dest);

    ctx->null_cond = cond_make_f();
    return true;
}
2344 
/* RSM: clear system-mask bits in the PSW, returning the old value. */
static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
#ifdef CONFIG_USER_ONLY
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#else
    TCGv_i64 tmp;

    /* HP-UX 11i and HP ODE use rsm for read-access to PSW */
    if (a->i) {
        /* Only the i == 0 (pure read) form is unprivileged. */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    }

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_i64(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2370 
/* SSM: set system-mask bits in the PSW, returning the old value. */
static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_gen_ld_i64(tmp, tcg_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_i64(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, tcg_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2390 
/* MTSM: replace the PSW system mask with GR[r].  The previous mask
   returned by the helper is discarded, as MTSM has no target register. */
static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();
    gen_helper_swap_system_mask(tmp, tcg_env, reg);

    /* Exit the TB to recognize new interrupts.  */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
2407 
/* RFI / RFI,R: return from interruption; rfi_r selects the ,R form. */
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(tcg_env);
    } else {
        gen_helper_rfi(tcg_env);
    }
    /* Exit the TB to recognize new interrupts.  */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}
2426 
/* RFI: plain return from interruption. */
static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}
2431 
/* RFI,R: return from interruption, ,R variant (see do_rfi). */
static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}
2436 
/* Halt the machine via helper; privileged, ends translation. */
static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    set_psw_xb(ctx, 0);
    nullify_over(ctx);
    gen_helper_halt(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
2448 
/* Reset the machine via helper; privileged, ends translation. */
static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    set_psw_xb(ctx, 0);
    nullify_over(ctx);
    gen_helper_reset(tcg_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
2460 
do_getshadowregs(DisasContext * ctx)2461 static bool do_getshadowregs(DisasContext *ctx)
2462 {
2463     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2464     nullify_over(ctx);
2465     tcg_gen_ld_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2466     tcg_gen_ld_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2467     tcg_gen_ld_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2468     tcg_gen_ld_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2469     tcg_gen_ld_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2470     tcg_gen_ld_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2471     tcg_gen_ld_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2472     return nullify_end(ctx);
2473 }
2474 
do_putshadowregs(DisasContext * ctx)2475 static bool do_putshadowregs(DisasContext *ctx)
2476 {
2477     CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2478     nullify_over(ctx);
2479     tcg_gen_st_i64(cpu_gr[1], tcg_env, offsetof(CPUHPPAState, shadow[0]));
2480     tcg_gen_st_i64(cpu_gr[8], tcg_env, offsetof(CPUHPPAState, shadow[1]));
2481     tcg_gen_st_i64(cpu_gr[9], tcg_env, offsetof(CPUHPPAState, shadow[2]));
2482     tcg_gen_st_i64(cpu_gr[16], tcg_env, offsetof(CPUHPPAState, shadow[3]));
2483     tcg_gen_st_i64(cpu_gr[17], tcg_env, offsetof(CPUHPPAState, shadow[4]));
2484     tcg_gen_st_i64(cpu_gr[24], tcg_env, offsetof(CPUHPPAState, shadow[5]));
2485     tcg_gen_st_i64(cpu_gr[25], tcg_env, offsetof(CPUHPPAState, shadow[6]));
2486     return nullify_end(ctx);
2487 }
2488 
/* GFR-style restore of the shadowed general registers. */
static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    return do_getshadowregs(ctx);
}
2493 
trans_nop_addrx(DisasContext * ctx,arg_ldst * a)2494 static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
2495 {
2496     if (a->m) {
2497         TCGv_i64 dest = dest_gpr(ctx, a->b);
2498         TCGv_i64 src1 = load_gpr(ctx, a->b);
2499         TCGv_i64 src2 = load_gpr(ctx, a->x);
2500 
2501         /* The only thing we need to do is the base register modification.  */
2502         tcg_gen_add_i64(dest, src1, src2);
2503         save_gpr(ctx, a->b, dest);
2504     }
2505     ctx->null_cond = cond_make_f();
2506     return true;
2507 }
2508 
/* FIC: flush instruction cache, modeled as an address-only nop. */
static bool trans_fic(DisasContext *ctx, arg_ldst *a)
{
    /* End TB for flush instruction cache, so we pick up new insns. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    return trans_nop_addrx(ctx, a);
}
2515 
trans_probe(DisasContext * ctx,arg_probe * a)2516 static bool trans_probe(DisasContext *ctx, arg_probe *a)
2517 {
2518     TCGv_i64 dest, ofs;
2519     TCGv_i32 level, want;
2520     TCGv_i64 addr;
2521 
2522     nullify_over(ctx);
2523 
2524     dest = dest_gpr(ctx, a->t);
2525     form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2526 
2527     if (a->imm) {
2528         level = tcg_constant_i32(a->ri & 3);
2529     } else {
2530         level = tcg_temp_new_i32();
2531         tcg_gen_extrl_i64_i32(level, load_gpr(ctx, a->ri));
2532         tcg_gen_andi_i32(level, level, 3);
2533     }
2534     want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
2535 
2536     gen_helper_probe(dest, tcg_env, addr, level, want);
2537 
2538     save_gpr(ctx, a->t, dest);
2539     return nullify_end(ctx);
2540 }
2541 
/* IITLBA/IITLBP/IDTLBA/IDTLBP (pa1.x): insert a TLB address or
   protection entry.  Not a valid encoding on pa2.0. */
static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
{
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs, reg;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
    reg = load_gpr(ctx, a->r);
    /* a->addr selects the address-insert form vs protection-insert. */
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2569 
/* PDTLB/PITLB: purge a TLB entry, optionally only the local TLB. */
static bool do_pxtlb(DisasContext *ctx, arg_ldst *a, bool local)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr;
    TCGv_i64 ofs;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    /*
     * Page align now, rather than later, so that we can add in the
     * page_size field from pa2.0 from the low 4 bits of GR[b].
     */
    tcg_gen_andi_i64(addr, addr, TARGET_PAGE_MASK);
    if (ctx->is_pa20) {
        tcg_gen_deposit_i64(addr, addr, load_gpr(ctx, a->b), 0, 4);
    }

    if (local) {
        gen_helper_ptlb_l(tcg_env, addr);
    } else {
        gen_helper_ptlb(tcg_env, addr);
    }

    /* Apply the base register modification of the ,M completer. */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2607 
/* PDTLB/PITLB: purge a (global) TLB entry. */
static bool trans_pxtlb(DisasContext *ctx, arg_ldst *a)
{
    return do_pxtlb(ctx, a, false);
}
2612 
/* PDTLB,L/PITLB,L: local TLB purge; only valid on pa2.0. */
static bool trans_pxtlb_l(DisasContext *ctx, arg_ldst *a)
{
    return ctx->is_pa20 && do_pxtlb(ctx, a, true);
}
2617 
/* PDTLBE/PITLBE: purge the entire TLB (modeled as a full flush). */
static bool trans_pxtlbe(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    /* Perform the base register modification, then flush everything. */
    trans_nop_addrx(ctx, a);
    gen_helper_ptlbe(tcg_env);

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2634 
/*
 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
 * See
 *     https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
 *     page 13-9 (195/206)
 */
static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
{
    if (ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 addr, atl, stl;
    TCGv_i64 reg;

    nullify_over(ctx);

    /*
     * FIXME:
     *  if (not (pcxl or pcxl2))
     *    return gen_illegal(ctx);
     */

    atl = tcg_temp_new_i64();
    stl = tcg_temp_new_i64();
    addr = tcg_temp_new_i64();

    /* The virtual address comes from the interruption registers:
       ISR/IOR for data, IIASQ/IIAOQ for instruction inserts. */
    tcg_gen_ld32u_i64(stl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
                      : offsetof(CPUHPPAState, cr[CR_IIASQ]));
    tcg_gen_ld32u_i64(atl, tcg_env,
                      a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
                      : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
    /* Combine space (high 32 bits) and offset into one address. */
    tcg_gen_shli_i64(stl, stl, 32);
    tcg_gen_or_i64(addr, atl, stl);

    reg = load_gpr(ctx, a->r);
    if (a->addr) {
        gen_helper_itlba_pa11(tcg_env, addr, reg);
    } else {
        gen_helper_itlbp_pa11(tcg_env, addr, reg);
    }

    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2686 
/* IDTLBT/IITLBT (pa2.0): insert a complete TLB entry from two GRs. */
static bool trans_ixtlbt(DisasContext *ctx, arg_ixtlbt *a)
{
    if (!ctx->is_pa20) {
        return false;
    }
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);
        TCGv_i64 src2 = load_gpr(ctx, a->r2);

        if (a->data) {
            gen_helper_idtlbt_pa20(tcg_env, src1, src2);
        } else {
            gen_helper_iitlbt_pa20(tcg_env, src1, src2);
        }
    }
    /* Exit TB for TLB change if mmu is enabled.  */
    if (ctx->tb_flags & PSW_C) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    }
    return nullify_end(ctx);
#endif
}
2712 
/* LPA: translate a virtual address to a physical address via helper. */
static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_i64 vaddr;
    TCGv_i64 ofs, paddr;

    nullify_over(ctx);

    form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);

    paddr = tcg_temp_new_i64();
    gen_helper_lpa(paddr, tcg_env, vaddr);

    /* Note that physical address result overrides base modification.  */
    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, paddr);

    return nullify_end(ctx);
#endif
}
2736 
/* LCI: load coherence index. */
static bool trans_lci(DisasContext *ctx, arg_lci *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);

    /* The Coherence Index is an implementation-defined function of the
       physical address.  Two addresses with the same CI have a coherent
       view of the cache.  Our implementation is to return 0 for all,
       since the entire address space is coherent.  */
    save_gpr(ctx, a->t, ctx->zero);

    ctx->null_cond = cond_make_f();
    return true;
}
2750 
/*
 * ADD and its completers, dispatched to do_add_reg.
 * NOTE(review): the boolean flags appear to be (l, tsv, ?, c), matching
 * the completer in each wrapper's name -- confirm against do_add_reg.
 */

/* Plain ADD.  */
static bool trans_add(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, false);
}

/* ADD,L -- logical (no carry/overflow side effects).  */
static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, true, false, false, false);
}

/* ADD,TSV -- trap on signed overflow.  */
static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, false);
}

/* ADD,C -- add with carry-in.  */
static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, false, false, true);
}

/* ADD,C,TSV -- add with carry, trap on signed overflow.  */
static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_d_sh *a)
{
    return do_add_reg(ctx, a, false, true, false, true);
}
2775 
/*
 * SUB and its completers, dispatched to do_sub_reg.
 * NOTE(review): flags appear to be (tsv, b, tc), matching the completer
 * in each wrapper's name -- confirm against do_sub_reg.
 */

/* Plain SUB.  */
static bool trans_sub(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, false);
}

/* SUB,TSV -- trap on signed overflow.  */
static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, false);
}

/* SUB,TC -- trap on condition.  */
static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, false, true);
}

/* SUB,TSV,TC -- trap on signed overflow or condition.  */
static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, false, true);
}

/* SUB,B -- subtract with borrow.  */
static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, false, true, false);
}

/* SUB,B,TSV -- subtract with borrow, trap on signed overflow.  */
static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_sub_reg(ctx, a, true, true, false);
}
2805 
/* ANDCM: rt = r1 & ~r2.  */
static bool trans_andcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_i64);
}

/* AND: rt = r1 & r2.  */
static bool trans_and(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_i64);
}
2815 
/*
 * OR: rt = r1 | r2.  When no condition is requested (cf == 0), several
 * encodings are special-cased: OR to %r0 is NOP, OR with %r0 is COPY,
 * and two QEMU-specific idle patterns halt the cpu (system mode only).
 * Everything else falls through to the generic logical-op path.
 */
static bool trans_or(DisasContext *ctx, arg_rrr_cf_d *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            ctx->null_cond = cond_make_f();
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                /* COPY from %r0 is a load of constant zero.  */
                TCGv_i64 dest = dest_gpr(ctx, rt);
                tcg_gen_movi_i64(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            ctx->null_cond = cond_make_f();
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */

            set_psw_xb(ctx, 0);

            nullify_over(ctx);

            /* Advance the instruction queue.  */
            install_iaq_entries(ctx, &ctx->iaq_b, NULL);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_i64);
}
2869 
/* XOR: rt = r1 ^ r2.  */
static bool trans_xor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_i64);
}
2874 
/* CMPCLR: compare r1 with r2 (via do_cmpclr) and clear rt.  */
static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2;

    /* Only take the slow nullification path when a condition is used.  */
    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf, a->d);
    return nullify_end(ctx);
}
2887 
/*
 * UXOR: rt = r1 ^ r2, with the nullification condition computed from
 * the per-unit (sub-word) zero test of the result.
 */
static bool trans_uxor(DisasContext *ctx, arg_rrr_cf_d *a)
{
    TCGv_i64 tcg_r1, tcg_r2, dest;

    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    tcg_gen_xor_i64(dest, tcg_r1, tcg_r2);
    save_gpr(ctx, a->t, dest);

    ctx->null_cond = do_unit_zero_cond(a->cf, a->d, dest);
    return nullify_end(ctx);
}
2906 
/*
 * UADDCM[,TC]: rt = r1 + ~r2, with unit (sub-word) carry conditions.
 * The cf == 0 case needs no carry tracking and is computed directly;
 * otherwise the ones-complement is fed to the generic unit add/sub.
 */
static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a, bool is_tc)
{
    TCGv_i64 tcg_r1, tcg_r2, tmp;

    if (a->cf == 0) {
        tcg_r2 = load_gpr(ctx, a->r2);
        tmp = dest_gpr(ctx, a->t);

        if (a->r1 == 0) {
            /* UADDCM r0,src,dst is the common idiom for dst = ~src. */
            tcg_gen_not_i64(tmp, tcg_r2);
        } else {
            /*
             * Recall that r1 - r2 == r1 + ~r2 + 1.
             * Thus r1 + ~r2 == r1 - r2 - 1,
             * which does not require an extra temporary.
             */
            tcg_r1 = load_gpr(ctx, a->r1);
            tcg_gen_sub_i64(tmp, tcg_r1, tcg_r2);
            tcg_gen_subi_i64(tmp, tmp, 1);
        }
        save_gpr(ctx, a->t, tmp);
        ctx->null_cond = cond_make_f();
        return true;
    }

    /* Conditional case: compute ~r2 and use the generic unit add.  */
    nullify_over(ctx);
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = tcg_temp_new_i64();
    tcg_gen_not_i64(tmp, tcg_r2);
    do_unit_addsub(ctx, a->t, tcg_r1, tmp, a->cf, a->d, is_tc, true);
    return nullify_end(ctx);
}
2941 
/* UADDCM: unit add complement.  */
static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, false);
}

/* UADDCM,TC: unit add complement, trap on condition.  */
static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf_d *a)
{
    return do_uaddcm(ctx, a, true);
}
2951 
/*
 * DCOR / IDCOR: decimal correct.  Uses the per-nibble carry bits saved
 * in PSW[CB] to correct the result of a prior binary add of packed BCD
 * operands: each digit that needs fixing gets a correction of 6 applied
 * through the generic unit add/sub.
 *
 * is_i selects IDCOR (intermediate correct); for plain DCOR the carry
 * bits are inverted so digits *without* a carry are selected.
 */
static bool do_dcor(DisasContext *ctx, arg_rr_cf_d *a, bool is_i)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    /* Concatenate cb_msb:cb and shift down 4, aligning each digit's
       carry-out with that digit's low bit.  */
    tcg_gen_extract2_i64(tmp, cpu_psw_cb, cpu_psw_cb_msb, 4);
    if (!is_i) {
        tcg_gen_not_i64(tmp, tmp);
    }
    /* Keep one selector bit per 4-bit digit...  */
    tcg_gen_andi_i64(tmp, tmp, 0x1111111111111111ull);
    /* ... and turn each selected bit into a correction of 6.  */
    tcg_gen_muli_i64(tmp, tmp, 6);
    do_unit_addsub(ctx, a->t, load_gpr(ctx, a->r), tmp,
                   a->cf, a->d, false, is_i);
    return nullify_end(ctx);
}
2969 
/* DCOR: decimal correct.  */
static bool trans_dcor(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, false);
}

/* IDCOR: intermediate decimal correct.  */
static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf_d *a)
{
    return do_dcor(ctx, a, true);
}
2979 
/*
 * DS: divide step.  Performs one bit of a non-restoring division:
 * shift (r1:carry) left one, then add or subtract r2 depending on
 * PSW[V], updating PSW[CB] and PSW[V] for the next step.
 */
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_i64 dest, add1, add2, addc, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new_i64();
    add2 = tcg_temp_new_i64();
    addc = tcg_temp_new_i64();
    dest = tcg_temp_new_i64();

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_i64(add1, in1, in1);
    tcg_gen_add_i64(add1, add1, get_psw_carry(ctx, false));

    /*
     * Add or subtract R2, depending on PSW[V].  Proper computation of
     * carry requires that we subtract via + ~R2 + 1, as described in
     * the manual.  By extracting and masking V, we can produce the
     * proper inputs to the addition without movcond.
     */
    tcg_gen_sextract_i64(addc, cpu_psw_v, 31, 1);
    tcg_gen_xor_i64(add2, in2, addc);
    tcg_gen_andi_i64(addc, addc, 1);

    tcg_gen_addcio_i64(dest, cpu_psw_cb_msb, add1, add2, addc);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_i64(cpu_psw_cb, add1, add2);
    tcg_gen_xor_i64(cpu_psw_cb, cpu_psw_cb, dest);

    /*
     * Write back PSW[V] for the division step.
     * Shift cb{8} from where it lives in bit 32 to bit 31,
     * so that it overlaps r2{32} in bit 31.
     */
    tcg_gen_shri_i64(cpu_psw_v, cpu_psw_cb, 1);
    tcg_gen_xor_i64(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_i64 sv = NULL, uv = NULL;
        /* Only compute overflow / carry when the condition needs it.  */
        if (cond_need_sv(a->cf >> 1)) {
            sv = do_add_sv(ctx, dest, add1, add2, in1, 1, false);
        } else if (cond_need_cb(a->cf >> 1)) {
            uv = do_add_uv(ctx, cpu_psw_cb, NULL, in1, 1, false);
        }
        ctx->null_cond = do_cond(ctx, a->cf, false, dest, uv, sv);
    }

    return nullify_end(ctx);
}
3038 
/*
 * ADDI / SUBI with completers, dispatched to do_add_imm / do_sub_imm.
 * NOTE(review): flags appear to be (tsv, tc) for addi and (tsv) for
 * subi, matching the completer names -- confirm against the helpers.
 */

/* ADDI: rt = r + im11.  */
static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

/* ADDI,TSV -- trap on signed overflow.  */
static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

/* ADDI,TC -- trap on condition.  */
static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

/* ADDI,TC,TSV -- trap on condition or signed overflow.  */
static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

/* SUBI: rt = im11 - r.  */
static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

/* SUBI,TSV -- trap on signed overflow.  */
static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}
3068 
/* CMPICLR: compare immediate with r (via do_cmpclr) and clear rt.  */
static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
{
    TCGv_i64 tcg_im, tcg_r2;

    /* Only take the slow nullification path when a condition is used.  */
    if (a->cf) {
        nullify_over(ctx);
    }

    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);

    return nullify_end(ctx);
}
3083 
/*
 * Common translation for the pa2.0 multimedia (SIMD-in-GR) ops with
 * two register sources: rt = fn(r1, r2).  Rejected on pa1.x.
 */
static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
                          void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3104 
/*
 * Common translation for pa2.0 multimedia shift-by-immediate ops:
 * rt = fn(r, i).  Rejected on pa1.x.
 */
static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
                             void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 r, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r, a->i);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3124 
/*
 * Common translation for pa2.0 multimedia shift-and-add ops:
 * rt = fn(r1, r2, sh), with the shift amount passed as an i32
 * constant to the helper.  Rejected on pa1.x.
 */
static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
                                void (*fn)(TCGv_i64, TCGv_i64,
                                           TCGv_i64, TCGv_i32))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2, tcg_constant_i32(a->sh));
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3146 
/* HADD: parallel halfword add, modulo arithmetic.  */
static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
}

/* HADD,SS: parallel halfword add, signed saturation.  */
static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_ss);
}

/* HADD,US: parallel halfword add, unsigned saturation.  */
static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_us);
}

/* HAVG: parallel halfword average.  */
static bool trans_havg(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_havg);
}

/* HSHL: parallel halfword shift left by immediate.  */
static bool trans_hshl(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
}

/* HSHR: parallel halfword arithmetic shift right by immediate.  */
static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
}

/* HSHR,U: parallel halfword logical shift right by immediate.  */
static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
}

/* HSHLADD: parallel halfword shift left and add.  */
static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
}

/* HSHRADD: parallel halfword shift right and add.  */
static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
}

/* HSUB: parallel halfword subtract, modulo arithmetic.  */
static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
}

/* HSUB,SS: parallel halfword subtract, signed saturation.  */
static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_ss);
}

/* HSUB,US: parallel halfword subtract, unsigned saturation.  */
static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_us);
}
3206 
/*
 * MIXH,L: take the left (high) halfword of each 32-bit word:
 * dst = (r1 & mask) | ((r2 & mask) >> 16).
 */
static void gen_mixh_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    const uint64_t left_mask = 0xffff0000ffff0000ull;
    TCGv_i64 lo = tcg_temp_new_i64();

    tcg_gen_andi_i64(lo, r2, left_mask);
    tcg_gen_andi_i64(dst, r1, left_mask);
    tcg_gen_shri_i64(lo, lo, 16);
    tcg_gen_or_i64(dst, dst, lo);
}
3217 
/* MIXH,L: mix left halfwords of r1 and r2.  */
static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_l);
}
3222 
/*
 * MIXH,R: take the right (low) halfword of each 32-bit word:
 * dst = (r2 & mask) | ((r1 & mask) << 16).
 */
static void gen_mixh_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    const uint64_t right_mask = 0x0000ffff0000ffffull;
    TCGv_i64 hi = tcg_temp_new_i64();

    tcg_gen_andi_i64(hi, r1, right_mask);
    tcg_gen_andi_i64(dst, r2, right_mask);
    tcg_gen_shli_i64(hi, hi, 16);
    tcg_gen_or_i64(dst, dst, hi);
}
3233 
/* MIXH,R: mix right halfwords of r1 and r2.  */
static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_r);
}
3238 
/* MIXW,L: dst = r1{high word} : r2{high word}.  */
static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_shri_i64(tmp, r2, 32);
    tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
}

/* MIXW,L: mix left (high) words of r1 and r2.  */
static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_l);
}
3251 
/* MIXW,R: dst = r1{low word} : r2{low word}.  */
static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
}

/* MIXW,R: mix right (low) words of r1 and r2.  */
static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_r);
}
3261 
/*
 * PERMH (pa2.0): permute halfwords.  Each of the four result halfwords
 * is selected from r1 by the two-bit indices c0..c3 (0 = leftmost), then
 * the four pieces are reassembled with deposits.
 */
static bool trans_permh(DisasContext *ctx, arg_permh *a)
{
    TCGv_i64 r, t0, t1, t2, t3;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r1);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();

    /* Selector 0 names the most-significant halfword, hence 3 - cN.  */
    tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
    tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
    tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
    tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);

    /* Pairwise combine, then merge the two halves.  */
    tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
    tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
    tcg_gen_deposit_i64(t0, t2, t0, 32, 32);

    save_gpr(ctx, a->t, t0);
    return nullify_end(ctx);
}
3290 
/* LDB/LDH/LDW/LDD: indexed/short-displacement loads.  */
static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (ctx->is_pa20) {
       /*
        * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
        * Any base modification still occurs.
        */
        if (a->t == 0) {
            return trans_nop_addrx(ctx, a);
        }
    } else if (a->size > MO_32) {
        /* 64-bit loads do not exist on pa1.x.  */
        return gen_illegal(ctx);
    }
    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size | MO_TE);
}
3307 
/* STB/STH/STW/STD: short-displacement stores (never indexed/scaled).  */
static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    /* 64-bit stores do not exist on pa1.x.  */
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
}
3316 
/*
 * LDCW/LDCD: load and clear word/doubleword, the PA-RISC atomic
 * primitive.  Implemented as an atomic exchange with zero.
 */
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_i64 dest, ofs;
    TCGv_i64 addr;

    /* LDCD does not exist on pa1.x.  */
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = tcg_temp_new_i64();
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? 3 : 0,
             a->disp, a->sp, a->m, MMU_DISABLED(ctx));

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    /* Atomically read the old value and store zero.  */
    tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
3359 
/*
 * STBY: store bytes.  a->a selects the "ending" (e) vs "beginning" (b)
 * variant; helpers implement the partial-word store, with separate
 * entry points when other cpus may run in parallel.
 */
static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification writes back the word-aligned address.  */
        tcg_gen_andi_i64(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
3390 
/*
 * STDBY (pa2.0): store doubleword bytes; the 64-bit analogue of STBY,
 * with the base write-back aligned to 8 bytes instead of 4.
 */
static bool trans_stdby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    if (!ctx->is_pa20) {
        return false;
    }
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification writes back the doubleword-aligned address.  */
        tcg_gen_andi_i64(ofs, ofs, ~7);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
3424 
/*
 * LDWA/LDDA: load absolute -- a normal load performed with translation
 * disabled, implemented by temporarily switching to the absolute mmu
 * index (wide or narrow per PSW[W]).  Privileged.
 * NOTE(review): the return value of trans_ld is deliberately ignored
 * and true is returned unconditionally -- confirm this matches intent.
 */
static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
3435 
/*
 * STWA/STDA: store absolute -- a normal store performed with translation
 * disabled; see trans_lda for the mmu-index switching scheme.  Privileged.
 */
static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
3446 
/* LDIL: load the (pre-shifted) immediate into rt.  */
static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_i64(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    ctx->null_cond = cond_make_f();
    return true;
}
3456 
/* ADDIL: GR1 = r + (pre-shifted) immediate; result always lands in %r1.  */
static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
    TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    ctx->null_cond = cond_make_f();
    return true;
}
3467 
/* LDO: rt = rb + imm (load offset; no memory access).  */
static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_i64.  */
    if (a->b == 0) {
        tcg_gen_movi_i64(tcg_rt, a->i);
    } else {
        tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    ctx->null_cond = cond_make_f();
    return true;
}
3483 
/*
 * Common translation for COMPB/COMPIB: compute in1 - gpr[r], derive the
 * subtract condition (c, f, d), and emit the conditional branch to
 * disp with nullify bit n.  The difference itself is not written back.
 */
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, bool d, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();

    tcg_gen_sub_i64(dest, in1, in2);

    /* Signed overflow is only needed for some conditions.  */
    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}
3503 
/* CMPB: compare r1 with r2 and branch.  The ,d (64-bit) form is pa2.0.  */
static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
                   a->c, a->f, a->d, a->n, a->disp);
}

/* CMPIB: compare immediate with r and branch.  ,d form is pa2.0.  */
static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
                   a->c, a->f, a->d, a->n, a->disp);
}
3523 
/*
 * Common translation for ADDB/ADDIB: dest = in1 + gpr[r], write dest
 * back to r, then conditionally branch to disp (nullify bit n).
 * Carry and overflow are computed only when the condition needs them.
 */
static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv, cb_cond;
    DisasCond cond;
    bool d = false;

    /*
     * For hppa64, the ADDB conditions change with PSW.W,
     * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
     */
    if (ctx->tb_flags & PSW_W) {
        d = c >= 5;
        if (d) {
            c &= 3;
        }
    }

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();
    sv = NULL;
    cb_cond = NULL;

    if (cond_need_cb(c)) {
        /* Perform the add via add2 so the carry-out is visible.  */
        TCGv_i64 cb = tcg_temp_new_i64();
        TCGv_i64 cb_msb = tcg_temp_new_i64();

        tcg_gen_add2_i64(dest, cb_msb, in1, ctx->zero, in2, ctx->zero);
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        cb_cond = get_carry(ctx, d, cb, cb_msb);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2, in1, 0, d);
    }

    cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}
3566 
/* ADDB: add r1 to r2 and branch.  */
static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

/* ADDIB: add immediate to r and branch.  */
static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
}
3578 
/*
 * BB,sar: branch on the bit of r selected by CR[SAR].  The selected bit
 * is shifted into the sign position and tested; a->c inverts the sense.
 */
static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    if (a->d) {
        tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
    } else {
        /* Force shift into [32,63] */
        tcg_gen_ori_i64(tmp, cpu_sar, 32);
        tcg_gen_shl_i64(tmp, tcg_r, tmp);
    }

    /* Test the sign bit of the shifted value.  */
    cond = cond_make_ti(a->c ? TCG_COND_GE : TCG_COND_LT, tmp, 0);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3599 
/*
 * BB,imm: branch on a fixed bit of r.  Bit p counts from the left; the
 * narrow form offsets p by 32.  Implemented as a test-under-mask
 * condition; a->c inverts the sense.
 */
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    DisasCond cond;
    int p = a->p | (a->d ? 0 : 32);

    nullify_over(ctx);
    cond = cond_make_vi(a->c ? TCG_COND_TSTEQ : TCG_COND_TSTNE,
                        load_gpr(ctx, a->r), 1ull << (63 - p));
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3610 
/* MOVB: copy r1 into r2, then branch on a condition of the moved value.  */
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_i64(dest, 0);
    } else {
        tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
    }

    /* All MOVB conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3629 
/* MOVIB: load immediate into r, then branch on a condition of it.  */
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_i64(dest, a->i);

    /* All MOVBI conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
3644 
/*
 * SHRPW/SHRPD with variable shift amount from CR[SAR]: shift the
 * double-width pair r1:r2 right and keep the low word/doubleword.
 * Special cases: r1 == 0 is a plain shift right of r2; r1 == r2 is a
 * rotate right.  The ,d (64-bit) form is pa2.0 only.
 */
static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
{
    TCGv_i64 dest, src2;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* Plain shift right of src2.  */
        if (a->d) {
            tcg_gen_shr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i64 tmp = tcg_temp_new_i64();

            /* 32-bit form: zero-extend and shift by sar mod 32.  */
            tcg_gen_ext32u_i64(dest, src2);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            tcg_gen_shr_i64(dest, dest, tmp);
        }
    } else if (a->r1 == a->r2) {
        /* Same register twice: rotate right.  */
        if (a->d) {
            tcg_gen_rotr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i32 t32 = tcg_temp_new_i32();
            TCGv_i32 s32 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t32, src2);
            tcg_gen_extrl_i64_i32(s32, cpu_sar);
            tcg_gen_andi_i32(s32, s32, 31);
            tcg_gen_rotr_i32(t32, t32, s32);
            tcg_gen_extu_i32_i64(dest, t32);
        }
    } else {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);

        if (a->d) {
            /* General 128-bit funnel shift, done as two shifts and an or.
               The extra shli avoids shifting by 64 when sar == 0.  */
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 n = tcg_temp_new_i64();

            tcg_gen_xori_i64(n, cpu_sar, 63);
            tcg_gen_shl_i64(t, src1, n);
            tcg_gen_shli_i64(t, t, 1);
            tcg_gen_shr_i64(dest, src2, cpu_sar);
            tcg_gen_or_i64(dest, dest, t);
        } else {
            /* 32-bit pair fits in one i64: concatenate and shift.  */
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 s = tcg_temp_new_i64();

            tcg_gen_concat32_i64(t, src2, src1);
            tcg_gen_andi_i64(s, cpu_sar, 31);
            tcg_gen_shr_i64(dest, t, s);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3708 
/*
 * SHRPW/SHRPD with an immediate shift amount: shift the concatenation
 * r1:r2 right by a fixed count and keep the low word.
 */
static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
{
    unsigned width, sa;
    TCGv_i64 dest, t2;

    /* Doubleword operation requires PA2.0.  */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    width = a->d ? 64 : 32;
    /* cpos is a big-endian bit position; convert to a shift count.  */
    sa = width - 1 - a->cpos;

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* High half is zero: a plain extract from r2.  */
        tcg_gen_extract_i64(dest, t2, sa, width - sa);
    } else if (width == TARGET_LONG_BITS) {
        tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
    } else {
        /* 32-bit operation on a 64-bit host word.  */
        assert(!a->d);
        if (a->r1 == a->r2) {
            /* Both halves identical: the double shift is a rotate.  */
            TCGv_i32 t32 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t32, t2);
            tcg_gen_rotri_i32(t32, t32, sa);
            tcg_gen_extu_i32_i64(dest, t32);
        } else {
            /* Form r1:r2 in 64 bits and extract the result word.  */
            tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
            tcg_gen_extract_i64(dest, dest, sa, 32);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3748 
/*
 * EXTRW/EXTRD with the bit position taken from SAR: extract a->len
 * bits of GR[r], sign-extended when a->se is set, else zero-extended.
 */
static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
{
    unsigned widthm1 = a->d ? 63 : 31;
    TCGv_i64 dest, src, tmp;

    /* Doubleword operation requires PA2.0.  */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
    tcg_gen_xori_i64(tmp, tmp, widthm1);

    if (a->se) {
        /* Sign-extend first so the 32-bit sign bit propagates through
           the arithmetic shift.  */
        if (!a->d) {
            tcg_gen_ext32s_i64(dest, src);
            src = dest;
        }
        tcg_gen_sar_i64(dest, src, tmp);
        tcg_gen_sextract_i64(dest, dest, 0, a->len);
    } else {
        if (!a->d) {
            tcg_gen_ext32u_i64(dest, src);
            src = dest;
        }
        tcg_gen_shr_i64(dest, src, tmp);
        tcg_gen_extract_i64(dest, dest, 0, a->len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3790 
/*
 * EXTRW/EXTRD with an immediate bit position: extract a->len bits of
 * GR[r] at a fixed position, signed (a->se) or unsigned.
 */
static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
{
    unsigned len, cpos, width;
    TCGv_i64 dest, src;

    /* Doubleword operation requires PA2.0.  */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Convert the big-endian bit position to a little-endian one and
       clamp the length so the field stays within the word.  */
    cpos = width - 1 - a->pos;
    if (cpos + len > width) {
        len = width - cpos;
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_i64(dest, src, cpos, len);
    } else {
        tcg_gen_extract_i64(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3823 
/*
 * DEPWI/DEPDI with immediate position: deposit the immediate a->i
 * into a->len bits of GR[t] at a->cpos.  With a->nz clear, the bits
 * outside the field are zeroed rather than preserved.
 */
static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
{
    unsigned len, width;
    uint64_t mask0, mask1;
    TCGv_i64 dest;

    /* Doubleword operation requires PA2.0.  */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Clamp the length so the field stays within the word.  */
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    /* mask0: the field deposited over zeros; mask1: over all-ones.  */
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        /* Merge the immediate field into the existing value.  */
        TCGv_i64 src = load_gpr(ctx, a->t);
        tcg_gen_andi_i64(dest, src, mask1);
        tcg_gen_ori_i64(dest, dest, mask0);
    } else {
        tcg_gen_movi_i64(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3860 
/*
 * DEPW/DEPD with immediate position: deposit GR[r] into a->len bits
 * of GR[t] at a->cpos.  With a->nz clear, bits outside the field are
 * zeroed rather than preserved.
 */
static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
    /* rs supplies the preserved bits; 0 means deposit over zeros.  */
    unsigned rs = a->nz ? a->t : 0;
    unsigned len, width;
    TCGv_i64 dest, val;

    /* Doubleword operation requires PA2.0.  */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Clamp the length so the field stays within the word.  */
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    return nullify_end(ctx);
}
3893 
/*
 * Common code for the deposit forms whose position comes from SAR.
 * Deposits the low len bits of val into GR[rt]; with nz clear, the
 * remaining bits are zeroed rather than preserved.  Callers perform
 * any required nullify_over before calling.
 */
static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
                       bool d, bool nz, unsigned len, TCGv_i64 val)
{
    unsigned rs = nz ? rt : 0;
    unsigned widthm1 = d ? 63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);

    /* Keep only the low len bits of the value to deposit.  */
    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        /* Clear the field in the old value, then merge in the new.  */
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    return nullify_end(ctx);
}
3927 
/* DEPW/DEPD with the deposit position taken from SAR.  */
static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    /* Doubleword operation requires PA2.0.  */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}
3939 
/* DEPWI/DEPDI with the deposit position taken from SAR.  */
static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    /* Doubleword operation requires PA2.0.  */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}
3951 
/* BE/BLE: branch external to a->disp(sr, GR[b]), with a->l naming
   the link register for the BLE form.  */
static bool trans_be(DisasContext *ctx, arg_be *a)
{
#ifndef CONFIG_USER_ONLY
    /* System mode: the target space comes from the SR operand.  */
    ctx->iaq_j.space = tcg_temp_new_i64();
    load_spr(ctx, ctx->iaq_j.space, a->sp);
#endif

    ctx->iaq_j.base = tcg_temp_new_i64();
    ctx->iaq_j.disp = 0;

    tcg_gen_addi_i64(ctx->iaq_j.base, load_gpr(ctx, a->b), a->disp);
    /* Apply the privilege rules for an indirect branch target.  */
    ctx->iaq_j.base = do_ibranch_priv(ctx, ctx->iaq_j.base);

    return do_ibranch(ctx, a->l, true, a->n);
}
3967 
/* B,L: pc-relative branch, recording the return address in GR[l].  */
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, a->disp, a->l, a->n);
}
3972 
/*
 * B,GATE: branch through a gateway page, possibly promoting the
 * privilege level.  The link value carries the caller's privilege
 * in its low two bits.
 */
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    int64_t disp = a->disp;
    bool indirect = false;

    /* Trap if PSW[B] is set. */
    if (ctx->psw_xb & PSW_B) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

#ifndef CONFIG_USER_ONLY
    if (ctx->privilege == 0) {
        /* Privilege cannot decrease. */
    } else if (!(ctx->tb_flags & PSW_C)) {
        /* With paging disabled, priv becomes 0. */
        disp -= ctx->privilege;
    } else {
        /* Adjust the dest offset for the privilege change from the PTE. */
        TCGv_i64 off = tcg_temp_new_i64();

        copy_iaoq_entry(ctx, off, &ctx->iaq_f);
        gen_helper_b_gate_priv(off, tcg_env, off);

        /* Helper returned an absolute target: branch indirectly.  */
        ctx->iaq_j.base = off;
        ctx->iaq_j.disp = disp + 8;
        indirect = true;
    }
#endif

    if (a->l) {
        /* Merge the current privilege into the low two bits of the
           link value (clearing them first when priv < 3).  */
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    if (indirect) {
        return do_ibranch(ctx, 0, false, a->n);
    }
    return do_dbranch(ctx, disp, 0, a->n);
}
4018 
/* BLR: branch and link register -- pc-relative, indexed by GR[x].  */
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        DisasIAQE next = iaqe_incr(&ctx->iaq_f, 8);
        TCGv_i64 t0 = tcg_temp_new_i64();
        TCGv_i64 t1 = tcg_temp_new_i64();

        /* The computation here never changes privilege level.  */
        copy_iaoq_entry(ctx, t0, &next);
        /* Target is (pc + 8) + GR[x] * 8.  */
        tcg_gen_shli_i64(t1, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(t0, t0, t1);

        ctx->iaq_j = iaqe_next_absv(ctx, t0);
        return do_ibranch(ctx, a->l, false, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, 0, a->l, a->n);
    }
}
4038 
/* BV: branch vectored -- jump to GR[b] + GR[x] * 8.  */
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
    }
    /* Apply the privilege rules for an indirect branch target.  */
    dest = do_ibranch_priv(ctx, dest);
    ctx->iaq_j = iaqe_next_absv(ctx, dest);

    return do_ibranch(ctx, 0, false, a->n);
}
4055 
/* BVE: branch vectored external -- jump to GR[b], selecting the
   target space from the offset; a->l names the link register.  */
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 b = load_gpr(ctx, a->b);

#ifndef CONFIG_USER_ONLY
    ctx->iaq_j.space = space_select(ctx, 0, b);
#endif
    ctx->iaq_j.base = do_ibranch_priv(ctx, b);
    ctx->iaq_j.disp = 0;

    return do_ibranch(ctx, a->l, false, a->n);
}
4068 
static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions implement as nop. */
    /* Returning false on pre-2.0 CPUs signals decode failure.  */
    return ctx->is_pa20;
}
4074 
4075 /*
4076  * Float class 0
4077  */
4078 
/* FCPY single: a plain bit copy; the env argument is unused.  */
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}
4083 
trans_fid_f(DisasContext * ctx,arg_fid_f * a)4084 static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
4085 {
4086     uint64_t ret;
4087 
4088     if (ctx->is_pa20) {
4089         ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
4090     } else {
4091         ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
4092     }
4093 
4094     nullify_over(ctx);
4095     save_frd(0, tcg_constant_i64(ret));
4096     return nullify_end(ctx);
4097 }
4098 
/* FCPY: floating copy, single and double.  */
static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

/* FABS: clear the sign bit; a pure bit operation, env unused.  */
static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}
4133 
/* FSQRT and FRND (round to integral value), single and double;
   both go through helpers to raise FP exceptions properly.  */
static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}
4153 
/* FNEG: flip the sign bit; a pure bit operation, env unused.  */
static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

/* FNEGABS: force the sign bit on (negative absolute value).  */
static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
4193 
4194 /*
4195  * Float class 1
4196  */
4197 
/*
 * FCNV conversions.  Suffix key: f/d = single/double float,
 * w = signed 32-bit fixed, q = signed 64-bit fixed (helpers use
 * "dw"), uw/uq = the unsigned variants; a "t" infix selects the
 * truncating (round-toward-zero) float-to-fixed forms.
 */

/* Float to float.  */
static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

/* Signed fixed to float.  */
static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

/* Float to signed fixed, current rounding mode.  */
static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

/* Float to signed fixed, truncating.  */
static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

/* Unsigned fixed to float.  */
static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

/* Float to unsigned fixed, current rounding mode.  */
static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

/* Float to unsigned fixed, truncating.  */
static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
4327 
4328 /*
4329  * Float class 2
4330  */
4331 
/* FCMP single: compare fr[r1] with fr[r2] under condition a->c;
   the helper records the result in FPSR state (read by FTEST) and
   returns nothing.  */
static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    /* y selects where in the compare queue the result lands.  */
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}
4347 
/* FCMP double: as trans_fcmp_f but on 64-bit operands.  */
static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    /* y selects where in the compare queue the result lands.  */
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}
4364 
/*
 * FTEST: nullify the next insn based on prior FCMP results held in
 * the fr0 shadow.  With y == 1, the c field selects combinations of
 * the C bit and compare-queue bits ("acc" forms nullify when any
 * selected bit is set, "rej" forms when none are); other y values
 * test a single CA bit.
 */
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGCond tc = TCG_COND_TSTNE;
    uint32_t mask;
    TCGv_i64 t;

    nullify_over(ctx);

    t = tcg_temp_new_i64();
    tcg_gen_ld32u_i64(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        switch (a->c) {
        case 0: /* simple */
            mask = R_FPSR_C_MASK;
            break;
        case 2: /* rej */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 1: /* acc */
            mask = R_FPSR_C_MASK | R_FPSR_CQ_MASK;
            break;
        case 6: /* rej8 */
            tc = TCG_COND_TSTEQ;
            /* fallthru */
        case 5: /* acc8 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_6_MASK;
            break;
        case 9: /* acc6 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_4_MASK;
            break;
        case 13: /* acc4 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_2_MASK;
            break;
        case 17: /* acc2 */
            mask = R_FPSR_C_MASK | R_FPSR_CQ0_MASK;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
    } else {
        /* Test one CA bit; y encodes which.  */
        unsigned cbit = (a->y ^ 1) - 1;
        mask = R_FPSR_CA0_MASK >> cbit;
    }

    /* Nullify per tc on the masked shadow bits.  */
    ctx->null_cond = cond_make_ti(tc, t, mask);
    return nullify_end(ctx);
}
4414 
/*
 * Float class 3
 */
4418 
/* Three-operand arithmetic, t = r1 <op> r2, single and double.  */
static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
4458 
/* XMPYU: 32-bit unsigned multiply producing a 64-bit result in the
   FP register file.  */
static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    /* Operands are FP words widened to 64 bits (assumed zero-extended
       by load_frw0_i64 -- confirm in its definition).  */
    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}
4472 
/*
 * Convert the fmpyadd single-precision register encodings to standard.
 * The 5-bit field maps {0..15} -> {16..31} and {16..31} -> {48..63};
 * bit 4 selects the upper register bank.
 */
static inline int fmpyadd_s_reg(unsigned r)
{
    return 16 + (r & 15) + ((r & 16) << 1);
}
4478 
/*
 * FMPYADD/FMPYSUB single: tm = rm1 * rm2 alongside ta = ta +/- ra.
 * Register fields use the compact encoding, translated here by
 * fmpyadd_s_reg.
 */
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}
4495 
/* FMPYADD single: multiply and add in one instruction.  */
static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

/* FMPYSUB single: as FMPYADD, but subtracting.  */
static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}
4505 
/* FMPYADD/FMPYSUB double: tm = rm1 * rm2 alongside ta = ta +/- ra;
   double-precision fields need no register-number translation.  */
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}
4516 
/* FMPYADD double: multiply and add in one instruction.  */
static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

/* FMPYSUB double: as FMPYADD, but subtracting.  */
static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
4526 
/* FMPYFADD single: fused multiply-add t = rm1 * rm2 + ra3, or the
   negated FMPYNFADD form when a->neg is set.  */
static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}
4545 
/* FMPYFADD double: fused multiply-add t = rm1 * rm2 + ra3, or the
   negated FMPYNFADD form when a->neg is set.  */
static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}
4564 
/* Emulate PDC BTLB, called by SeaBIOS-hppa */
static bool trans_diag_btlb(DisasContext *ctx, arg_diag_btlb *a)
{
    /* Privileged: CHECK_MOST_PRIVILEGED presumably returns from the
       function in CONFIG_USER_ONLY builds, so there is no fallthrough
       past the #ifndef block below -- confirm in the macro.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_btlb(tcg_env);
    return nullify_end(ctx);
#endif
}
4575 
/* Print char in %r26 to first serial console, used by SeaBIOS-hppa */
static bool trans_diag_cout(DisasContext *ctx, arg_diag_cout *a)
{
    /* Privileged; see trans_diag_btlb regarding the user-only path.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_diag_console_output(tcg_env);
    return nullify_end(ctx);
#endif
}
4586 
/* DIAG shadow-register read; the PA1.x encoding only.  */
static bool trans_diag_getshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_getshadowregs(ctx);
}
4591 
/* DIAG shadow-register write; the PA1.x encoding only.  */
static bool trans_diag_putshadowregs_pa1(DisasContext *ctx, arg_empty *a)
{
    return !ctx->is_pa20 && do_putshadowregs(ctx);
}
4596 
/* MFDIAG: read diagnose register dr[a->dr] into GR[rt]; privileged.  */
static bool trans_diag_mfdiag(DisasContext *ctx, arg_diag_mfdiag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    TCGv_i64 dest = dest_gpr(ctx, a->rt);
    tcg_gen_ld_i64(dest, tcg_env,
                       offsetof(CPUHPPAState, dr[a->dr]));
    save_gpr(ctx, a->rt, dest);
    return nullify_end(ctx);
}
4607 
/* Move to diagnose register: copy GPR a->r1 into %dr[a->dr]. */
static bool trans_diag_mtdiag(DisasContext *ctx, arg_diag_mtdiag *a)
{
    /* Privileged operation; EXCP_PRIV_OPR is raised otherwise. */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    nullify_over(ctx);
    tcg_gen_st_i64(load_gpr(ctx, a->r1), tcg_env,
                        offsetof(CPUHPPAState, dr[a->dr]));
#ifndef CONFIG_USER_ONLY
    if (ctx->is_pa20 && (a->dr == 2)) {
        /* Update gva_offset_mask from the new value of %dr2 */
        gen_helper_update_gva_offset_mask(tcg_env);
        /* Exit to capture the new value for the next TB. */
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    }
#endif
    return nullify_end(ctx);
}
4624 
/* Catch-all for DIAG sub-opcodes without a dedicated translation:
   still privileged, but otherwise accepted and ignored (with a log). */
static bool trans_diag_unimp(DisasContext *ctx, arg_diag_unimp *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}
4631 
/*
 * Per-TB translator setup: latch the TB flags, privilege/MMU selection,
 * and decode the IAQ_Back layout that was packed into tb->cs_base.
 */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t cs_base;
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
    /* PSW X and B state as captured in the TB flags. */
    ctx->psw_xb = ctx->tb_flags & (PSW_X | PSW_B);
    ctx->gva_offset_mask = cpu_env(cs)->gva_offset_mask;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = PRIV_USER;
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    /* Privilege level occupies two bits of the TB flags. */
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    /* With PSW_D clear, use an absolute MMU index (wide per PSW_W);
       otherwise select by privilege level and PSW_P. */
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);
#endif

    cs_base = ctx->base.tb->cs_base;
    /* The privilege level is folded into the low bits of IAOQ_Front. */
    ctx->iaoq_first = ctx->base.pc_first + ctx->privilege;

    if (unlikely(cs_base & CS_BASE_DIFFSPACE)) {
        /* IAQ_Back in a different space: both space and offset are
           runtime values, tracked via the cpu_iasq_b/cpu_iaoq_b globals. */
        ctx->iaq_b.space = cpu_iasq_b;
        ctx->iaq_b.base = cpu_iaoq_b;
    } else if (unlikely(cs_base & CS_BASE_DIFFPAGE)) {
        /* Same space but different page: only the offset is runtime. */
        ctx->iaq_b.base = cpu_iaoq_b;
    } else {
        /* Same page: cs_base carries the page offset of IAQ_Back, so
           IAQ_Back is a compile-time displacement from IAQ_Front. */
        uint64_t iaoq_f_pgofs = ctx->iaoq_first & ~TARGET_PAGE_MASK;
        uint64_t iaoq_b_pgofs = cs_base & ~TARGET_PAGE_MASK;
        ctx->iaq_b.disp = iaoq_b_pgofs - iaoq_f_pgofs;
    }

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
4675 
/*
 * TB prologue: seed the nullification state from PSW[N], which was
 * captured into the TB flags, and reset the out-of-line null label.
 */
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    bool psw_n_set = (ctx->tb_flags & PSW_N) != 0;

    ctx->null_cond = cond_make_f();
    if (psw_n_set) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
    }
    ctx->psw_n_nonzero = psw_n_set;
    ctx->null_lab = NULL;
}
4689 
/*
 * Emit the per-insn start marker: the page offset of IAOQ_Front and the
 * front-to-back displacement, or INT32_MIN when IAQ_Back is not a
 * compile-time constant.
 */
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    uint64_t iaoq_f, iaoq_b;
    int64_t diff;

    /* IAQ_Front must always be a compile-time displacement here. */
    tcg_debug_assert(!iaqe_variable(&ctx->iaq_f));

    iaoq_f = ctx->iaoq_first + ctx->iaq_f.disp;
    if (iaqe_variable(&ctx->iaq_b)) {
        /* Out-of-band marker meaning "IAQ_Back is variable". */
        diff = INT32_MIN;
    } else {
        iaoq_b = ctx->iaoq_first + ctx->iaq_b.disp;
        diff = iaoq_b - iaoq_f;
        /* Direct branches can only produce a 24-bit displacement. */
        tcg_debug_assert(diff == (int32_t)diff);
        /* ... and must not collide with the variable marker above. */
        tcg_debug_assert(diff != INT32_MIN);
    }

    tcg_gen_insn_start(iaoq_f & ~TARGET_PAGE_MASK, diff, 0);
    ctx->insn_start_updated = false;
}
4712 
/*
 * Translate one instruction, maintaining the two-entry instruction
 * address queue (IAQ_Front/IAQ_Back) and the nullification state
 * across instructions.
 */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        /* User-only: addresses in page zero get special handling
           (see do_page_zero), and always end the TB. */
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /*
         * Set up the IA queue for the next insn.
         * This will be overwritten by a branch.
         */
        ctx->iaq_n = NULL;
        memset(&ctx->iaq_j, 0, sizeof(ctx->iaq_j));
        ctx->psw_b_next = false;

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* This insn is nullified: skip it and clear the condition
               so the next insn executes normally. */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            /* Any nullification label must have been resolved. */
            assert(ctx->null_lab == NULL);
        }

        if (ret != DISAS_NORETURN) {
            /* Track PSW[B] for the next insn: set only when this insn
               requested it via psw_b_next. */
            set_psw_xb(ctx, ctx->psw_b_next ? PSW_B : 0);
        }
    }

    /* If the TranslationBlock must end, do so. */
    ctx->base.pc_next += 4;
    if (ret != DISAS_NEXT) {
        return;
    }
    /* Note this also detects a priority change. */
    if (iaqe_variable(&ctx->iaq_b)
        || ctx->iaq_b.disp != ctx->iaq_f.disp + 4) {
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        return;
    }

    /*
     * Advance the insn queue.
     * The only exit now is DISAS_TOO_MANY from the translator loop.
     */
    ctx->iaq_f.disp = ctx->iaq_b.disp;
    if (!ctx->iaq_n) {
        /* No branch queued: IAQ_Back simply steps to the next insn. */
        ctx->iaq_b.disp += 4;
        return;
    }
    /*
     * If IAQ_Next is variable in any way, we need to copy into the
     * IAQ_Back globals, in case the next insn raises an exception.
     */
    if (ctx->iaq_n->base) {
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaq_n);
        ctx->iaq_b.base = cpu_iaoq_b;
        ctx->iaq_b.disp = 0;
    } else {
        ctx->iaq_b.disp = ctx->iaq_n->disp;
    }
    if (ctx->iaq_n->space) {
        tcg_gen_mov_i64(cpu_iasq_b, ctx->iaq_n->space);
        ctx->iaq_b.space = cpu_iasq_b;
    }
}
4794 
/*
 * Finish the TB: emit the exit sequence appropriate to is_jmp, then
 * materialize any delayed exception paths queued during translation.
 */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;
    /* Assume the insn queue has not been advanced. */
    DisasIAQE *f = &ctx->iaq_b;
    DisasIAQE *b = ctx->iaq_n;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        /* The insn queue has not been advanced. */
        f = &ctx->iaq_f;
        b = &ctx->iaq_b;
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE:
        /* Chain via goto_tb only when nullification is known at
           compile time; otherwise fall through to the generic exit. */
        if (use_goto_tb(ctx, f, b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, f, b);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_STALE_EXIT:
        install_iaq_entries(ctx, f, b);
        nullify_save(ctx);
        if (is_jmp == DISAS_IAQ_N_STALE_EXIT) {
            /* Exit to the main loop rather than chaining to another TB. */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        tcg_gen_lookup_and_goto_ptr();
        break;
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }

    /* Emit the out-of-line delayed-exception paths: restore PSW[N] and
       IIR as recorded, install the saved IAQ, and raise the exception. */
    for (DisasDelayException *e = ctx->delay_excp_list; e ; e = e->next) {
        gen_set_label(e->lab);
        if (e->set_n >= 0) {
            tcg_gen_movi_i64(cpu_psw_n, e->set_n);
        }
        if (e->set_iir) {
            tcg_gen_st_i64(tcg_constant_i64(e->insn), tcg_env,
                           offsetof(CPUHPPAState, cr[CR_IIR]));
        }
        install_iaq_entries(ctx, &e->iaq_f, &e->iaq_b);
        gen_excp_1(e->excp);
    }
}
4851 
4852 #ifdef CONFIG_USER_ONLY
/*
 * Pretty-print the user-only page-zero stubs in the execution log.
 * Returns true when pc matched a known stub (suppressing normal
 * disassembly), false otherwise.
 */
static bool hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;
    const char *msg;

    switch (pc) {
    case 0x00:
        msg = "IN:\n0x00000000:  (null)\n";
        break;
    case 0xb0:
        msg = "IN:\n0x000000b0:  light-weight-syscall\n";
        break;
    case 0xe0:
        msg = "IN:\n0x000000e0:  set-thread-pointer-syscall\n";
        break;
    case 0x100:
        msg = "IN:\n0x00000100:  syscall\n";
        break;
    default:
        return false;
    }
    fputs(msg, logfile);
    return true;
}
4874 #endif
4875 
/* Hook table handed to the generic translator_loop. */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
#ifdef CONFIG_USER_ONLY
    /* User-only: substitute friendly names for the page-zero stubs. */
    .disas_log          = hppa_tr_disas_log,
#endif
};
4886 
/*
 * Target entry point for TB translation: run the generic translator
 * loop over one TB with the HPPA hooks and a zero-initialized context.
 */
void hppa_translate_code(CPUState *cs, TranslationBlock *tb,
                         int *max_insns, vaddr pc, void *host_pc)
{
    DisasContext ctx = { };
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}
4893