1 /*
2 * m68k translation
3 *
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "exec/translation-block.h"
24 #include "exec/target_page.h"
25 #include "tcg/tcg-op.h"
26 #include "qemu/log.h"
27 #include "qemu/qemu-print.h"
28 #include "exec/translator.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "exec/log.h"
32 #include "fpu/softfloat.h"
33 #include "semihosting/semihost.h"
34
35 #define HELPER_H "helper.h"
36 #include "exec/helper-info.c.inc"
37 #undef HELPER_H
38
//#define DEBUG_DISPATCH 1

/*
 * Declare one TCG global per "quick register" listed in qregs.h.inc
 * (PC, SR, CC_* ...).  The DEFO macros are redefined with real bodies
 * in m68k_tcg_init() below to allocate the globals.
 */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Backing store for register names: "Dn\0"/"An\0" (3 chars) x 16, "ACCn\0" (5 chars) x 4. */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_macc[4];

/* Extract the 3-bit register field at bit POS of an instruction word. */
#define REG(insn, pos) (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc) cpu_macc[acc]
#define QREG_SP get_areg(s, 7)

/* Sentinel returned by EA generators for invalid addressing modes. */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes. */
static TCGv store_dummy;
65
/*
 * Allocate all TCG globals used by the m68k translator: the quick
 * registers from qregs.h.inc, halted/exception_index (which live in the
 * CPUState that wraps env, hence the negative offsets), the sixteen
 * general registers and the four MAC accumulators.
 */
void m68k_tcg_init(void)
{
    char *p;
    int i;

/* Redefine the DEFO macros to allocate the globals declared above. */
#define DEFO32(name, offset) \
    QREG_##name = tcg_global_mem_new_i32(tcg_env, \
        offsetof(CPUM68KState, offset), #name);
#define DEFO64(name, offset) \
    QREG_##name = tcg_global_mem_new_i64(tcg_env, \
        offsetof(CPUM68KState, offset), #name);
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

    /* cpu->halted and cpu->exception_index live before env in M68kCPU. */
    cpu_halted = tcg_global_mem_new_i32(tcg_env,
                                        -offsetof(M68kCPU, env) +
                                        offsetof(CPUState, halted), "HALTED");
    cpu_exception_index = tcg_global_mem_new_i32(tcg_env,
                                                 -offsetof(M68kCPU, env) +
                                                 offsetof(CPUState, exception_index),
                                                 "EXCEPTION");

    /* Build "Dn"/"An" names in cpu_reg_names; each takes 3 bytes. */
    p = cpu_reg_names;
    for (i = 0; i < 8; i++) {
        sprintf(p, "D%d", i);
        cpu_dregs[i] = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUM68KState, dregs[i]), p);
        p += 3;
        sprintf(p, "A%d", i);
        cpu_aregs[i] = tcg_global_mem_new(tcg_env,
                                          offsetof(CPUM68KState, aregs[i]), p);
        p += 3;
    }
    /* "ACCn" names take 5 bytes each. */
    for (i = 0; i < 4; i++) {
        sprintf(p, "ACC%d", i);
        cpu_macc[i] = tcg_global_mem_new_i64(tcg_env,
                                         offsetof(CPUM68KState, macc[i]), p);
        p += 5;
    }

    /*
     * Negative offsets cannot correspond to real env state, so these act
     * purely as sentinel values (see IS_NULL_QREG / store_dummy).
     */
    NULL_QREG = tcg_global_mem_new(tcg_env, -4, "NULL");
    store_dummy = tcg_global_mem_new(tcg_env, -8, "NULL");
}
110
/* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* address of the next insn word to fetch */
    target_ulong pc_prev;   /* NOTE(review): presumably the previous insn's pc — set outside this chunk */
    CCOp cc_op;             /* Current CC operation */
    int cc_op_synced;       /* nonzero when env->cc_op matches cc_op */
    TCGv_i64 mactmp;        /* scratch temp for MAC insns */
    int done_mac;
    int writeback_mask;     /* bitmask of An regs with a delayed writeback */
    TCGv writeback[8];      /* pending values for those registers */
    bool ss_active;         /* single-step trace mode active */
} DisasContext;
125
get_areg(DisasContext * s,unsigned regno)126 static TCGv get_areg(DisasContext *s, unsigned regno)
127 {
128 if (s->writeback_mask & (1 << regno)) {
129 return s->writeback[regno];
130 } else {
131 return cpu_aregs[regno];
132 }
133 }
134
/*
 * Schedule a delayed update of address register REGNO to VAL.  If
 * GIVE_TEMP, ownership of VAL transfers to the writeback slot;
 * otherwise VAL is copied into a (possibly new) temporary.
 */
static void delay_set_areg(DisasContext *s, unsigned regno,
                           TCGv val, bool give_temp)
{
    bool pending = s->writeback_mask & (1 << regno);

    if (!pending) {
        s->writeback_mask |= 1 << regno;
        if (!give_temp) {
            /* Allocate a fresh temp to hold the pending value. */
            s->writeback[regno] = tcg_temp_new();
        }
    }
    if (give_temp) {
        s->writeback[regno] = val;
    } else {
        tcg_gen_mov_i32(s->writeback[regno], val);
    }
}
155
/* Flush every delayed address-register update back to the globals. */
static void do_writebacks(DisasContext *s)
{
    unsigned pending = s->writeback_mask;

    s->writeback_mask = 0;
    while (pending) {
        unsigned regno = ctz32(pending);

        pending &= pending - 1;   /* clear lowest set bit */
        tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
    }
}
168
/* is_jmp field values */
#define DISAS_JUMP DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
/* Privilege and MMU-index selection derived from the TB flags. */
#define IS_USER(s) (!(s->base.tb->flags & TB_FLAGS_MSR_S))
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif

typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* Debug variant: log each dispatch before calling the real handler. */
#define DISAS_INSN(name) \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
                                  uint16_t insn); \
    static void disas_##name(CPUM68KState *env, DisasContext *s, \
                             uint16_t insn) \
    { \
        qemu_log("Dispatch " #name "\n"); \
        real_disas_##name(env, s, insn); \
    } \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s, \
                                  uint16_t insn)
#else
#define DISAS_INSN(name) \
    static void disas_##name(CPUM68KState *env, DisasContext *s, \
                             uint16_t insn)
#endif

/* Set of flags that carry live information for each CC op. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
211
/*
 * Switch the translation-time CC op to OP, discarding any flag values
 * that the new op no longer provides.
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp prev = s->cc_op;
    int stale;

    if (prev == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;

    /*
     * Discard CC computation that will no longer be used.
     * X and N are live for every op and thus never discarded.
     */
    stale = cc_op_live[prev] & ~cc_op_live[op];
    if (stale & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (stale & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (stale & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
238
239 /* Update the CPU env CC_OP state. */
update_cc_op(DisasContext * s)240 static void update_cc_op(DisasContext *s)
241 {
242 if (!s->cc_op_synced) {
243 s->cc_op_synced = 1;
244 tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
245 }
246 }
247
/* Generate a jump to an immediate address. */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* Sync cc_op before leaving the TB. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
255
/* Generate a jump to the address in qreg DEST. */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* Sync cc_op before leaving the TB. */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
263
/* Emit a call to the raise_exception helper with exception number NR. */
static void gen_raise_exception(int nr)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(nr));
}
268
/* Raise exception NR, recording THIS_PC for a Format $2 stack frame. */
static void gen_raise_exception_format2(DisasContext *s, int nr,
                                        target_ulong this_pc)
{
    /*
     * Pass the address of the insn to the exception handler,
     * for recording in the Format $2 (6-word) stack frame.
     * Re-use mmu.ar for the purpose, since that's only valid
     * after tlb_fill.
     */
    tcg_gen_st_i32(tcg_constant_i32(this_pc), tcg_env,
                   offsetof(CPUM68KState, mmu.ar));
    gen_raise_exception(nr);
    s->base.is_jmp = DISAS_NORETURN;
}
283
/* Raise exception NR with the PC set to DEST; terminates the TB. */
static void gen_exception(DisasContext *s, uint32_t dest, int nr)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);

    gen_raise_exception(nr);

    s->base.is_jmp = DISAS_NORETURN;
}
293
/* Raise an address error exception at the current insn's address. */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
298
/*
 * Generate a load from the specified address. Narrow values are
 * sign extended to full register width.
 */
static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
                            int sign, int index)
{
    TCGv tmp = tcg_temp_new_i32();

    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Relies on OS_BYTE/WORD/LONG matching the MO_8/16/32 MemOp sizes. */
        tcg_gen_qemu_ld_tl(tmp, addr, index,
                           opsize | (sign ? MO_SIGN : 0) | MO_TE);
        break;
    default:
        g_assert_not_reached();
    }
    return tmp;
}
320
/* Generate a store. */
static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
                             int index)
{
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Relies on OS_BYTE/WORD/LONG matching the MO_8/16/32 MemOp sizes. */
        tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
        break;
    default:
        g_assert_not_reached();
    }
}
335
/* Access direction for EA helpers. */
typedef enum {
    EA_STORE,   /* write the supplied value */
    EA_LOADU,   /* load, zero-extended */
    EA_LOADS    /* load, sign-extended */
} ea_what;
341
342 /*
343 * Generate an unsigned load if VAL is 0 a signed load if val is -1,
344 * otherwise generate a store.
345 */
gen_ldst(DisasContext * s,int opsize,TCGv addr,TCGv val,ea_what what,int index)346 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
347 ea_what what, int index)
348 {
349 if (what == EA_STORE) {
350 gen_store(s, opsize, addr, val, index);
351 return store_dummy;
352 } else {
353 return gen_load(s, opsize, addr, what == EA_LOADS, index);
354 }
355 }
356
357 /* Read a 16-bit immediate constant */
read_im16(CPUM68KState * env,DisasContext * s)358 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
359 {
360 uint16_t im;
361 im = translator_lduw(env, &s->base, s->pc);
362 s->pc += 2;
363 return im;
364 }
365
366 /* Read an 8-bit immediate constant */
read_im8(CPUM68KState * env,DisasContext * s)367 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
368 {
369 return read_im16(env, s);
370 }
371
372 /* Read a 32-bit immediate constant. */
read_im32(CPUM68KState * env,DisasContext * s)373 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
374 {
375 uint32_t im;
376 im = read_im16(env, s) << 16;
377 im |= 0xffff & read_im16(env, s);
378 return im;
379 }
380
381 /* Read a 64-bit immediate constant. */
read_im64(CPUM68KState * env,DisasContext * s)382 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
383 {
384 uint64_t im;
385 im = (uint64_t)read_im32(env, s) << 32;
386 im |= (uint64_t)read_im32(env, s);
387 return im;
388 }
389
/* Calculate an address index (Xn.{W|L} * scale) from an extension word. */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    /* Bit 15 selects address vs data register; bits 14-12 the number. */
    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: sign-extend into TMP. */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    /* Bits 10-9: scale factor of 1, 2, 4 or 8 (shift count). */
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
408
/*
 * Handle a base + index + displacement effective address.
 * A NULL_QREG base means pc-relative.
 */
static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
{
    uint32_t offset;
    uint16_t ext;
    TCGv add;
    TCGv tmp;
    uint32_t bd, od;

    /* Remember the extension word's address for pc-relative modes. */
    offset = s->pc;
    ext = read_im16(env, s);

    /* Word-sized index (bit 11 clear) needs the WORD_INDEX feature. */
    if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
        return NULL_QREG;

    if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
        !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
        /* Force scale = 1 on CPUs without scaled indexing. */
        ext &= ~(3 << 9);
    }

    if (ext & 0x100) {
        /* full extension word format */
        if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
            return NULL_QREG;

        if ((ext & 0x30) > 0x10) {
            /* base displacement: word (0x20) or long (0x30) */
            if ((ext & 0x30) == 0x20) {
                bd = (int16_t)read_im16(env, s);
            } else {
                bd = read_im32(env, s);
            }
        } else {
            bd = 0;
        }
        tmp = tcg_temp_new();
        if ((ext & 0x44) == 0) {
            /* pre-index */
            add = gen_addr_index(s, ext, tmp);
        } else {
            /* index suppressed or applied post-indirection */
            add = NULL_QREG;
        }
        if ((ext & 0x80) == 0) {
            /* base not suppressed */
            if (IS_NULL_QREG(base)) {
                /* pc-relative: fold pc + bd into a constant base */
                base = tcg_constant_i32(offset + bd);
                bd = 0;
            }
            if (!IS_NULL_QREG(add)) {
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
        }
        if (!IS_NULL_QREG(add)) {
            if (bd != 0) {
                tcg_gen_addi_i32(tmp, add, bd);
                add = tmp;
            }
        } else {
            /* everything suppressed: address is just the displacement */
            add = tcg_constant_i32(bd);
        }
        if ((ext & 3) != 0) {
            /* memory indirect */
            base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
            if ((ext & 0x44) == 4) {
                /* post-index: add the index after the indirection */
                add = gen_addr_index(s, ext, tmp);
                tcg_gen_add_i32(tmp, add, base);
                add = tmp;
            } else {
                add = base;
            }
            if ((ext & 3) > 1) {
                /* outer displacement: word (2) or long (3) */
                if ((ext & 3) == 2) {
                    od = (int16_t)read_im16(env, s);
                } else {
                    od = read_im32(env, s);
                }
            } else {
                od = 0;
            }
            if (od != 0) {
                tcg_gen_addi_i32(tmp, add, od);
                add = tmp;
            }
        }
    } else {
        /* brief extension word format: index + 8-bit displacement */
        tmp = tcg_temp_new();
        add = gen_addr_index(s, ext, tmp);
        if (!IS_NULL_QREG(base)) {
            tcg_gen_add_i32(tmp, add, base);
            if ((int8_t)ext)
                tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
        } else {
            /* pc-relative */
            tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
        }
        add = tmp;
    }
    return add;
}
515
/* Sign or zero extend a value. */

static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
{
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Relies on OS_BYTE/WORD/LONG matching the MO_8/16/32 MemOp sizes. */
        tcg_gen_ext_i32(res, val, opsize | (sign ? MO_SIGN : 0));
        break;
    default:
        g_assert_not_reached();
    }
}
530
/* Evaluate all the CC flags. */

static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Flags are already fully materialized. */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* After an add, N holds the result and V the second operand. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);  /* recover first operand */
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        /* After a sub, N holds the result and V the subtrahend. */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);  /* recover minuend */
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* For cmp, N holds the first operand and V the second. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction. */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* Logic ops clear C and V; Z mirrors N (the result). */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op is only known at runtime; let the helper sort it out. */
        gen_helper_flush_flags(tcg_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        gen_helper_flush_flags(tcg_env, tcg_constant_i32(s->cc_op));
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op. */
    s->cc_op = CC_OP_FLAGS;
}
605
/*
 * Return VAL extended to 32 bits: VAL itself for OS_LONG, otherwise a
 * new temporary holding the sign- or zero-extended value.
 */
static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
{
    TCGv widened;

    if (opsize == OS_LONG) {
        return val;
    }
    widened = tcg_temp_new();
    gen_ext(widened, val, opsize, sign);
    return widened;
}
619
/* Set flags for a logic op: N from VAL; C/V become 0 when flushed. */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
625
/* Record a compare: N holds DEST, V holds SRC until flags are flushed. */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    /* CC_OP_CMPB/W/L are consecutive, indexed by opsize. */
    set_cc_op(s, CC_OP_CMPB + opsize);
}
632
/* Record an add/sub: N holds the (extended) result, V the second operand. */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
638
opsize_bytes(int opsize)639 static inline int opsize_bytes(int opsize)
640 {
641 switch (opsize) {
642 case OS_BYTE: return 1;
643 case OS_WORD: return 2;
644 case OS_LONG: return 4;
645 case OS_SINGLE: return 4;
646 case OS_DOUBLE: return 8;
647 case OS_EXTENDED: return 12;
648 case OS_PACKED: return 12;
649 default:
650 g_assert_not_reached();
651 }
652 }
653
insn_opsize(int insn)654 static inline int insn_opsize(int insn)
655 {
656 switch ((insn >> 6) & 3) {
657 case 0: return OS_BYTE;
658 case 1: return OS_WORD;
659 case 2: return OS_LONG;
660 default:
661 g_assert_not_reached();
662 }
663 }
664
ext_opsize(int ext,int pos)665 static inline int ext_opsize(int ext, int pos)
666 {
667 switch ((ext >> pos) & 7) {
668 case 0: return OS_LONG;
669 case 1: return OS_SINGLE;
670 case 2: return OS_EXTENDED;
671 case 3: return OS_PACKED;
672 case 4: return OS_WORD;
673 case 5: return OS_DOUBLE;
674 case 6: return OS_BYTE;
675 default:
676 g_assert_not_reached();
677 }
678 }
679
680 /*
681 * Assign value to a register. If the width is less than the register width
682 * only the low part of the register is set.
683 */
gen_partset_reg(int opsize,TCGv reg,TCGv val)684 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
685 {
686 switch (opsize) {
687 case OS_BYTE:
688 tcg_gen_deposit_i32(reg, reg, val, 0, 8);
689 break;
690 case OS_WORD:
691 tcg_gen_deposit_i32(reg, reg, val, 0, 16);
692 break;
693 case OS_LONG:
694 case OS_SINGLE:
695 tcg_gen_mov_i32(reg, val);
696 break;
697 default:
698 g_assert_not_reached();
699 }
700 }
701
/*
 * Generate code for an "effective address". Does not adjust the base
 * register for autoincrement addressing modes.
 */
static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
                         int mode, int reg0, int opsize)
{
    TCGv reg;
    TCGv tmp;
    uint16_t ext;
    uint32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
    case 1: /* Address register direct. */
        /* Register direct modes have no memory address. */
        return NULL_QREG;
    case 3: /* Indirect postincrement. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        /* fallthru */
    case 2: /* Indirect register */
        /* Snapshot the register so later writeback doesn't alias. */
        tmp = tcg_temp_new();
        tcg_gen_mov_i32(tmp, get_areg(s, reg0));
        return tmp;
    case 4: /* Indirect predecrememnt. */
        if (opsize == OS_UNSIZED) {
            return NULL_QREG;
        }
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        if (reg0 == 7 && opsize == OS_BYTE &&
            m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* Byte ops on -(A7) move by 2 to keep the stack word aligned. */
            tcg_gen_subi_i32(tmp, reg, 2);
        } else {
            tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
        }
        return tmp;
    case 5: /* Indirect displacement. */
        reg = get_areg(s, reg0);
        tmp = tcg_temp_new();
        ext = read_im16(env, s);
        tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
        return tmp;
    case 6: /* Indirect index + displacement. */
        reg = get_areg(s, reg0);
        return gen_lea_indexed(env, s, reg);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
            offset = (int16_t)read_im16(env, s);
            break;
        case 1: /* Absolute long. */
            offset = read_im32(env, s);
            break;
        case 2: /* pc displacement */
            offset = s->pc;
            offset += (int16_t)read_im16(env, s);
            break;
        case 3: /* pc index+displacement. */
            return gen_lea_indexed(env, s, NULL_QREG);
        case 4: /* Immediate. */
        default:
            /* Immediates have no address. */
            return NULL_QREG;
        }
        tmp = tcg_temp_new();
        tcg_gen_movi_i32(tmp, offset);
        return tmp;
    }
    /* Should never happen. */
    return NULL_QREG;
}
774
gen_lea(CPUM68KState * env,DisasContext * s,uint16_t insn,int opsize)775 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
776 int opsize)
777 {
778 int mode = extract32(insn, 3, 3);
779 int reg0 = REG(insn, 0);
780 return gen_lea_mode(env, s, mode, reg0, opsize);
781 }
782
/*
 * Generate code to load/store a value from/into an EA. If WHAT > 0 this is
 * a write otherwise it is a read (0 == sign extend, -1 == zero extend).
 * ADDRP is non-null for readwrite operands.
 */
static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
                        int opsize, TCGv val, TCGv *addrp, ea_what what,
                        int index)
{
    TCGv reg, tmp, result;
    int32_t offset;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            /* Narrow stores update only the low part of the register. */
            gen_partset_reg(opsize, reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 1: /* Address register direct. */
        reg = get_areg(s, reg0);
        if (what == EA_STORE) {
            tcg_gen_mov_i32(reg, val);
            return store_dummy;
        } else {
            return gen_extend(s, reg, opsize, what == EA_LOADS);
        }
    case 2: /* Indirect register */
        reg = get_areg(s, reg0);
        return gen_ldst(s, opsize, reg, val, what, index);
    case 3: /* Indirect postincrement. */
        reg = get_areg(s, reg0);
        result = gen_ldst(s, opsize, reg, val, what, index);
        /* Postincrement only on the final access of a readwrite operand. */
        if (what == EA_STORE || !addrp) {
            tmp = tcg_temp_new();
            if (reg0 == 7 && opsize == OS_BYTE &&
                m68k_feature(s->env, M68K_FEATURE_M68K)) {
                /* Byte ops on (A7)+ move by 2 to keep the stack word aligned. */
                tcg_gen_addi_i32(tmp, reg, 2);
            } else {
                tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
            }
            delay_set_areg(s, reg0, tmp, true);
        }
        return result;
    case 4: /* Indirect predecrememnt. */
        if (addrp && what == EA_STORE) {
            /* Re-use the address computed by the earlier read. */
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        result = gen_ldst(s, opsize, tmp, val, what, index);
        if (what == EA_STORE || !addrp) {
            delay_set_areg(s, reg0, tmp, false);
        }
        return result;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        if (addrp && what == EA_STORE) {
            tmp = *addrp;
        } else {
            tmp = gen_lea_mode(env, s, mode, reg0, opsize);
            if (IS_NULL_QREG(tmp)) {
                return tmp;
            }
            if (addrp) {
                *addrp = tmp;
            }
        }
        return gen_ldst(s, opsize, tmp, val, what, index);
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Sign extend values for consistency. */
            switch (opsize) {
            case OS_BYTE:
                if (what == EA_LOADS) {
                    offset = (int8_t)read_im8(env, s);
                } else {
                    offset = read_im8(env, s);
                }
                break;
            case OS_WORD:
                if (what == EA_LOADS) {
                    offset = (int16_t)read_im16(env, s);
                } else {
                    offset = read_im16(env, s);
                }
                break;
            case OS_LONG:
                offset = read_im32(env, s);
                break;
            default:
                g_assert_not_reached();
            }
            return tcg_constant_i32(offset);
        default:
            return NULL_QREG;
        }
    }
    /* Should never happen. */
    return NULL_QREG;
}
899
/* Decode mode/register fields from INSN and defer to gen_ea_mode. */
static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
                   int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
{
    return gen_ea_mode(env, s, extract32(insn, 3, 3), REG(insn, 0),
                       opsize, val, addrp, what, index);
}
907
gen_fp_ptr(int freg)908 static TCGv_ptr gen_fp_ptr(int freg)
909 {
910 TCGv_ptr fp = tcg_temp_new_ptr();
911 tcg_gen_addi_ptr(fp, tcg_env, offsetof(CPUM68KState, fregs[freg]));
912 return fp;
913 }
914
gen_fp_result_ptr(void)915 static TCGv_ptr gen_fp_result_ptr(void)
916 {
917 TCGv_ptr fp = tcg_temp_new_ptr();
918 tcg_gen_addi_ptr(fp, tcg_env, offsetof(CPUM68KState, fp_result));
919 return fp;
920 }
921
/* Copy an FPReg: the 16-bit upper word plus the 64-bit lower part. */
static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
{
    TCGv upper;
    TCGv_i64 lower;

    upper = tcg_temp_new();
    tcg_gen_ld16u_i32(upper, src, offsetof(FPReg, l.upper));
    tcg_gen_st16_i32(upper, dest, offsetof(FPReg, l.upper));

    lower = tcg_temp_new_i64();
    tcg_gen_ld_i64(lower, src, offsetof(FPReg, l.lower));
    tcg_gen_st_i64(lower, dest, offsetof(FPReg, l.lower));
}
935
/* Load a value of size OPSIZE from ADDR and convert it into FP reg FP. */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Integer sources are sign-extended then converted by the helper. */
        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
        gen_helper_exts32(tcg_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
        gen_helper_extf32(tcg_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
        gen_helper_extf64(tcg_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs have no 96-bit extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Sign/exponent in the top half of the first long word ... */
        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        /* ... mantissa in the following 8 bytes. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
982
/* Convert FP reg FP to size OPSIZE and store it at ADDR. */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Convert to integer via helper, then store the narrow value. */
        gen_helper_reds32(tmp, tcg_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, tcg_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, tcg_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs have no 96-bit extended format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* Sign/exponent in the top half of the first long word ... */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        /* ... mantissa in the following 8 bytes. */
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1029
/* Dispatch an FP memory access in the direction selected by WHAT. */
static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
                        TCGv_ptr fp, ea_what what, int index)
{
    if (what == EA_STORE) {
        gen_store_fp(s, opsize, addr, fp, index);
        return;
    }
    gen_load_fp(s, opsize, addr, fp, index);
}
1039
/*
 * Load/store FP register FP from/to the EA given by MODE/REG0.
 * Returns 0 on success, -1 for an invalid addressing mode.
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct. */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, tcg_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, tcg_env, fp);
                break;
            default:
                /* Wider formats do not fit a data register. */
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                /* Sign-extend the register portion before conversion. */
                tcg_gen_ext_i32(tmp, reg, opsize | MO_SIGN);
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_SINGLE:
                gen_helper_extf32(tcg_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct. */
        /* Not a valid FP operand. */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement. */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Postincrement applied immediately (no delayed writeback here). */
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrememnt. */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement. */
    case 6: /* Indirect index + displacement. */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short. */
        case 1: /* Absolute long. */
        case 2: /* pc displacement */
        case 3: /* pc index+displacement. */
            goto do_indirect;
        case 4: /* Immediate. */
            /* Immediates can only be sources. */
            if (what == EA_STORE) {
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(tcg_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(tcg_env, fp, t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    /* ColdFire FPUs have no 96-bit extended format. */
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1167
/*
 * Decode the EA mode (bits 5:3) and register (bits 2:0) fields of INSN
 * and forward to gen_ea_mode_fp for the actual FP load/store.
 * Returns 0 on success, -1 for an invalid addressing mode.
 */
static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
                     int opsize, TCGv_ptr fp, ea_what what, int index)
{
    return gen_ea_mode_fp(env, s, extract32(insn, 3, 3), REG(insn, 0),
                          opsize, fp, what, index);
}
1175
/*
 * A condition decoded by gen_cc_cond: the condition holds iff
 * "v1 <tcond> v2" evaluates true.
 */
typedef struct {
    TCGCond tcond;  /* host comparison to apply */
    TCGv v1;        /* left operand */
    TCGv v2;        /* right operand (frequently constant zero) */
} DisasCompare;
1181
/*
 * Decode m68k condition code COND (0..15) into a host comparison,
 * filling *C so the condition is true iff "v1 <tcond> v2".
 *
 * Conditions come in even/odd pairs (e.g. 6=NE, 7=EQ) that are each
 * other's negation; the code below computes the odd member of the pair
 * and inverts TCOND at "done" when COND is even.
 *
 * Where the lazily-tracked cc_op allows it (CMP/LOGIC/ADD/SUB forms),
 * the comparison is built directly from the tracked operands without
 * materializing the flags; otherwise gen_flush_flags() is emitted and
 * the comparison built from the individual flag registers.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly. */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        /* For CMP, N holds the minuend and V the subtrahend. */
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /*
             * PL/MI test the sign of the result itself, so recompute
             * the (size-extended) difference and compare it with zero.
             */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N. */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X. */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C. */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS. */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above. */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* V is kept in the sign bit of QREG_CC_V. */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_negsetcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* Even condition codes are the negation of the following odd one. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1353
/*
 * Emit a conditional branch to L1, taken when m68k condition COND
 * (0..15) holds.  gen_cc_cond may flush lazy flag state, so the
 * cc_op synchronization must come after it.
 */
static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare c;

    gen_cc_cond(&c, s, cond);
    update_cc_op(s);
    tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
}
1362
/*
 * Force a TB lookup after an instruction that changes the CPU state.
 * Syncs cc_op, points PC at the next insn, and ends the TB with
 * DISAS_EXIT so the new state is observed before further execution.
 */
static void gen_exit_tb(DisasContext *s)
{
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1370
/*
 * Load the source operand designated by INSN's EA field into RESULT,
 * sign- or zero-extended per OP_SIGN; ADDRP optionally receives the
 * computed address for a later writeback.  On an invalid addressing
 * mode this raises an address fault and RETURNS from the enclosing
 * DISAS_INSN function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do { \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp, \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s)); \
        if (IS_NULL_QREG(result)) { \
            gen_addr_fault(s); \
            return; \
        } \
    } while (0)
1379
/*
 * Store VAL to the destination designated by INSN's EA field, reusing
 * the address in ADDRP when a prior SRC_EA computed it.  On an invalid
 * addressing mode this raises an address fault and RETURNS from the
 * enclosing DISAS_INSN function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do { \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp, \
                                EA_STORE, IS_USER(s)); \
        if (IS_NULL_QREG(ea_result)) { \
            gen_addr_fault(s); \
            return; \
        } \
    } while (0)
1388
/*
 * Generate a jump to an immediate address, ending the TB.
 * N is the goto_tb slot (0 or 1); DEST the target PC; SRC the address
 * recorded in the format-2 exception frame when single-stepping.
 */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        /* Trace exception takes priority over chaining to the target. */
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Direct block chaining when the target is safe to link. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1407
1408 #ifndef CONFIG_USER_ONLY
semihosting_test(DisasContext * s)1409 static bool semihosting_test(DisasContext *s)
1410 {
1411 uint32_t test;
1412
1413 if (!semihosting_enabled(IS_USER(s))) {
1414 return false;
1415 }
1416
1417 /*
1418 * "The semihosting instruction is immediately preceded by a
1419 * nop aligned to a 4-byte boundary..."
1420 * The preceding 2-byte (aligned) nop plus the 2-byte halt/bkpt
1421 * means that we have advanced 4 bytes from the required nop.
1422 */
1423 if (s->pc % 4 != 0) {
1424 return false;
1425 }
1426 test = translator_lduw(s->env, &s->base, s->pc - 4);
1427 if (test != 0x4e71) {
1428 return false;
1429 }
1430 /* "... and followed by an invalid sentinel instruction movec %sp,0." */
1431 test = translator_ldl(s->env, &s->base, s->pc);
1432 if (test != 0x4e7bf000) {
1433 return false;
1434 }
1435
1436 /* Consume the sentinel. */
1437 s->pc += 4;
1438 return true;
1439 }
1440 #endif /* !CONFIG_USER_ONLY */
1441
DISAS_INSN(scc)1442 DISAS_INSN(scc)
1443 {
1444 DisasCompare c;
1445 int cond;
1446 TCGv tmp;
1447
1448 cond = (insn >> 8) & 0xf;
1449 gen_cc_cond(&c, s, cond);
1450
1451 tmp = tcg_temp_new();
1452 tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
1453
1454 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1455 }
1456
/*
 * dbcc: decrement-and-branch.  If the condition holds, fall through;
 * otherwise decrement the low word of Dn and branch to the 16-bit
 * displacement unless the counter wrapped to -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    /* The displacement is relative to its own extension word. */
    base = s->pc;
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    /* Only the low word of Dn is written back. */
    gen_partset_reg(OS_WORD, reg, tmp);
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1480
/* Undefined line-A (MAC coprocessor space) opcode: raise EXCP_LINEA. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1485
/* Undefined line-F (FPU space) opcode: raise EXCP_LINEF. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1490
/* Fallback for any opcode without a handler: log and raise ILLEGAL. */
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %" VADDR_PRIx "\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1502
DISAS_INSN(mulw)1503 DISAS_INSN(mulw)
1504 {
1505 TCGv reg;
1506 TCGv tmp;
1507 TCGv src;
1508 int sign;
1509
1510 sign = (insn & 0x100) != 0;
1511 reg = DREG(insn, 9);
1512 tmp = tcg_temp_new();
1513 if (sign)
1514 tcg_gen_ext16s_i32(tmp, reg);
1515 else
1516 tcg_gen_ext16u_i32(tmp, reg);
1517 SRC_EA(env, src, OS_WORD, sign, NULL);
1518 tcg_gen_mul_i32(tmp, tmp, src);
1519 tcg_gen_mov_i32(reg, tmp);
1520 gen_logic_cc(s, tmp, OS_LONG);
1521 }
1522
/*
 * divs.w/divu.w <EA>,Dn: 32-bit dividend in Dn, 16-bit divisor from
 * the EA; the helper leaves quotient/remainder packed 16r:16q in Dn
 * and handles overflow and divide-by-zero.
 */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;
    TCGv ilen;

    /* divX.w <EA>,Dn 32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_constant_i32(REG(insn, 9));
    /*
     * Bytes consumed by this insn so far; presumably used by the
     * helper when raising a divide exception — TODO confirm against
     * the helper implementation.
     */
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsw(tcg_env, destr, src, ilen);
    } else {
        gen_helper_divuw(tcg_env, destr, src, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1547
/*
 * divs.l/divu.l: long division.  The extension word selects signed
 * (bit 11) and 64-bit dividend (bit 10, Dr:Dq) forms; the helpers do
 * the division and set the flags, including exception cases.
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        /* 64-bit dividend requires the QUAD_MULDIV feature. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq 64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(tcg_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(tcg_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq 32/32 -> 32q */
    /* divXl.l <EA>, Dr:Dq 32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(tcg_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(tcg_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1594
/*
 * Emit TCG for a packed-BCD addition: dest = dest + src + X (decimal).
 * The digit correction adds 0x66 up front and then subtracts 0x6 from
 * each digit that did not produce a decimal carry.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     * t1 = src
     * t2 = t1 + 0x066
     * t3 = t2 + dest + X
     * t4 = t2 ^ dest
     * t5 = t3 ^ t4
     * t6 = ~t5 & 0x110
     * t7 = (t6 >> 2) | (t6 >> 3)
     * return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    /* dest = 3 * t0, i.e. 0x22 becomes the 0x66 correction. */
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1659
/*
 * Emit TCG for a packed-BCD subtraction: dest = dest - src - X
 * (decimal), expressed as a BCD addition of the ten's complement.
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     * dest10 = dest10 - src10 - X
     *        = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X */

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1711
/*
 * Set flags after a BCD operation whose 9-bit result is in VAL:
 * Z is sticky (only cleared, never set, by OR-ing in the low byte),
 * and C/X come from bit 8 (the decimal carry/borrow).
 */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1721
/* abcd Dy,Dx: BCD add of two data-register bytes, with extend. */
DISAS_INSN(abcd_reg)
{
    TCGv src;
    TCGv dest;

    gen_flush_flags(s); /* !Z is sticky */

    /* Operate on zero-extended low bytes of the registers. */
    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
    bcd_add(dest, src);
    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1736
/* abcd -(Ay),-(Ax): BCD add of two memory bytes, predecrement mode. */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Write the result back to the destination address. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1757
/* sbcd Dy,Dx: BCD subtract of two data-register bytes, with extend. */
DISAS_INSN(sbcd_reg)
{
    TCGv src, dest;

    gen_flush_flags(s); /* !Z is sticky */

    /* Operate on zero-extended low bytes of the registers. */
    src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);

    bcd_sub(dest, src);

    gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);

    bcd_flags(dest);
}
1773
/* sbcd -(Ay),-(Ax): BCD subtract of two memory bytes, predecrement. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Write the result back to the destination address. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1794
/* nbcd <ea>: negate decimal with extend, i.e. dest = 0 - src - X. */
DISAS_INSN(nbcd)
{
    TCGv src, dest;
    TCGv addr;

    gen_flush_flags(s); /* !Z is sticky */

    SRC_EA(env, src, OS_BYTE, 0, &addr);

    /* Subtract the operand from zero. */
    dest = tcg_temp_new();
    tcg_gen_movi_i32(dest, 0);
    bcd_sub(dest, src);

    DEST_EA(env, insn, OS_BYTE, dest, &addr);

    bcd_flags(dest);
}
1812
/*
 * add/sub: ADD.B/W/L and SUB.B/W/L.  Bit 14 selects add vs sub;
 * bit 8 selects direction (Dn,<ea> stores to memory, <ea>,Dn to the
 * data register).
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* Dn,<ea>: the memory operand is the destination. */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* <ea>,Dn: the data register is the destination. */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* X = carry out: unsigned result wrapped below the addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* X = borrow: minuend unsigned-less-than subtrahend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1850
/* Reverse the order of the bits in REG (ColdFire bitrev, via helper). */
DISAS_INSN(bitrev)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_helper_bitrev(reg, reg);
}
1858
DISAS_INSN(bitop_reg)1859 DISAS_INSN(bitop_reg)
1860 {
1861 int opsize;
1862 int op;
1863 TCGv src1;
1864 TCGv src2;
1865 TCGv tmp;
1866 TCGv addr;
1867 TCGv dest;
1868
1869 if ((insn & 0x38) != 0)
1870 opsize = OS_BYTE;
1871 else
1872 opsize = OS_LONG;
1873 op = (insn >> 6) & 3;
1874 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1875
1876 gen_flush_flags(s);
1877 src2 = tcg_temp_new();
1878 if (opsize == OS_BYTE)
1879 tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1880 else
1881 tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1882
1883 tmp = tcg_temp_new();
1884 tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
1885
1886 tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1887
1888 dest = tcg_temp_new();
1889 switch (op) {
1890 case 1: /* bchg */
1891 tcg_gen_xor_i32(dest, src1, tmp);
1892 break;
1893 case 2: /* bclr */
1894 tcg_gen_andc_i32(dest, src1, tmp);
1895 break;
1896 case 3: /* bset */
1897 tcg_gen_or_i32(dest, src1, tmp);
1898 break;
1899 default: /* btst */
1900 break;
1901 }
1902 if (op) {
1903 DEST_EA(env, insn, opsize, dest, &addr);
1904 }
1905 }
1906
/*
 * sats Dn: saturate the register via the helper, which consults the
 * V flag (hence the preceding flag flush); flags set as a logic op.
 */
DISAS_INSN(sats)
{
    TCGv reg;
    reg = DREG(insn, 0);
    gen_flush_flags(s);
    gen_helper_sats(reg, reg, QREG_CC_V);
    gen_logic_cc(s, reg, OS_LONG);
}
1915
/*
 * Push the long value VAL onto the stack: SP is decremented by 4 and
 * the store emitted before SP itself is updated.
 */
static void gen_push(DisasContext *s, TCGv val)
{
    TCGv tmp;

    tmp = tcg_temp_new();
    tcg_gen_subi_i32(tmp, QREG_SP, 4);
    gen_store(s, OS_LONG, tmp, val, IS_USER(s));
    /* Commit the new SP only after the store has been emitted. */
    tcg_gen_mov_i32(QREG_SP, tmp);
}
1925
mreg(int reg)1926 static TCGv mreg(int reg)
1927 {
1928 if (reg < 8) {
1929 /* Dx */
1930 return cpu_dregs[reg];
1931 }
1932 /* Ax */
1933 return cpu_aregs[reg & 7];
1934 }
1935
/*
 * movem: move multiple registers to/from memory.  Bit 10 selects the
 * load direction, bit 6 the operand size; a 16-bit mask extension word
 * names the registers.  Loads read everything before writing any
 * register so a faulting access leaves the register file untouched.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    /* Work on a private copy so the address register is untouched
       until the transfer is known to complete. */
    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        /* All loads done; now commit to the register file. */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2041
/*
 * movep: move peripheral data.  Transfers 2 or 4 bytes (bit 6) between
 * a data register and alternate bytes of memory at d16(An), stepping
 * the address by 2 between bytes.  Bit 7 selects register-to-memory.
 */
DISAS_INSN(movep)
{
    uint8_t i;
    int16_t displ;
    TCGv reg;
    TCGv addr;
    TCGv abuf;
    TCGv dbuf;

    displ = read_im16(env, s);

    addr = AREG(insn, 0);
    reg = DREG(insn, 9);

    abuf = tcg_temp_new();
    tcg_gen_addi_i32(abuf, addr, displ);
    dbuf = tcg_temp_new();

    if (insn & 0x40) {
        i = 4;
    } else {
        i = 2;
    }

    if (insn & 0x80) {
        /* Register to memory: most significant byte first. */
        for ( ; i > 0 ; i--) {
            tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
            tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    } else {
        /* Memory to register: bytes deposited MSB-first into Dn. */
        for ( ; i > 0 ; i--) {
            /*
             * NOTE(review): the load uses the _tl variant while the
             * store above uses _i32 — equivalent only while the m68k
             * target long is 32 bits; confirm if this ever changes.
             */
            tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
            tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
            if (i > 1) {
                tcg_gen_addi_i32(abuf, abuf, 2);
            }
        }
    }
}
2084
DISAS_INSN(bitop_im)2085 DISAS_INSN(bitop_im)
2086 {
2087 int opsize;
2088 int op;
2089 TCGv src1;
2090 uint32_t mask;
2091 int bitnum;
2092 TCGv tmp;
2093 TCGv addr;
2094
2095 if ((insn & 0x38) != 0)
2096 opsize = OS_BYTE;
2097 else
2098 opsize = OS_LONG;
2099 op = (insn >> 6) & 3;
2100
2101 bitnum = read_im16(env, s);
2102 if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2103 if (bitnum & 0xfe00) {
2104 disas_undef(env, s, insn);
2105 return;
2106 }
2107 } else {
2108 if (bitnum & 0xff00) {
2109 disas_undef(env, s, insn);
2110 return;
2111 }
2112 }
2113
2114 SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2115
2116 gen_flush_flags(s);
2117 if (opsize == OS_BYTE)
2118 bitnum &= 7;
2119 else
2120 bitnum &= 31;
2121 mask = 1 << bitnum;
2122
2123 tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2124
2125 if (op) {
2126 tmp = tcg_temp_new();
2127 switch (op) {
2128 case 1: /* bchg */
2129 tcg_gen_xori_i32(tmp, src1, mask);
2130 break;
2131 case 2: /* bclr */
2132 tcg_gen_andi_i32(tmp, src1, ~mask);
2133 break;
2134 case 3: /* bset */
2135 tcg_gen_ori_i32(tmp, src1, mask);
2136 break;
2137 default: /* btst */
2138 break;
2139 }
2140 DEST_EA(env, insn, opsize, tmp, &addr);
2141 }
2142 }
2143
/*
 * Return a fresh temporary holding the current CCR value, computed by
 * the helper from the (synchronized) lazy flag state.
 */
static TCGv gen_get_ccr(DisasContext *s)
{
    TCGv dest;

    update_cc_op(s);
    dest = tcg_temp_new();
    gen_helper_get_ccr(dest, tcg_env);
    return dest;
}
2153
/*
 * Return a fresh temporary holding the full SR: the supervisor bits
 * from QREG_SR combined with the live CCR in the low byte.
 */
static TCGv gen_get_sr(DisasContext *s)
{
    TCGv ccr;
    TCGv sr;

    ccr = gen_get_ccr(s);
    sr = tcg_temp_new();
    tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
    tcg_gen_or_i32(sr, sr, ccr);
    return sr;
}
2165
/*
 * Load the CCR (or full SR) from the immediate VAL.  For CCR-only the
 * flag registers are set directly in their internal encodings: C/X as
 * 0 or 1, N/V as sign-replicated, and Z as a value that is zero iff
 * the Z flag is set.
 */
static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
{
    if (ccr_only) {
        tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
        tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
        tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
        tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(tcg_env, tcg_constant_i32(val));
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2181
/*
 * Load the CCR (or full SR) from the runtime value VAL via the
 * appropriate helper; lazy flags become CC_OP_FLAGS afterwards.
 */
static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
{
    if (ccr_only) {
        gen_helper_set_ccr(tcg_env, val);
    } else {
        /* Must writeback before changing security state. */
        do_writebacks(s);
        gen_helper_set_sr(tcg_env, val);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
2193
/*
 * move <ea>,SR / move <ea>,CCR: EA field 0x3c is the immediate form,
 * otherwise a word is read through the general EA machinery.
 */
static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
                           bool ccr_only)
{
    if ((insn & 0x3f) == 0x3c) {
        uint16_t val;
        val = read_im16(env, s);
        gen_set_sr_im(s, val, ccr_only);
    } else {
        TCGv src;
        SRC_EA(env, src, OS_WORD, 0, NULL);
        gen_set_sr(s, src, ccr_only);
    }
}
2207
/*
 * ori/andi/subi/addi/eori/cmpi #imm,<ea>, selected by bits 11:9.
 * When the EA field is 0x3c the logical forms (ori/andi/eori) target
 * CCR (byte size) or SR (word size, privileged) instead of memory.
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    /* Read the sign-extended immediate of the appropriate size. */
    switch (opsize) {
    case OS_BYTE:
        im = tcg_constant_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_constant_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_constant_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            if (IS_USER(s)) {
                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached. */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi never writes back, so no address is needed. */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        /* X = borrow: operand unsigned-less-than immediate. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* X = carry out: result wrapped below the immediate. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
}
2312
/*
 * cas Dc,Du,<ea>: compare-and-swap, implemented with a host atomic
 * cmpxchg.  Flags are set as for a compare of the loaded value with
 * Dc, and the post-increment/pre-decrement address update of the EA
 * is applied afterwards.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    /* Size field: 1 = byte, 2 = word, 3 = long. */
    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement. */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement. */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2375
/*
 * cas2.w: double word-sized compare-and-swap on two addresses.  The
 * word form has no parallel-safe helper, so under CF_PARALLEL the TB
 * exits to run the operation under the exclusive-execution fallback.
 */
DISAS_INSN(cas2w)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(tcg_env);
    } else {
        /* Pack the four register numbers into one operand. */
        TCGv regs = tcg_constant_i32(REG(ext2, 6) |
                                     (REG(ext1, 6) << 3) |
                                     (REG(ext2, 0) << 6) |
                                     (REG(ext1, 0) << 9));
        gen_helper_cas2w(tcg_env, regs, addr1, addr2);
    }

    /* Note that cas2w also assigned to env->cc_op. */
    s->cc_op = CC_OP_CMPW;
    s->cc_op_synced = 1;
}
2425
DISAS_INSN(cas2l)
{
    uint16_t ext1, ext2;
    TCGv addr1, addr2, regs;

    /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */

    ext1 = read_im16(env, s);

    /* Bit 15 of each extension word selects A-reg vs D-reg for Rn. */
    if (ext1 & 0x8000) {
        /* Address Register */
        addr1 = AREG(ext1, 12);
    } else {
        /* Data Register */
        addr1 = DREG(ext1, 12);
    }

    ext2 = read_im16(env, s);
    if (ext2 & 0x8000) {
        /* Address Register */
        addr2 = AREG(ext2, 12);
    } else {
        /* Data Register */
        addr2 = DREG(ext2, 12);
    }

    /*
     * if (R1) == Dc1 && (R2) == Dc2 then
     *     (R1) = Du1
     *     (R2) = Du2
     * else
     *     Dc1 = (R1)
     *     Dc2 = (R2)
     */

    /* Pack the four register numbers into one helper argument. */
    regs = tcg_constant_i32(REG(ext2, 6) |
                            (REG(ext1, 6) << 3) |
                            (REG(ext2, 0) << 6) |
                            (REG(ext1, 0) << 9));
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_cas2l_parallel(tcg_env, regs, addr1, addr2);
    } else {
        gen_helper_cas2l(tcg_env, regs, addr1, addr2);
    }

    /* Note that cas2l also assigned to env->cc_op. */
    s->cc_op = CC_OP_CMPL;
    s->cc_op_synced = 1;
}
2475
DISAS_INSN(byterev)2476 DISAS_INSN(byterev)
2477 {
2478 TCGv reg;
2479
2480 reg = DREG(insn, 0);
2481 tcg_gen_bswap32_i32(reg, reg);
2482 }
2483
DISAS_INSN(move)2484 DISAS_INSN(move)
2485 {
2486 TCGv src;
2487 TCGv dest;
2488 int op;
2489 int opsize;
2490
2491 switch (insn >> 12) {
2492 case 1: /* move.b */
2493 opsize = OS_BYTE;
2494 break;
2495 case 2: /* move.l */
2496 opsize = OS_LONG;
2497 break;
2498 case 3: /* move.w */
2499 opsize = OS_WORD;
2500 break;
2501 default:
2502 abort();
2503 }
2504 SRC_EA(env, src, opsize, 1, NULL);
2505 op = (insn >> 6) & 7;
2506 if (op == 1) {
2507 /* movea */
2508 /* The value will already have been sign extended. */
2509 dest = AREG(insn, 9);
2510 tcg_gen_mov_i32(dest, src);
2511 } else {
2512 /* normal move */
2513 uint16_t dest_ea;
2514 dest_ea = ((insn >> 9) & 7) | (op << 3);
2515 DEST_EA(env, dest_ea, opsize, src, NULL);
2516 /* This will be correct because loads sign extend. */
2517 gen_logic_cc(s, src, opsize);
2518 }
2519 }
2520
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = -(src + X);
     */

    z = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* The borrow out is bit 0 of the double-width high half. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place. */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2563
DISAS_INSN(lea)2564 DISAS_INSN(lea)
2565 {
2566 TCGv reg;
2567 TCGv tmp;
2568
2569 reg = AREG(insn, 9);
2570 tmp = gen_lea(env, s, insn, OS_LONG);
2571 if (IS_NULL_QREG(tmp)) {
2572 gen_addr_fault(s);
2573 return;
2574 }
2575 tcg_gen_mov_i32(reg, tmp);
2576 }
2577
DISAS_INSN(clr)2578 DISAS_INSN(clr)
2579 {
2580 int opsize;
2581 TCGv zero;
2582
2583 zero = tcg_constant_i32(0);
2584 opsize = insn_opsize(insn);
2585 DEST_EA(env, insn, opsize, zero, NULL);
2586 gen_logic_cc(s, zero, opsize);
2587 }
2588
DISAS_INSN(move_from_ccr)
{
    TCGv ccr;

    /* Materialize the condition code register and store it as a word. */
    ccr = gen_get_ccr(s);
    DEST_EA(env, insn, OS_WORD, ccr, NULL);
}
2596
DISAS_INSN(neg)
{
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src1, opsize, 1, &addr);
    dest = tcg_temp_new();
    tcg_gen_neg_i32(dest, src1);
    /* Flags are computed lazily as a size-appropriate subtract. */
    set_cc_op(s, CC_OP_SUBB + opsize);
    gen_update_cc_add(dest, src1, opsize);
    /* X (and C) is set whenever the result is non-zero. */
    tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
    DEST_EA(env, insn, opsize, dest, &addr);
}
2613
DISAS_INSN(move_to_ccr)
{
    /* Shares the move-to-SR path; 'true' restricts the write to CCR. */
    gen_move_to_sr(env, s, insn, true);
}
2618
DISAS_INSN(not)2619 DISAS_INSN(not)
2620 {
2621 TCGv src1;
2622 TCGv dest;
2623 TCGv addr;
2624 int opsize;
2625
2626 opsize = insn_opsize(insn);
2627 SRC_EA(env, src1, opsize, 1, &addr);
2628 dest = tcg_temp_new();
2629 tcg_gen_not_i32(dest, src1);
2630 DEST_EA(env, insn, opsize, dest, &addr);
2631 gen_logic_cc(s, dest, opsize);
2632 }
2633
DISAS_INSN(swap)2634 DISAS_INSN(swap)
2635 {
2636 TCGv src1;
2637 TCGv src2;
2638 TCGv reg;
2639
2640 src1 = tcg_temp_new();
2641 src2 = tcg_temp_new();
2642 reg = DREG(insn, 0);
2643 tcg_gen_shli_i32(src1, reg, 16);
2644 tcg_gen_shri_i32(src2, reg, 16);
2645 tcg_gen_or_i32(reg, src1, src2);
2646 gen_logic_cc(s, reg, OS_LONG);
2647 }
2648
DISAS_INSN(bkpt)
{
#if defined(CONFIG_USER_ONLY)
    /* In user mode, report a debug trap to the gdbstub/signal layer. */
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#else
    /* BKPT #0 is the alternate semihosting instruction. */
    if ((insn & 7) == 0 && semihosting_test(s)) {
        gen_exception(s, s->pc, EXCP_SEMIHOSTING);
        return;
    }
    /* Otherwise BKPT raises an illegal-instruction exception. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#endif
}
2662
DISAS_INSN(pea)2663 DISAS_INSN(pea)
2664 {
2665 TCGv tmp;
2666
2667 tmp = gen_lea(env, s, insn, OS_LONG);
2668 if (IS_NULL_QREG(tmp)) {
2669 gen_addr_fault(s);
2670 return;
2671 }
2672 gen_push(s, tmp);
2673 }
2674
DISAS_INSN(ext)2675 DISAS_INSN(ext)
2676 {
2677 int op;
2678 TCGv reg;
2679 TCGv tmp;
2680
2681 reg = DREG(insn, 0);
2682 op = (insn >> 6) & 7;
2683 tmp = tcg_temp_new();
2684 if (op == 3)
2685 tcg_gen_ext16s_i32(tmp, reg);
2686 else
2687 tcg_gen_ext8s_i32(tmp, reg);
2688 if (op == 2)
2689 gen_partset_reg(OS_WORD, reg, tmp);
2690 else
2691 tcg_gen_mov_i32(reg, tmp);
2692 gen_logic_cc(s, tmp, OS_LONG);
2693 }
2694
DISAS_INSN(tst)2695 DISAS_INSN(tst)
2696 {
2697 int opsize;
2698 TCGv tmp;
2699
2700 opsize = insn_opsize(insn);
2701 SRC_EA(env, tmp, opsize, 1, NULL);
2702 gen_logic_cc(s, tmp, opsize);
2703 }
2704
DISAS_INSN(pulse)
{
    /* ColdFire PULSE has no architecturally visible effect here;
       implemented as a NOP. */
}
2709
DISAS_INSN(illegal)
{
    /* Raise an illegal-instruction exception at the current insn. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2714
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct: no memory access, so no atomicity needed */
        TCGv dest = cpu_dregs[reg0];
        gen_logic_cc(s, dest, OS_BYTE);
        /* Set bit 7 after the flags have been computed from the old value. */
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        src1 = tcg_temp_new();
        /* Atomically set bit 7; flags come from the fetched old value. */
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        /* Complete the address-register side effect for (An)+ and -(An). */
        switch (mode) {
        case 3: /* Indirect postincrement. */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrement. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2748
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    /* Bit 11 of the extension word selects signed vs unsigned. */
    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit result form: mulu.l/muls.l <ea>,Dh:Dl */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z reflects the entire 64-bit product. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        /* 32-bit result, with overflow derived from the high half. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2812
gen_link(DisasContext * s,uint16_t insn,int32_t offset)2813 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2814 {
2815 TCGv reg;
2816 TCGv tmp;
2817
2818 reg = AREG(insn, 0);
2819 tmp = tcg_temp_new();
2820 tcg_gen_subi_i32(tmp, QREG_SP, 4);
2821 gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2822 if ((insn & 7) != 7) {
2823 tcg_gen_mov_i32(reg, tmp);
2824 }
2825 tcg_gen_addi_i32(QREG_SP, tmp, offset);
2826 }
2827
DISAS_INSN(link)2828 DISAS_INSN(link)
2829 {
2830 int16_t offset;
2831
2832 offset = read_im16(env, s);
2833 gen_link(s, insn, offset);
2834 }
2835
DISAS_INSN(linkl)2836 DISAS_INSN(linkl)
2837 {
2838 int32_t offset;
2839
2840 offset = read_im32(env, s);
2841 gen_link(s, insn, offset);
2842 }
2843
DISAS_INSN(unlk)
{
    TCGv src;
    TCGv reg;
    TCGv tmp;

    src = tcg_temp_new();
    reg = AREG(insn, 0);
    /* Copy An first so the sequence also works when An is SP. */
    tcg_gen_mov_i32(src, reg);
    tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
    /* Restore the saved value of An and pop it off the stack. */
    tcg_gen_mov_i32(reg, tmp);
    tcg_gen_addi_i32(QREG_SP, src, 4);
}
2857
#if !defined(CONFIG_USER_ONLY)
DISAS_INSN(reset)
{
    /* RESET is privileged; from user mode it raises a privilege
       violation, otherwise it is handled by a helper. */
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(tcg_env);
}
#endif
2869
DISAS_INSN(nop)
{
    /* No operation. */
}
2873
DISAS_INSN(rtd)
{
    TCGv tmp;
    int16_t offset = read_im16(env, s);

    /* Pop the return address, then deallocate 'offset' extra bytes. */
    tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
    gen_jmp(s, tmp);
}
2883
DISAS_INSN(rtr)
{
    TCGv tmp;
    TCGv ccr;
    TCGv sp;

    /* Pop a word into CCR, then a long return address. */
    sp = tcg_temp_new();
    ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
    tcg_gen_addi_i32(sp, QREG_SP, 2);
    tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
    tcg_gen_addi_i32(QREG_SP, sp, 4);

    /* 'true' restricts the update to the CCR portion of SR. */
    gen_set_sr(s, ccr, true);

    gen_jmp(s, tmp);
}
2900
DISAS_INSN(rts)2901 DISAS_INSN(rts)
2902 {
2903 TCGv tmp;
2904
2905 tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2906 tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2907 gen_jmp(s, tmp);
2908 }
2909
DISAS_INSN(jump)
{
    TCGv tmp;

    /*
     * Load the target address first to ensure correct exception
     * behavior.
     */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }
    /* Bit 6 distinguishes jmp (set) from jsr (clear). */
    if ((insn & 0x40) == 0) {
        /* jsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    gen_jmp(s, tmp);
}
2929
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long. */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    /* An immediate field of 0 encodes the value 8. */
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        /* Bit 8 set selects subq, clear selects addq. */
        if (insn & 0x0100) {
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2977
DISAS_INSN(branch)2978 DISAS_INSN(branch)
2979 {
2980 int32_t offset;
2981 uint32_t base;
2982 int op;
2983
2984 base = s->pc;
2985 op = (insn >> 8) & 0xf;
2986 offset = (int8_t)insn;
2987 if (offset == 0) {
2988 offset = (int16_t)read_im16(env, s);
2989 } else if (offset == -1) {
2990 offset = read_im32(env, s);
2991 }
2992 if (op == 1) {
2993 /* bsr */
2994 gen_push(s, tcg_constant_i32(s->pc));
2995 }
2996 if (op > 1) {
2997 /* Bcc */
2998 TCGLabel *l1 = gen_new_label();
2999 gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
3000 gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
3001 gen_set_label(l1);
3002 gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
3003 } else {
3004 /* Unconditional branch. */
3005 update_cc_op(s);
3006 gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
3007 }
3008 }
3009
DISAS_INSN(moveq)3010 DISAS_INSN(moveq)
3011 {
3012 tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3013 gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3014 }
3015
DISAS_INSN(mvzs)3016 DISAS_INSN(mvzs)
3017 {
3018 int opsize;
3019 TCGv src;
3020 TCGv reg;
3021
3022 if (insn & 0x40)
3023 opsize = OS_WORD;
3024 else
3025 opsize = OS_BYTE;
3026 SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3027 reg = DREG(insn, 9);
3028 tcg_gen_mov_i32(reg, src);
3029 gen_logic_cc(s, src, opsize);
3030 }
3031
DISAS_INSN(or)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 0);
    dest = tcg_temp_new();
    /* Bit 8 selects direction: set = or Dn,<EA>; clear = or <EA>,Dn. */
    if (insn & 0x100) {
        SRC_EA(env, src, opsize, 0, &addr);
        tcg_gen_or_i32(dest, src, reg);
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        SRC_EA(env, src, opsize, 0, NULL);
        tcg_gen_or_i32(dest, src, reg);
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
    gen_logic_cc(s, dest, opsize);
}
3054
DISAS_INSN(suba)3055 DISAS_INSN(suba)
3056 {
3057 TCGv src;
3058 TCGv reg;
3059
3060 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3061 reg = AREG(insn, 9);
3062 tcg_gen_sub_i32(reg, reg, src);
3063 }
3064
gen_subx(DisasContext * s,TCGv src,TCGv dest,int opsize)3065 static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3066 {
3067 TCGv tmp, zero;
3068
3069 gen_flush_flags(s); /* compute old Z */
3070
3071 /*
3072 * Perform subtract with borrow.
3073 * (X, N) = dest - (src + X);
3074 */
3075
3076 zero = tcg_constant_i32(0);
3077 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
3078 tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
3079 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3080 tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);
3081
3082 /* Compute signed-overflow for subtract. */
3083
3084 tmp = tcg_temp_new();
3085 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
3086 tcg_gen_xor_i32(tmp, dest, src);
3087 tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);
3088
3089 /* Copy the rest of the results into place. */
3090 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3091 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3092
3093 set_cc_op(s, CC_OP_FLAGS);
3094
3095 /* result is in QREG_CC_N */
3096 }
3097
DISAS_INSN(subx_reg)3098 DISAS_INSN(subx_reg)
3099 {
3100 TCGv dest;
3101 TCGv src;
3102 int opsize;
3103
3104 opsize = insn_opsize(insn);
3105
3106 src = gen_extend(s, DREG(insn, 0), opsize, 1);
3107 dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3108
3109 gen_subx(s, src, dest, opsize);
3110
3111 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3112 }
3113
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Memory form uses predecrement addressing for both operands. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3136
DISAS_INSN(mov3q)3137 DISAS_INSN(mov3q)
3138 {
3139 TCGv src;
3140 int val;
3141
3142 val = (insn >> 9) & 7;
3143 if (val == 0) {
3144 val = -1;
3145 }
3146 src = tcg_constant_i32(val);
3147 gen_logic_cc(s, src, OS_LONG);
3148 DEST_EA(env, insn, OS_LONG, src, NULL);
3149 }
3150
DISAS_INSN(cmp)3151 DISAS_INSN(cmp)
3152 {
3153 TCGv src;
3154 TCGv reg;
3155 int opsize;
3156
3157 opsize = insn_opsize(insn);
3158 SRC_EA(env, src, opsize, 1, NULL);
3159 reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3160 gen_update_cc_cmp(s, reg, src, opsize);
3161 }
3162
DISAS_INSN(cmpa)
{
    int opsize;
    TCGv src;
    TCGv reg;

    /* Bit 8 selects the source size; the source is sign-extended. */
    if (insn & 0x100) {
        opsize = OS_LONG;
    } else {
        opsize = OS_WORD;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = AREG(insn, 9);
    /* The comparison itself is always performed at full 32 bits. */
    gen_update_cc_cmp(s, reg, src, OS_LONG);
}
3178
DISAS_INSN(cmpm)
{
    /* cmpm (Ay)+,(Ax)+ */
    int opsize = insn_opsize(insn);
    TCGv src, dst;

    /* Post-increment load (mode 3) from Ay. */
    src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));
    /* Post-increment load (mode 3) from Ax. */
    dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
                      NULL_QREG, NULL, EA_LOADS, IS_USER(s));

    gen_update_cc_cmp(s, dst, src, opsize);
}
3193
DISAS_INSN(eor)3194 DISAS_INSN(eor)
3195 {
3196 TCGv src;
3197 TCGv dest;
3198 TCGv addr;
3199 int opsize;
3200
3201 opsize = insn_opsize(insn);
3202
3203 SRC_EA(env, src, opsize, 0, &addr);
3204 dest = tcg_temp_new();
3205 tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3206 gen_logic_cc(s, dest, opsize);
3207 DEST_EA(env, insn, opsize, dest, &addr);
3208 }
3209
do_exg(TCGv reg1,TCGv reg2)3210 static void do_exg(TCGv reg1, TCGv reg2)
3211 {
3212 TCGv temp = tcg_temp_new();
3213 tcg_gen_mov_i32(temp, reg1);
3214 tcg_gen_mov_i32(reg1, reg2);
3215 tcg_gen_mov_i32(reg2, temp);
3216 }
3217
DISAS_INSN(exg_dd)
{
    /* exg Dx,Dy: exchange two data registers */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3223
DISAS_INSN(exg_aa)
{
    /* exg Ax,Ay: exchange two address registers */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3229
DISAS_INSN(exg_da)
{
    /* exg Dx,Ay: exchange a data register with an address register */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3235
DISAS_INSN(and)3236 DISAS_INSN(and)
3237 {
3238 TCGv src;
3239 TCGv reg;
3240 TCGv dest;
3241 TCGv addr;
3242 int opsize;
3243
3244 dest = tcg_temp_new();
3245
3246 opsize = insn_opsize(insn);
3247 reg = DREG(insn, 9);
3248 if (insn & 0x100) {
3249 SRC_EA(env, src, opsize, 0, &addr);
3250 tcg_gen_and_i32(dest, src, reg);
3251 DEST_EA(env, insn, opsize, dest, &addr);
3252 } else {
3253 SRC_EA(env, src, opsize, 0, NULL);
3254 tcg_gen_and_i32(dest, src, reg);
3255 gen_partset_reg(opsize, reg, dest);
3256 }
3257 gen_logic_cc(s, dest, opsize);
3258 }
3259
DISAS_INSN(adda)3260 DISAS_INSN(adda)
3261 {
3262 TCGv src;
3263 TCGv reg;
3264
3265 SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3266 reg = AREG(insn, 9);
3267 tcg_gen_add_i32(reg, reg, src);
3268 }
3269
gen_addx(DisasContext * s,TCGv src,TCGv dest,int opsize)3270 static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
3271 {
3272 TCGv tmp, zero;
3273
3274 gen_flush_flags(s); /* compute old Z */
3275
3276 /*
3277 * Perform addition with carry.
3278 * (X, N) = src + dest + X;
3279 */
3280
3281 zero = tcg_constant_i32(0);
3282 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
3283 tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
3284 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3285
3286 /* Compute signed-overflow for addition. */
3287
3288 tmp = tcg_temp_new();
3289 tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
3290 tcg_gen_xor_i32(tmp, dest, src);
3291 tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);
3292
3293 /* Copy the rest of the results into place. */
3294 tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
3295 tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
3296
3297 set_cc_op(s, CC_OP_FLAGS);
3298
3299 /* result is in QREG_CC_N */
3300 }
3301
DISAS_INSN(addx_reg)3302 DISAS_INSN(addx_reg)
3303 {
3304 TCGv dest;
3305 TCGv src;
3306 int opsize;
3307
3308 opsize = insn_opsize(insn);
3309
3310 dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3311 src = gen_extend(s, DREG(insn, 0), opsize, 1);
3312
3313 gen_addx(s, src, dest, opsize);
3314
3315 gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3316 }
3317
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Memory form uses predecrement addressing for both operands. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    /* gen_addx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3340
shift_im(DisasContext * s,uint16_t insn,int opsize)3341 static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
3342 {
3343 int count = (insn >> 9) & 7;
3344 int logical = insn & 8;
3345 int left = insn & 0x100;
3346 int bits = opsize_bytes(opsize) * 8;
3347 TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
3348
3349 if (count == 0) {
3350 count = 8;
3351 }
3352
3353 tcg_gen_movi_i32(QREG_CC_V, 0);
3354 if (left) {
3355 tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
3356 tcg_gen_shli_i32(QREG_CC_N, reg, count);
3357
3358 /*
3359 * Note that ColdFire always clears V (done above),
3360 * while M68000 sets if the most significant bit is changed at
3361 * any time during the shift operation.
3362 */
3363 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
3364 /* if shift count >= bits, V is (reg != 0) */
3365 if (count >= bits) {
3366 tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
3367 } else {
3368 TCGv t0 = tcg_temp_new();
3369 tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
3370 tcg_gen_sari_i32(t0, reg, bits - count - 1);
3371 tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
3372 }
3373 }
3374 } else {
3375 tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
3376 if (logical) {
3377 tcg_gen_shri_i32(QREG_CC_N, reg, count);
3378 } else {
3379 tcg_gen_sari_i32(QREG_CC_N, reg, count);
3380 }
3381 }
3382
3383 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3384 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3385 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3386 tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
3387
3388 gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3389 set_cc_op(s, CC_OP_FLAGS);
3390 }
3391
shift_reg(DisasContext * s,uint16_t insn,int opsize)3392 static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
3393 {
3394 int logical = insn & 8;
3395 int left = insn & 0x100;
3396 int bits = opsize_bytes(opsize) * 8;
3397 TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
3398 TCGv s32;
3399 TCGv_i64 t64, s64;
3400
3401 t64 = tcg_temp_new_i64();
3402 s64 = tcg_temp_new_i64();
3403 s32 = tcg_temp_new();
3404
3405 /*
3406 * Note that m68k truncates the shift count modulo 64, not 32.
3407 * In addition, a 64-bit shift makes it easy to find "the last
3408 * bit shifted out", for the carry flag.
3409 */
3410 tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
3411 tcg_gen_extu_i32_i64(s64, s32);
3412 tcg_gen_extu_i32_i64(t64, reg);
3413
3414 /* Optimistically set V=0. Also used as a zero source below. */
3415 tcg_gen_movi_i32(QREG_CC_V, 0);
3416 if (left) {
3417 tcg_gen_shl_i64(t64, t64, s64);
3418
3419 if (opsize == OS_LONG) {
3420 tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
3421 /* Note that C=0 if shift count is 0, and we get that for free. */
3422 } else {
3423 TCGv zero = tcg_constant_i32(0);
3424 tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
3425 tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
3426 tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
3427 s32, zero, zero, QREG_CC_C);
3428 }
3429 tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
3430
3431 /* X = C, but only if the shift count was non-zero. */
3432 tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3433 QREG_CC_C, QREG_CC_X);
3434
3435 /*
3436 * M68000 sets V if the most significant bit is changed at
3437 * any time during the shift operation. Do this via creating
3438 * an extension of the sign bit, comparing, and discarding
3439 * the bits below the sign bit. I.e.
3440 * int64_t s = (intN_t)reg;
3441 * int64_t t = (int64_t)(intN_t)reg << count;
3442 * V = ((s ^ t) & (-1 << (bits - 1))) != 0
3443 */
3444 if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
3445 TCGv_i64 tt = tcg_constant_i64(32);
3446 /* if shift is greater than 32, use 32 */
3447 tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
3448 /* Sign extend the input to 64 bits; re-do the shift. */
3449 tcg_gen_ext_i32_i64(t64, reg);
3450 tcg_gen_shl_i64(s64, t64, s64);
3451 /* Clear all bits that are unchanged. */
3452 tcg_gen_xor_i64(t64, t64, s64);
3453 /* Ignore the bits below the sign bit. */
3454 tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
3455 /* If any bits remain set, we have overflow. */
3456 tcg_gen_negsetcond_i64(TCG_COND_NE, t64, t64, tcg_constant_i64(0));
3457 tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
3458 }
3459 } else {
3460 tcg_gen_shli_i64(t64, t64, 32);
3461 if (logical) {
3462 tcg_gen_shr_i64(t64, t64, s64);
3463 } else {
3464 tcg_gen_sar_i64(t64, t64, s64);
3465 }
3466 tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);
3467
3468 /* Note that C=0 if shift count is 0, and we get that for free. */
3469 tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);
3470
3471 /* X = C, but only if the shift count was non-zero. */
3472 tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
3473 QREG_CC_C, QREG_CC_X);
3474 }
3475 gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
3476 tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
3477
3478 /* Write back the result. */
3479 gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
3480 set_cc_op(s, CC_OP_FLAGS);
3481 }
3482
DISAS_INSN(shift8_im)
{
    /* Byte-sized shift with immediate count. */
    shift_im(s, insn, OS_BYTE);
}
3487
DISAS_INSN(shift16_im)
{
    /* Word-sized shift with immediate count. */
    shift_im(s, insn, OS_WORD);
}
3492
DISAS_INSN(shift_im)
{
    /* Long-sized shift with immediate count. */
    shift_im(s, insn, OS_LONG);
}
3497
DISAS_INSN(shift8_reg)
{
    /* Byte-sized shift with count from a data register. */
    shift_reg(s, insn, OS_BYTE);
}
3502
DISAS_INSN(shift16_reg)
{
    /* Word-sized shift with count from a data register. */
    shift_reg(s, insn, OS_WORD);
}
3507
DISAS_INSN(shift_reg)
{
    /* Long-sized shift with count from a data register. */
    shift_reg(s, insn, OS_LONG);
}
3512
DISAS_INSN(shift_mem)
{
    /* Memory-operand shift: always word-sized, always by one bit. */
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the bit shifted out of the top of the word. */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C is the bit shifted out of the bottom. */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3552
rotate(TCGv reg,TCGv shift,int left,int size)3553 static void rotate(TCGv reg, TCGv shift, int left, int size)
3554 {
3555 switch (size) {
3556 case 8:
3557 /* Replicate the 8-bit input so that a 32-bit rotate works. */
3558 tcg_gen_ext8u_i32(reg, reg);
3559 tcg_gen_muli_i32(reg, reg, 0x01010101);
3560 goto do_long;
3561 case 16:
3562 /* Replicate the 16-bit input so that a 32-bit rotate works. */
3563 tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
3564 goto do_long;
3565 do_long:
3566 default:
3567 if (left) {
3568 tcg_gen_rotl_i32(reg, reg, shift);
3569 } else {
3570 tcg_gen_rotr_i32(reg, reg, shift);
3571 }
3572 }
3573
3574 /* compute flags */
3575
3576 switch (size) {
3577 case 8:
3578 tcg_gen_ext8s_i32(reg, reg);
3579 break;
3580 case 16:
3581 tcg_gen_ext16s_i32(reg, reg);
3582 break;
3583 default:
3584 break;
3585 }
3586
3587 /* QREG_CC_X is not affected */
3588
3589 tcg_gen_mov_i32(QREG_CC_N, reg);
3590 tcg_gen_mov_i32(QREG_CC_Z, reg);
3591
3592 if (left) {
3593 tcg_gen_andi_i32(QREG_CC_C, reg, 1);
3594 } else {
3595 tcg_gen_shri_i32(QREG_CC_C, reg, 31);
3596 }
3597
3598 tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
3599 }
3600
rotate_x_flags(TCGv reg,TCGv X,int size)3601 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3602 {
3603 switch (size) {
3604 case 8:
3605 tcg_gen_ext8s_i32(reg, reg);
3606 break;
3607 case 16:
3608 tcg_gen_ext16s_i32(reg, reg);
3609 break;
3610 default:
3611 break;
3612 }
3613 tcg_gen_mov_i32(QREG_CC_N, reg);
3614 tcg_gen_mov_i32(QREG_CC_Z, reg);
3615 tcg_gen_mov_i32(QREG_CC_X, X);
3616 tcg_gen_mov_i32(QREG_CC_C, X);
3617 tcg_gen_movi_i32(QREG_CC_V, 0);
3618 }
3619
3620 /* Result of rotate_x() is valid if 0 <= shift <= size */
rotate_x(TCGv reg,TCGv shift,int left,int size)3621 static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
3622 {
3623 TCGv X, shl, shr, shx, sz, zero;
3624
3625 sz = tcg_constant_i32(size);
3626
3627 shr = tcg_temp_new();
3628 shl = tcg_temp_new();
3629 shx = tcg_temp_new();
3630 if (left) {
3631 tcg_gen_mov_i32(shl, shift); /* shl = shift */
3632 tcg_gen_movi_i32(shr, size + 1);
3633 tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
3634 tcg_gen_subi_i32(shx, shift, 1); /* shx = shift - 1 */
3635 /* shx = shx < 0 ? size : shx; */
3636 zero = tcg_constant_i32(0);
3637 tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
3638 } else {
3639 tcg_gen_mov_i32(shr, shift); /* shr = shift */
3640 tcg_gen_movi_i32(shl, size + 1);
3641 tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
3642 tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
3643 }
3644
3645 /* reg = (reg << shl) | (reg >> shr) | (x << shx); */
3646
3647 tcg_gen_shl_i32(shl, reg, shl);
3648 tcg_gen_shr_i32(shr, reg, shr);
3649 tcg_gen_or_i32(reg, shl, shr);
3650 tcg_gen_shl_i32(shx, QREG_CC_X, shx);
3651 tcg_gen_or_i32(reg, reg, shx);
3652
3653 /* X = (reg >> size) & 1 */
3654
3655 X = tcg_temp_new();
3656 tcg_gen_extract_i32(X, reg, size, 1);
3657
3658 return X;
3659 }
3660
/* Result of rotate32_x() is valid if 0 <= shift < 33 */
/*
 * Generate a 32-bit rotate-through-X (ROXL.L/ROXR.L) of @reg by @shift
 * and return the new X bit.  The 33-bit rotate is implemented with a
 * 64-bit rotate of [reg:X:...] (or [...:X:reg]), after which the value
 * and X are re-extracted from the two 32-bit halves.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3721
DISAS_INSN(rotate_im)3722 DISAS_INSN(rotate_im)
3723 {
3724 TCGv shift;
3725 int tmp;
3726 int left = (insn & 0x100);
3727
3728 tmp = (insn >> 9) & 7;
3729 if (tmp == 0) {
3730 tmp = 8;
3731 }
3732
3733 shift = tcg_constant_i32(tmp);
3734 if (insn & 8) {
3735 rotate(DREG(insn, 0), shift, left, 32);
3736 } else {
3737 TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3738 rotate_x_flags(DREG(insn, 0), X, 32);
3739 }
3740
3741 set_cc_op(s, CC_OP_FLAGS);
3742 }
3743
DISAS_INSN(rotate8_im)3744 DISAS_INSN(rotate8_im)
3745 {
3746 int left = (insn & 0x100);
3747 TCGv reg;
3748 TCGv shift;
3749 int tmp;
3750
3751 reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3752
3753 tmp = (insn >> 9) & 7;
3754 if (tmp == 0) {
3755 tmp = 8;
3756 }
3757
3758 shift = tcg_constant_i32(tmp);
3759 if (insn & 8) {
3760 rotate(reg, shift, left, 8);
3761 } else {
3762 TCGv X = rotate_x(reg, shift, left, 8);
3763 rotate_x_flags(reg, X, 8);
3764 }
3765 gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3766 set_cc_op(s, CC_OP_FLAGS);
3767 }
3768
DISAS_INSN(rotate16_im)3769 DISAS_INSN(rotate16_im)
3770 {
3771 int left = (insn & 0x100);
3772 TCGv reg;
3773 TCGv shift;
3774 int tmp;
3775
3776 reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
3777 tmp = (insn >> 9) & 7;
3778 if (tmp == 0) {
3779 tmp = 8;
3780 }
3781
3782 shift = tcg_constant_i32(tmp);
3783 if (insn & 8) {
3784 rotate(reg, shift, left, 16);
3785 } else {
3786 TCGv X = rotate_x(reg, shift, left, 16);
3787 rotate_x_flags(reg, X, 16);
3788 }
3789 gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3790 set_cc_op(s, CC_OP_FLAGS);
3791 }
3792
/* ROL/ROR/ROXL/ROXR.L with the rotate count in a data register. */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* ROL/ROR: the effective rotate amount is the count modulo 32. */
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /*
         * if shift == 0, clear C.  rotate() has just zeroed QREG_CC_V,
         * so it doubles as the zero source/comparand for the movcond.
         */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* ROXL/ROXR rotate through X, hence modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
3823
/* ROL/ROR/ROXL/ROXR.B with the rotate count in a data register. */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* ROL/ROR: the effective rotate amount is the count modulo 8. */
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /*
         * if shift == 0, clear C.  rotate() has just zeroed QREG_CC_V,
         * so it doubles as the zero source/comparand for the movcond.
         */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* ROXL/ROXR rotate through X, hence modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3855
/* ROL/ROR/ROXL/ROXR.W with the rotate count in a data register. */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        /* ROL/ROR: the effective rotate amount is the count modulo 16. */
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /*
         * if shift == 0, clear C.  rotate() has just zeroed QREG_CC_V,
         * so it doubles as the zero source/comparand for the movcond.
         */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* ROXL/ROXR rotate through X, hence modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3887
DISAS_INSN(rotate_mem)3888 DISAS_INSN(rotate_mem)
3889 {
3890 TCGv src;
3891 TCGv addr;
3892 TCGv shift;
3893 int left = (insn & 0x100);
3894
3895 SRC_EA(env, src, OS_WORD, 0, &addr);
3896
3897 shift = tcg_constant_i32(1);
3898 if (insn & 0x0200) {
3899 rotate(src, shift, left, 16);
3900 } else {
3901 TCGv X = rotate_x(src, shift, left, 16);
3902 rotate_x_flags(src, X, 16);
3903 }
3904 DEST_EA(env, insn, OS_WORD, src, &addr);
3905 set_cc_op(s, CC_OP_FLAGS);
3906 }
3907
/* BFEXTU/BFEXTS with a data-register source: extract a bitfield into Dn. */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* width 0 means 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width. */
        if (ext & 0x800) {
            /* Variable offset. */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = (32 - width) mod 32; sign/zero extend via sar/shr. */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);  /* N/Z from signed field */
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width. */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3974
/* BFEXTU/BFEXTS with a memory source: the extraction is done by helpers. */
DISAS_INSN(bfext_mem)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv dest = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset each come from a register or a 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    if (is_sign) {
        gen_helper_bfexts_mem(dest, tcg_env, addr, ofs, len);
        tcg_gen_mov_i32(QREG_CC_N, dest);
    } else {
        /* The helper returns the value in the low half and the
           sign-adjusted field for N/Z in the high half. */
        TCGv_i64 tmp = tcg_temp_new_i64();
        gen_helper_bfextu_mem(tmp, tcg_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4009
/*
 * BFCHG/BFCLR/BFSET/BFFFO/BFTST with a data-register operand.
 * The field is rotated into the top of QREG_CC_N for the flags, and
 * "mask" is built with a 0 in every field bit (rotated into place) so
 * the modifying ops below reduce to eqv/and/orc with the mask.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* width 0 means 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset. */
        /* maski has a 0 in the top len bits (field at bit position 0). */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* Field wraps around bit 0; rotate to align it at the top. */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);  /* (width - 1) mod 32 */
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do. */
        break;
    default:
        g_assert_not_reached();
    }
}
4096
/* BFCHG/BFCLR/BFSET/BFFFO/BFTST with a memory operand, via helpers. */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset each come from a register or a 5-bit immediate. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* The helper returns result and flags as a 32:32 pair. */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, tcg_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4143
/* BFINS with a data-register destination: insert a bitfield into Dn. */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;  /* width 0 means 32 */
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    /* N/Z come from the inserted value, left-aligned in QREG_CC_N. */
    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);  /* (32 - width) mod 32 */
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps around bit 0: mask, rotate into place, merge. */
            uint32_t maski = -2U << (len - 1);  /* 1s above the field */
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);  /* mask = -2 << (width-1) */
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        /* Rotate mask and value to the field position and merge. */
        tcg_gen_andi_i32(rot, rot, 31);
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4213
DISAS_INSN(bfins_mem)4214 DISAS_INSN(bfins_mem)
4215 {
4216 int ext = read_im16(env, s);
4217 TCGv src = DREG(ext, 12);
4218 TCGv addr, len, ofs;
4219
4220 addr = gen_lea(env, s, insn, OS_UNSIZED);
4221 if (IS_NULL_QREG(addr)) {
4222 gen_addr_fault(s);
4223 return;
4224 }
4225
4226 if (ext & 0x20) {
4227 len = DREG(ext, 0);
4228 } else {
4229 len = tcg_constant_i32(extract32(ext, 0, 5));
4230 }
4231 if (ext & 0x800) {
4232 ofs = DREG(ext, 6);
4233 } else {
4234 ofs = tcg_constant_i32(extract32(ext, 6, 5));
4235 }
4236
4237 gen_helper_bfins_mem(QREG_CC_N, tcg_env, addr, src, ofs, len);
4238 set_cc_op(s, CC_OP_LOGIC);
4239 }
4240
DISAS_INSN(ff1)4241 DISAS_INSN(ff1)
4242 {
4243 TCGv reg;
4244 reg = DREG(insn, 0);
4245 gen_logic_cc(s, reg, OS_LONG);
4246 gen_helper_ff1(reg, reg);
4247 }
4248
/* CHK: bounds-check Dn against a source operand; the trap itself is
   raised by the helper. */
DISAS_INSN(chk)
{
    TCGv src, reg;
    int opsize;

    /* Size field: 3 = word; 2 = long, but only on CPUs with CHK2. */
    switch ((insn >> 7) & 3) {
    case 3:
        opsize = OS_WORD;
        break;
    case 2:
        if (m68k_feature(env, M68K_FEATURE_CHK2)) {
            opsize = OS_LONG;
            break;
        }
        /* fallthru */
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }
    SRC_EA(env, src, opsize, 1, NULL);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);

    /* Flags must be up to date before the helper can raise the trap. */
    gen_flush_flags(s);
    gen_helper_chk(tcg_env, reg, src);
}
4274
/* CHK2/CMP2: check a register against a bound pair loaded from memory;
   the comparison and any trap are done by the helper. */
DISAS_INSN(chk2)
{
    uint16_t ext;
    TCGv addr1, addr2, bound1, bound2, reg;
    int opsize;

    switch ((insn >> 9) & 3) {
    case 0:
        opsize = OS_BYTE;
        break;
    case 1:
        opsize = OS_WORD;
        break;
    case 2:
        opsize = OS_LONG;
        break;
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /* Bit 11 of the extension word must be set for CHK2 (vs CMP2). */
    ext = read_im16(env, s);
    if ((ext & 0x0800) == 0) {
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /*
     * NOTE(review): addr1 is not checked with IS_NULL_QREG as other
     * users of gen_lea do -- presumably only control-addressing modes
     * are registered for this opcode; confirm against the insn table.
     */
    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
    addr2 = tcg_temp_new();
    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));

    /* The two bounds are stored consecutively in memory. */
    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));

    reg = tcg_temp_new();
    if (ext & 0x8000) {
        /* Address register: compared using its full 32-bit value. */
        tcg_gen_mov_i32(reg, AREG(ext, 12));
    } else {
        gen_ext(reg, DREG(ext, 12), opsize, 1);
    }

    /* Flags must be up to date before the helper can raise the trap. */
    gen_flush_flags(s);
    gen_helper_chk2(tcg_env, reg, bound1, bound2);
}
4319
/*
 * Copy one 16-byte cache line for MOVE16: both @src and @dst are
 * rounded down to a 16-byte boundary.  @index is the MMU index.
 */
static void m68k_copy_line(TCGv dst, TCGv src, int index)
{
    TCGv addr;
    TCGv_i64 t0, t1;

    addr = tcg_temp_new();

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    /* Perform both loads before either store. */
    tcg_gen_andi_i32(addr, src, ~15);
    tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);

    tcg_gen_andi_i32(addr, dst, ~15);
    tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
    tcg_gen_addi_i32(addr, addr, 8);
    tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
}
4340
DISAS_INSN(move16_reg)4341 DISAS_INSN(move16_reg)
4342 {
4343 int index = IS_USER(s);
4344 TCGv tmp;
4345 uint16_t ext;
4346
4347 ext = read_im16(env, s);
4348 if ((ext & (1 << 15)) == 0) {
4349 gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4350 }
4351
4352 m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4353
4354 /* Ax can be Ay, so save Ay before incrementing Ax */
4355 tmp = tcg_temp_new();
4356 tcg_gen_mov_i32(tmp, AREG(ext, 12));
4357 tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4358 tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4359 }
4360
DISAS_INSN(move16_mem)4361 DISAS_INSN(move16_mem)
4362 {
4363 int index = IS_USER(s);
4364 TCGv reg, addr;
4365
4366 reg = AREG(insn, 0);
4367 addr = tcg_constant_i32(read_im32(env, s));
4368
4369 if ((insn >> 3) & 1) {
4370 /* MOVE16 (xxx).L, (Ay) */
4371 m68k_copy_line(reg, addr, index);
4372 } else {
4373 /* MOVE16 (Ay), (xxx).L */
4374 m68k_copy_line(addr, reg, index);
4375 }
4376
4377 if (((insn >> 3) & 2) == 0) {
4378 /* (Ay)+ */
4379 tcg_gen_addi_i32(reg, reg, 16);
4380 }
4381 }
4382
/*
 * STRLDSR (ColdFire): must be followed by a move-to-SR immediate
 * opcode (0x46FC); pushes the current SR, then loads the new SR,
 * which must keep the supervisor bit set.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    addr = s->pc - 2;
    ext = read_im16(env, s);
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
    /* SR changed: end the TB so the new state takes effect. */
    gen_exit_tb(s);
}
4403
DISAS_INSN(move_from_sr)4404 DISAS_INSN(move_from_sr)
4405 {
4406 TCGv sr;
4407
4408 if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
4409 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4410 return;
4411 }
4412 sr = gen_get_sr(s);
4413 DEST_EA(env, insn, OS_WORD, sr, NULL);
4414 }
4415
4416 #if !defined(CONFIG_USER_ONLY)
/* MOVES: privileged move using the SFC/DFC alternate address spaces. */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            /* Address registers receive the sign-extended value. */
            gen_ext(reg, tmp, opsize, 1);
        } else {
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* Complete the side effect of the addressing mode, if any. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* Byte accesses through A7 (SP) bump by 2 to keep it aligned. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrememnt.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4474
/* MOVE to SR: privileged; ends the TB since CPU state changed. */
DISAS_INSN(move_to_sr)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_move_to_sr(env, s, insn, false);
    gen_exit_tb(s);
}
4484
/* MOVE USP,An: privileged read of the user stack pointer. */
DISAS_INSN(move_from_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_ld_i32(AREG(insn, 0), tcg_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4494
/* MOVE An,USP: privileged write of the user stack pointer. */
DISAS_INSN(move_to_usp)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    tcg_gen_st_i32(AREG(insn, 0), tcg_env,
                   offsetof(CPUM68KState, sp[M68K_USP]));
}
4504
/* HALT: privileged; may be intercepted as a semihosting call,
   otherwise halts the CPU. */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    if (semihosting_test(s)) {
        gen_exception(s, s->pc, EXCP_SEMIHOSTING);
        return;
    }
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4518
/* STOP #imm: privileged; loads SR from the immediate and halts. */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4534
/* RTE: privileged; the actual frame restore is done at the
   EXCP_RTE exception entry point. */
DISAS_INSN(rte)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    gen_exception(s, s->base.pc_next, EXCP_RTE);
}
4543
DISAS_INSN(cf_movec)4544 DISAS_INSN(cf_movec)
4545 {
4546 uint16_t ext;
4547 TCGv reg;
4548
4549 if (IS_USER(s)) {
4550 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4551 return;
4552 }
4553
4554 ext = read_im16(env, s);
4555
4556 if (ext & 0x8000) {
4557 reg = AREG(ext, 12);
4558 } else {
4559 reg = DREG(ext, 12);
4560 }
4561 gen_helper_cf_movec_to(tcg_env, tcg_constant_i32(ext & 0xfff), reg);
4562 gen_exit_tb(s);
4563 }
4564
DISAS_INSN(m68k_movec)4565 DISAS_INSN(m68k_movec)
4566 {
4567 uint16_t ext;
4568 TCGv reg, creg;
4569
4570 if (IS_USER(s)) {
4571 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4572 return;
4573 }
4574
4575 ext = read_im16(env, s);
4576
4577 if (ext & 0x8000) {
4578 reg = AREG(ext, 12);
4579 } else {
4580 reg = DREG(ext, 12);
4581 }
4582 creg = tcg_constant_i32(ext & 0xfff);
4583 if (insn & 1) {
4584 gen_helper_m68k_movec_to(tcg_env, creg, reg);
4585 } else {
4586 gen_helper_m68k_movec_from(reg, tcg_env, creg);
4587 }
4588 gen_exit_tb(s);
4589 }
4590
/* INTOUCH: privileged instruction-cache touch; no cache is modelled. */
DISAS_INSN(intouch)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* ICache fetch.  Implement as no-op.  */
}
4599
/* CPUSHL: privileged cache-line push; no cache is modelled. */
DISAS_INSN(cpushl)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4608
/* CPUSH: privileged cache push; no cache is modelled. */
DISAS_INSN(cpush)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Cache push/invalidate.  Implement as no-op.  */
}
4617
/* CINV: privileged cache invalidate; no cache is modelled. */
DISAS_INSN(cinv)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* Invalidate cache line.  Implement as no-op.  */
}
4626
4627 #if !defined(CONFIG_USER_ONLY)
/* PFLUSH: privileged TLB flush; the mode bits select the flush scope
   and are interpreted by the helper. */
DISAS_INSN(pflush)
{
    TCGv opmode;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    opmode = tcg_constant_i32((insn >> 3) & 3);
    gen_helper_pflush(tcg_env, AREG(insn, 0), opmode);
}
4640
/* PTEST: privileged MMU probe; bit 5 selects a read or write probe. */
DISAS_INSN(ptest)
{
    TCGv is_read;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    is_read = tcg_constant_i32((insn >> 5) & 1);
    gen_helper_ptest(tcg_env, AREG(insn, 0), is_read);
}
4652 #endif
4653
/* WDDATA: always privileged-faults (not implemented). */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4658
/* WDEBUG: privileged; aborts, as the debug module is not implemented. */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4668 #endif
4669
/* TRAP #n: raise one of the 16 trap exceptions. */
DISAS_INSN(trap)
{
    gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
}
4674
/*
 * Emit a conditional trap: raise EXCP_TRAPCC when condition @c holds,
 * otherwise fall through to the next instruction.  Nothing is emitted
 * for TCG_COND_NEVER.
 */
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c.  */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        /* PC points past the insn; raise a format-2 exception frame. */
        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            gen_set_label(over);
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4697
/* TRAPcc [#imm]: conditional trap; the optional operand is ignored. */
DISAS_INSN(trapcc)
{
    DisasCompare c;

    /* Consume and discard the immediate operand.  */
    switch (extract32(insn, 0, 3)) {
    case 2: /* trapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* trapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* trapcc (no operand) */
        break;
    default:
        /* trapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    gen_cc_cond(&c, s, extract32(insn, 8, 4));
    do_trapcc(s, &c);
}
4720
/* TRAPV: trap if the overflow flag is set (condition code 9). */
DISAS_INSN(trapv)
{
    DisasCompare c;

    gen_cc_cond(&c, s, 9); /* V set */
    do_trapcc(s, &c);
}
4728
/*
 * Read floating-point control register @reg (FPIAR/FPSR/FPCR) into
 * @res.  FPIAR is not modelled and always reads as zero.
 */
static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
{
    if (reg == M68K_FPIAR) {
        tcg_gen_movi_i32(res, 0);
    } else if (reg == M68K_FPSR) {
        gen_helper_get_fpsr(res, tcg_env);
    } else if (reg == M68K_FPCR) {
        tcg_gen_ld_i32(res, tcg_env, offsetof(CPUM68KState, fpcr));
    }
}
4743
/*
 * Write @val to floating-point control register @reg (FPIAR/FPSR/FPCR).
 * Writes to FPIAR are discarded, as the register is not modelled.
 */
static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
{
    if (reg == M68K_FPSR) {
        gen_helper_set_fpsr(tcg_env, val);
    } else if (reg == M68K_FPCR) {
        gen_helper_set_fpcr(tcg_env, val);
    }
}
4757
/* Store FP control register @reg to memory at @addr as a 32-bit word. */
static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
{
    TCGv val = tcg_temp_new();
    int index = IS_USER(s);

    gen_load_fcr(s, val, reg);
    tcg_gen_qemu_st_tl(val, addr, index, MO_TEUL);
}
4767
/* Load FP control register @reg from memory at @addr as a 32-bit word. */
static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
{
    TCGv val = tcg_temp_new();
    int index = IS_USER(s);

    tcg_gen_qemu_ld_tl(val, addr, index, MO_TEUL);
    gen_store_fcr(s, val, reg);
}
4777
4778
/*
 * FMOVE(M) to/from the FP control registers (FPCR/FPSR/FPIAR).
 * @mask selects which registers take part; register and immediate
 * operands are handled directly, other addressing modes go through
 * memory one 32-bit word per selected register.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* Exactly one control register may be selected. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Immediates can only be written, to a single register. */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_constant_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            return;
        }
        break;
    default:
        break;
    }

    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* Predecrement store: walk the registers in reverse order. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                /* Advance unless this was the last register (but always
                   for postincrement mode, which writes back below). */
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        if (mode == 3) {
            /* Postincrement: write the final address back to An. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
4874
/*
 * FMOVEM: move multiple FP data registers to/from memory.  The register
 * list comes from the extension word (static) or a data register
 * (dynamic); the actual transfers are done by per-mode helpers, which
 * return the final address for the An writeback.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, tcg_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, tcg_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, tcg_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, tcg_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, tcg_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, tcg_env, addr, tmp);
            }
        }
    }
    /* Write the final address back for (An)+ and -(An) modes. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4934
4935 /*
4936 * ??? FP exceptions are not implemented. Most exceptions are deferred until
4937 * immediately before the next FP instruction is executed.
4938 */
/*
 * Decode and translate a coprocessor-1 (FPU) general instruction.
 * The 16-bit extension word selects the instruction class in bits 15-13
 * and the arithmetic opmode in bits 6-0.
 */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr */
            TCGv rom_offset = tcg_constant_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(tcg_env, cpu_dest, rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        /* fmove-out still updates the FPU condition codes. */
        gen_helper_ftst(tcg_env, cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    if (ext & (1 << 14)) {
        /* Source effective address. */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register. */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    /* Dispatch on the 7-bit arithmetic opmode.  The 0x40/0x44 offsets
       select the single/double rounding variants of an operation. */
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(tcg_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(tcg_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(tcg_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(tcg_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x08: /* fetoxm1 */
        gen_helper_fetoxm1(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            /* fsincos: low 3 bits of the opmode select the cosine dest. */
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(tcg_env, cpu_dest, cpu_dest2, cpu_src);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(tcg_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(tcg_env, cpu_src);
        return;
    default:
        goto undef;
    }
    /* Update the FPU condition codes from the result. */
    gen_helper_ftst(tcg_env, cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes? */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
5175
/*
 * Build a DisasCompare for the 6-bit FPU condition predicate @cond.
 * On return, c->tcond applied to (c->v1, c->v2) is true exactly when
 * the predicate holds.  c->v1 is the FPSR condition-code value (or a
 * lightly massaged copy of it) and c->v2 is a constant mask, so most
 * predicates reduce to a TSTNE/TSTEQ test.  Conditions 16-31 are the
 * "signaling" duals of 0-15 and are handled identically here (BSUN is
 * not raised yet, see TODO below).
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;
    int imm = 0;

    /* TODO: Raise BSUN exception. */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    c->v1 = fpsr;

    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        imm = FPSR_CC_Z;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        /* Shift A up into the N bit position, OR into FPSR and invert
           N, so the subsequent mask test realizes Z || !(A || N). */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        imm = FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        imm = FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        /* Shift A into the N position and clear N when A is set, so
           the mask test realizes Z || (N && !A). */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        imm = FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        imm = FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        imm = FPSR_CC_A;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        imm = FPSR_CC_A;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        imm = FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        /* Shift Z into the N position, OR and invert N, realizing
           A || !(N || Z) as a mask test. */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        imm = FPSR_CC_A | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        /* Shift Z into the N position and clear N when Z is set,
           realizing A || (N && !Z) as a mask test. */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        imm = FPSR_CC_A | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        imm = FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
    c->v2 = tcg_constant_i32(imm);
}
5290
/* Emit a branch to @l1, taken when FPU condition predicate @cond holds. */
static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
{
    DisasCompare cmp;

    gen_fcc_cond(&cmp, s, cond);
    update_cc_op(s);
    tcg_gen_brcond_i32(cmp.tcond, cmp.v1, cmp.v2, l1);
}
5299
DISAS_INSN(fbcc)5300 DISAS_INSN(fbcc)
5301 {
5302 uint32_t offset;
5303 uint32_t base;
5304 TCGLabel *l1;
5305
5306 base = s->pc;
5307 offset = (int16_t)read_im16(env, s);
5308 if (insn & (1 << 6)) {
5309 offset = (offset << 16) | read_im16(env, s);
5310 }
5311
5312 l1 = gen_new_label();
5313 update_cc_op(s);
5314 gen_fjmpcc(s, insn & 0x3f, l1);
5315 gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
5316 gen_set_label(l1);
5317 gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
5318 }
5319
DISAS_INSN(fscc)5320 DISAS_INSN(fscc)
5321 {
5322 DisasCompare c;
5323 int cond;
5324 TCGv tmp;
5325 uint16_t ext;
5326
5327 ext = read_im16(env, s);
5328 cond = ext & 0x3f;
5329 gen_fcc_cond(&c, s, cond);
5330
5331 tmp = tcg_temp_new();
5332 tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
5333
5334 DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5335 }
5336
DISAS_INSN(ftrapcc)5337 DISAS_INSN(ftrapcc)
5338 {
5339 DisasCompare c;
5340 uint16_t ext;
5341 int cond;
5342
5343 ext = read_im16(env, s);
5344 cond = ext & 0x3f;
5345
5346 /* Consume and discard the immediate operand. */
5347 switch (extract32(insn, 0, 3)) {
5348 case 2: /* ftrapcc.w */
5349 (void)read_im16(env, s);
5350 break;
5351 case 3: /* ftrapcc.l */
5352 (void)read_im32(env, s);
5353 break;
5354 case 4: /* ftrapcc (no operand) */
5355 break;
5356 default:
5357 /* ftrapcc registered with only valid opmodes */
5358 g_assert_not_reached();
5359 }
5360
5361 gen_fcc_cond(&c, s, cond);
5362 do_trapcc(s, &c);
5363 }
5364
5365 #if !defined(CONFIG_USER_ONLY)
DISAS_INSN(frestore)5366 DISAS_INSN(frestore)
5367 {
5368 TCGv addr;
5369
5370 if (IS_USER(s)) {
5371 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5372 return;
5373 }
5374 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5375 SRC_EA(env, addr, OS_LONG, 0, NULL);
5376 /* FIXME: check the state frame */
5377 } else {
5378 disas_undef(env, s, insn);
5379 }
5380 }
5381
DISAS_INSN(fsave)5382 DISAS_INSN(fsave)
5383 {
5384 if (IS_USER(s)) {
5385 gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5386 return;
5387 }
5388
5389 if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5390 /* always write IDLE */
5391 TCGv idle = tcg_constant_i32(0x41000000);
5392 DEST_EA(env, insn, OS_LONG, idle, NULL);
5393 } else {
5394 disas_undef(env, s, insn);
5395 }
5396 }
5397 #endif
5398
/*
 * Return a fresh temp holding the 16-bit half of @val selected by
 * @upper, positioned/extended according to the MACSR operand mode.
 */
static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
{
    TCGv half = tcg_temp_new();

    if (s->env->macsr & MACSR_FI) {
        /* Fractional mode: keep the operand in the high half. */
        if (upper) {
            tcg_gen_andi_i32(half, val, 0xffff0000);
        } else {
            tcg_gen_shli_i32(half, val, 16);
        }
    } else if (s->env->macsr & MACSR_SU) {
        /* Signed integer mode. */
        if (upper) {
            tcg_gen_sari_i32(half, val, 16);
        } else {
            tcg_gen_ext16s_i32(half, val);
        }
    } else {
        /* Unsigned integer mode. */
        if (upper) {
            tcg_gen_shri_i32(half, val, 16);
        } else {
            tcg_gen_ext16u_i32(half, val);
        }
    }
    return half;
}
5420
/* Clear the MACSR result flags (V, Z, N, EV) before a new MAC operation. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
5426
/*
 * ColdFire EMAC multiply-accumulate, optionally combined with a memory
 * load ("MAC with load") and, on EMAC_B, a second (dual) accumulate.
 * The accumulator index comes from insn bit 7 and ext bit 4; the MACSR
 * mode bits (FI/SU) select fractional, signed or unsigned arithmetic.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    /* Lazily allocate the 64-bit product temp, shared per TB. */
    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load. */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  ext bits 7/6 pick the upper or lower halves of rx/ry. */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, tcg_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, tcg_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, tcg_env, rx, ry);
        /* Apply the scale factor from ext bits 10-9 (<<1 or >>1). */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply. */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated. */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    /* insn bit 8 selects MSAC (subtract) vs MAC (add). */
    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(tcg_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(tcg_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(tcg_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars. */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant. */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier. */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated. */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(tcg_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(tcg_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(tcg_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars. */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(tcg_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* Writeback for the MAC-with-load form. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment. */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement. */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5594
DISAS_INSN(from_mac)5595 DISAS_INSN(from_mac)
5596 {
5597 TCGv rx;
5598 TCGv_i64 acc;
5599 int accnum;
5600
5601 rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5602 accnum = (insn >> 9) & 3;
5603 acc = MACREG(accnum);
5604 if (s->env->macsr & MACSR_FI) {
5605 gen_helper_get_macf(rx, tcg_env, acc);
5606 } else if ((s->env->macsr & MACSR_OMC) == 0) {
5607 tcg_gen_extrl_i64_i32(rx, acc);
5608 } else if (s->env->macsr & MACSR_SU) {
5609 gen_helper_get_macs(rx, acc);
5610 } else {
5611 gen_helper_get_macu(rx, acc);
5612 }
5613 if (insn & 0x40) {
5614 tcg_gen_movi_i64(acc, 0);
5615 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5616 }
5617 }
5618
DISAS_INSN(move_mac)5619 DISAS_INSN(move_mac)
5620 {
5621 /* FIXME: This can be done without a helper. */
5622 int src;
5623 TCGv dest;
5624 src = insn & 3;
5625 dest = tcg_constant_i32((insn >> 9) & 3);
5626 gen_helper_mac_move(tcg_env, dest, tcg_constant_i32(src));
5627 gen_mac_clear_flags();
5628 gen_helper_mac_set_flags(tcg_env, dest);
5629 }
5630
DISAS_INSN(from_macsr)5631 DISAS_INSN(from_macsr)
5632 {
5633 TCGv reg;
5634
5635 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5636 tcg_gen_mov_i32(reg, QREG_MACSR);
5637 }
5638
DISAS_INSN(from_mask)5639 DISAS_INSN(from_mask)
5640 {
5641 TCGv reg;
5642 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5643 tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5644 }
5645
DISAS_INSN(from_mext)5646 DISAS_INSN(from_mext)
5647 {
5648 TCGv reg;
5649 TCGv acc;
5650 reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5651 acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5652 if (s->env->macsr & MACSR_FI)
5653 gen_helper_get_mac_extf(reg, tcg_env, acc);
5654 else
5655 gen_helper_get_mac_exti(reg, tcg_env, acc);
5656 }
5657
DISAS_INSN(macsr_to_ccr)5658 DISAS_INSN(macsr_to_ccr)
5659 {
5660 TCGv tmp = tcg_temp_new();
5661
5662 /* Note that X and C are always cleared. */
5663 tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
5664 gen_helper_set_ccr(tcg_env, tmp);
5665 set_cc_op(s, CC_OP_FLAGS);
5666 }
5667
DISAS_INSN(to_mac)5668 DISAS_INSN(to_mac)
5669 {
5670 TCGv_i64 acc;
5671 TCGv val;
5672 int accnum;
5673 accnum = (insn >> 9) & 3;
5674 acc = MACREG(accnum);
5675 SRC_EA(env, val, OS_LONG, 0, NULL);
5676 if (s->env->macsr & MACSR_FI) {
5677 tcg_gen_ext_i32_i64(acc, val);
5678 tcg_gen_shli_i64(acc, acc, 8);
5679 } else if (s->env->macsr & MACSR_SU) {
5680 tcg_gen_ext_i32_i64(acc, val);
5681 } else {
5682 tcg_gen_extu_i32_i64(acc, val);
5683 }
5684 tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5685 gen_mac_clear_flags();
5686 gen_helper_mac_set_flags(tcg_env, tcg_constant_i32(accnum));
5687 }
5688
DISAS_INSN(to_macsr)5689 DISAS_INSN(to_macsr)
5690 {
5691 TCGv val;
5692 SRC_EA(env, val, OS_LONG, 0, NULL);
5693 gen_helper_set_macsr(tcg_env, val);
5694 gen_exit_tb(s);
5695 }
5696
DISAS_INSN(to_mask)5697 DISAS_INSN(to_mask)
5698 {
5699 TCGv val;
5700 SRC_EA(env, val, OS_LONG, 0, NULL);
5701 tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5702 }
5703
DISAS_INSN(to_mext)5704 DISAS_INSN(to_mext)
5705 {
5706 TCGv val;
5707 TCGv acc;
5708 SRC_EA(env, val, OS_LONG, 0, NULL);
5709 acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5710 if (s->env->macsr & MACSR_FI)
5711 gen_helper_set_mac_extf(tcg_env, val, acc);
5712 else if (s->env->macsr & MACSR_SU)
5713 gen_helper_set_mac_exts(tcg_env, val, acc);
5714 else
5715 gen_helper_set_mac_extu(tcg_env, val, acc);
5716 }
5717
5718 static disas_proc opcode_table[65536];
5719
5720 static void
register_opcode(disas_proc proc,uint16_t opcode,uint16_t mask)5721 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5722 {
5723 int i;
5724 int from;
5725 int to;
5726
5727 /* Sanity check. All set bits must be included in the mask. */
5728 if (opcode & ~mask) {
5729 fprintf(stderr,
5730 "qemu internal error: bogus opcode definition %04x/%04x\n",
5731 opcode, mask);
5732 abort();
5733 }
5734 /*
5735 * This could probably be cleverer. For now just optimize the case where
5736 * the top bits are known.
5737 */
5738 /* Find the first zero bit in the mask. */
5739 i = 0x8000;
5740 while ((i & mask) != 0)
5741 i >>= 1;
5742 /* Iterate over all combinations of this and lower bits. */
5743 if (i == 0)
5744 i = 1;
5745 else
5746 i <<= 1;
5747 from = opcode & ~(i - 1);
5748 to = from + i;
5749 for (i = from; i < to; i++) {
5750 if ((i & mask) == opcode)
5751 opcode_table[i] = proc;
5752 }
5753 }
5754
5755 /*
5756 * Register m68k opcode handlers. Order is important.
5757 * Later insn override earlier ones.
5758 */
register_m68k_insns(CPUM68KState * env)5759 void register_m68k_insns (CPUM68KState *env)
5760 {
5761 /*
5762 * Build the opcode table only once to avoid
5763 * multithreading issues.
5764 */
5765 if (opcode_table[0] != NULL) {
5766 return;
5767 }
5768
5769 /*
5770 * use BASE() for instruction available
5771 * for CF_ISA_A and M68000.
5772 */
5773 #define BASE(name, opcode, mask) \
5774 register_opcode(disas_##name, 0x##opcode, 0x##mask)
5775 #define INSN(name, opcode, mask, feature) do { \
5776 if (m68k_feature(env, M68K_FEATURE_##feature)) \
5777 BASE(name, opcode, mask); \
5778 } while(0)
5779 BASE(undef, 0000, 0000);
5780 INSN(arith_im, 0080, fff8, CF_ISA_A);
5781 INSN(arith_im, 0000, ff00, M68K);
5782 INSN(chk2, 00c0, f9c0, CHK2);
5783 INSN(bitrev, 00c0, fff8, CF_ISA_APLUSC);
5784 BASE(bitop_reg, 0100, f1c0);
5785 BASE(bitop_reg, 0140, f1c0);
5786 BASE(bitop_reg, 0180, f1c0);
5787 BASE(bitop_reg, 01c0, f1c0);
5788 INSN(movep, 0108, f138, MOVEP);
5789 INSN(arith_im, 0280, fff8, CF_ISA_A);
5790 INSN(arith_im, 0200, ff00, M68K);
5791 INSN(undef, 02c0, ffc0, M68K);
5792 INSN(byterev, 02c0, fff8, CF_ISA_APLUSC);
5793 INSN(arith_im, 0480, fff8, CF_ISA_A);
5794 INSN(arith_im, 0400, ff00, M68K);
5795 INSN(undef, 04c0, ffc0, M68K);
5796 INSN(arith_im, 0600, ff00, M68K);
5797 INSN(undef, 06c0, ffc0, M68K);
5798 INSN(ff1, 04c0, fff8, CF_ISA_APLUSC);
5799 INSN(arith_im, 0680, fff8, CF_ISA_A);
5800 INSN(arith_im, 0c00, ff38, CF_ISA_A);
5801 INSN(arith_im, 0c00, ff00, M68K);
5802 BASE(bitop_im, 0800, ffc0);
5803 BASE(bitop_im, 0840, ffc0);
5804 BASE(bitop_im, 0880, ffc0);
5805 BASE(bitop_im, 08c0, ffc0);
5806 INSN(arith_im, 0a80, fff8, CF_ISA_A);
5807 INSN(arith_im, 0a00, ff00, M68K);
5808 #if !defined(CONFIG_USER_ONLY)
5809 INSN(moves, 0e00, ff00, M68K);
5810 #endif
5811 INSN(cas, 0ac0, ffc0, CAS);
5812 INSN(cas, 0cc0, ffc0, CAS);
5813 INSN(cas, 0ec0, ffc0, CAS);
5814 INSN(cas2w, 0cfc, ffff, CAS);
5815 INSN(cas2l, 0efc, ffff, CAS);
5816 BASE(move, 1000, f000);
5817 BASE(move, 2000, f000);
5818 BASE(move, 3000, f000);
5819 INSN(chk, 4000, f040, M68K);
5820 INSN(strldsr, 40e7, ffff, CF_ISA_APLUSC);
5821 INSN(negx, 4080, fff8, CF_ISA_A);
5822 INSN(negx, 4000, ff00, M68K);
5823 INSN(undef, 40c0, ffc0, M68K);
5824 INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
5825 INSN(move_from_sr, 40c0, ffc0, M68K);
5826 BASE(lea, 41c0, f1c0);
5827 BASE(clr, 4200, ff00);
5828 BASE(undef, 42c0, ffc0);
5829 INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
5830 INSN(move_from_ccr, 42c0, ffc0, M68K);
5831 INSN(neg, 4480, fff8, CF_ISA_A);
5832 INSN(neg, 4400, ff00, M68K);
5833 INSN(undef, 44c0, ffc0, M68K);
5834 BASE(move_to_ccr, 44c0, ffc0);
5835 INSN(not, 4680, fff8, CF_ISA_A);
5836 INSN(not, 4600, ff00, M68K);
5837 #if !defined(CONFIG_USER_ONLY)
5838 BASE(move_to_sr, 46c0, ffc0);
5839 #endif
5840 INSN(nbcd, 4800, ffc0, M68K);
5841 INSN(linkl, 4808, fff8, M68K);
5842 BASE(pea, 4840, ffc0);
5843 BASE(swap, 4840, fff8);
5844 INSN(bkpt, 4848, fff8, BKPT);
5845 INSN(movem, 48d0, fbf8, CF_ISA_A);
5846 INSN(movem, 48e8, fbf8, CF_ISA_A);
5847 INSN(movem, 4880, fb80, M68K);
5848 BASE(ext, 4880, fff8);
5849 BASE(ext, 48c0, fff8);
5850 BASE(ext, 49c0, fff8);
5851 BASE(tst, 4a00, ff00);
5852 INSN(tas, 4ac0, ffc0, CF_ISA_B);
5853 INSN(tas, 4ac0, ffc0, M68K);
5854 #if !defined(CONFIG_USER_ONLY)
5855 INSN(halt, 4ac8, ffff, CF_ISA_A);
5856 INSN(halt, 4ac8, ffff, M68K);
5857 #endif
5858 INSN(pulse, 4acc, ffff, CF_ISA_A);
5859 BASE(illegal, 4afc, ffff);
5860 INSN(mull, 4c00, ffc0, CF_ISA_A);
5861 INSN(mull, 4c00, ffc0, LONG_MULDIV);
5862 INSN(divl, 4c40, ffc0, CF_ISA_A);
5863 INSN(divl, 4c40, ffc0, LONG_MULDIV);
5864 INSN(sats, 4c80, fff8, CF_ISA_B);
5865 BASE(trap, 4e40, fff0);
5866 BASE(link, 4e50, fff8);
5867 BASE(unlk, 4e58, fff8);
5868 #if !defined(CONFIG_USER_ONLY)
5869 INSN(move_to_usp, 4e60, fff8, USP);
5870 INSN(move_from_usp, 4e68, fff8, USP);
5871 INSN(reset, 4e70, ffff, M68K);
5872 BASE(stop, 4e72, ffff);
5873 BASE(rte, 4e73, ffff);
5874 INSN(cf_movec, 4e7b, ffff, CF_ISA_A);
5875 INSN(m68k_movec, 4e7a, fffe, MOVEC);
5876 #endif
5877 BASE(nop, 4e71, ffff);
5878 INSN(rtd, 4e74, ffff, RTD);
5879 BASE(rts, 4e75, ffff);
5880 INSN(trapv, 4e76, ffff, M68K);
5881 INSN(rtr, 4e77, ffff, M68K);
5882 BASE(jump, 4e80, ffc0);
5883 BASE(jump, 4ec0, ffc0);
5884 INSN(addsubq, 5000, f080, M68K);
5885 BASE(addsubq, 5080, f0c0);
5886 INSN(scc, 50c0, f0f8, CF_ISA_A); /* Scc.B Dx */
5887 INSN(scc, 50c0, f0c0, M68K); /* Scc.B <EA> */
5888 INSN(dbcc, 50c8, f0f8, M68K);
5889 INSN(trapcc, 50fa, f0fe, TRAPCC); /* opmode 010, 011 */
5890 INSN(trapcc, 50fc, f0ff, TRAPCC); /* opmode 100 */
5891 INSN(trapcc, 51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
5892 INSN(trapcc, 51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */
5893
5894 /* Branch instructions. */
5895 BASE(branch, 6000, f000);
5896 /* Disable long branch instructions, then add back the ones we want. */
5897 BASE(undef, 60ff, f0ff); /* All long branches. */
5898 INSN(branch, 60ff, f0ff, CF_ISA_B);
5899 INSN(undef, 60ff, ffff, CF_ISA_B); /* bra.l */
5900 INSN(branch, 60ff, ffff, BRAL);
5901 INSN(branch, 60ff, f0ff, BCCL);
5902
5903 BASE(moveq, 7000, f100);
5904 INSN(mvzs, 7100, f100, CF_ISA_B);
5905 BASE(or, 8000, f000);
5906 BASE(divw, 80c0, f0c0);
5907 INSN(sbcd_reg, 8100, f1f8, M68K);
5908 INSN(sbcd_mem, 8108, f1f8, M68K);
5909 BASE(addsub, 9000, f000);
5910 INSN(undef, 90c0, f0c0, CF_ISA_A);
5911 INSN(subx_reg, 9180, f1f8, CF_ISA_A);
5912 INSN(subx_reg, 9100, f138, M68K);
5913 INSN(subx_mem, 9108, f138, M68K);
5914 INSN(suba, 91c0, f1c0, CF_ISA_A);
5915 INSN(suba, 90c0, f0c0, M68K);
5916
5917 BASE(undef_mac, a000, f000);
5918 INSN(mac, a000, f100, CF_EMAC);
5919 INSN(from_mac, a180, f9b0, CF_EMAC);
5920 INSN(move_mac, a110, f9fc, CF_EMAC);
5921 INSN(from_macsr,a980, f9f0, CF_EMAC);
5922 INSN(from_mask, ad80, fff0, CF_EMAC);
5923 INSN(from_mext, ab80, fbf0, CF_EMAC);
5924 INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
5925 INSN(to_mac, a100, f9c0, CF_EMAC);
5926 INSN(to_macsr, a900, ffc0, CF_EMAC);
5927 INSN(to_mext, ab00, fbc0, CF_EMAC);
5928 INSN(to_mask, ad00, ffc0, CF_EMAC);
5929
5930 INSN(mov3q, a140, f1c0, CF_ISA_B);
5931 INSN(cmp, b000, f1c0, CF_ISA_B); /* cmp.b */
5932 INSN(cmp, b040, f1c0, CF_ISA_B); /* cmp.w */
5933 INSN(cmpa, b0c0, f1c0, CF_ISA_B); /* cmpa.w */
5934 INSN(cmp, b080, f1c0, CF_ISA_A);
5935 INSN(cmpa, b1c0, f1c0, CF_ISA_A);
5936 INSN(cmp, b000, f100, M68K);
5937 INSN(eor, b100, f100, M68K);
5938 INSN(cmpm, b108, f138, M68K);
5939 INSN(cmpa, b0c0, f0c0, M68K);
5940 INSN(eor, b180, f1c0, CF_ISA_A);
5941 BASE(and, c000, f000);
5942 INSN(exg_dd, c140, f1f8, M68K);
5943 INSN(exg_aa, c148, f1f8, M68K);
5944 INSN(exg_da, c188, f1f8, M68K);
5945 BASE(mulw, c0c0, f0c0);
5946 INSN(abcd_reg, c100, f1f8, M68K);
5947 INSN(abcd_mem, c108, f1f8, M68K);
5948 BASE(addsub, d000, f000);
5949 INSN(undef, d0c0, f0c0, CF_ISA_A);
5950 INSN(addx_reg, d180, f1f8, CF_ISA_A);
5951 INSN(addx_reg, d100, f138, M68K);
5952 INSN(addx_mem, d108, f138, M68K);
5953 INSN(adda, d1c0, f1c0, CF_ISA_A);
5954 INSN(adda, d0c0, f0c0, M68K);
5955 INSN(shift_im, e080, f0f0, CF_ISA_A);
5956 INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
5957 INSN(shift8_im, e000, f0f0, M68K);
5958 INSN(shift16_im, e040, f0f0, M68K);
5959 INSN(shift_im, e080, f0f0, M68K);
5960 INSN(shift8_reg, e020, f0f0, M68K);
5961 INSN(shift16_reg, e060, f0f0, M68K);
5962 INSN(shift_reg, e0a0, f0f0, M68K);
5963 INSN(shift_mem, e0c0, fcc0, M68K);
5964 INSN(rotate_im, e090, f0f0, M68K);
5965 INSN(rotate8_im, e010, f0f0, M68K);
5966 INSN(rotate16_im, e050, f0f0, M68K);
5967 INSN(rotate_reg, e0b0, f0f0, M68K);
5968 INSN(rotate8_reg, e030, f0f0, M68K);
5969 INSN(rotate16_reg, e070, f0f0, M68K);
5970 INSN(rotate_mem, e4c0, fcc0, M68K);
5971 INSN(bfext_mem, e9c0, fdc0, BITFIELD); /* bfextu & bfexts */
5972 INSN(bfext_reg, e9c0, fdf8, BITFIELD);
5973 INSN(bfins_mem, efc0, ffc0, BITFIELD);
5974 INSN(bfins_reg, efc0, fff8, BITFIELD);
5975 INSN(bfop_mem, eac0, ffc0, BITFIELD); /* bfchg */
5976 INSN(bfop_reg, eac0, fff8, BITFIELD); /* bfchg */
5977 INSN(bfop_mem, ecc0, ffc0, BITFIELD); /* bfclr */
5978 INSN(bfop_reg, ecc0, fff8, BITFIELD); /* bfclr */
5979 INSN(bfop_mem, edc0, ffc0, BITFIELD); /* bfffo */
5980 INSN(bfop_reg, edc0, fff8, BITFIELD); /* bfffo */
5981 INSN(bfop_mem, eec0, ffc0, BITFIELD); /* bfset */
5982 INSN(bfop_reg, eec0, fff8, BITFIELD); /* bfset */
5983 INSN(bfop_mem, e8c0, ffc0, BITFIELD); /* bftst */
5984 INSN(bfop_reg, e8c0, fff8, BITFIELD); /* bftst */
5985 BASE(undef_fpu, f000, f000);
5986 INSN(fpu, f200, ffc0, CF_FPU);
5987 INSN(fbcc, f280, ffc0, CF_FPU);
5988 INSN(fpu, f200, ffc0, FPU);
5989 INSN(fscc, f240, ffc0, FPU);
5990 INSN(ftrapcc, f27a, fffe, FPU); /* opmode 010, 011 */
5991 INSN(ftrapcc, f27c, ffff, FPU); /* opmode 100 */
5992 INSN(fbcc, f280, ff80, FPU);
5993 #if !defined(CONFIG_USER_ONLY)
5994 INSN(frestore, f340, ffc0, CF_FPU);
5995 INSN(fsave, f300, ffc0, CF_FPU);
5996 INSN(frestore, f340, ffc0, FPU);
5997 INSN(fsave, f300, ffc0, FPU);
5998 INSN(intouch, f340, ffc0, CF_ISA_A);
5999 INSN(cpushl, f428, ff38, CF_ISA_A);
6000 INSN(cpush, f420, ff20, M68040);
6001 INSN(cinv, f400, ff20, M68040);
6002 INSN(pflush, f500, ffe0, M68040);
6003 INSN(ptest, f548, ffd8, M68040);
6004 INSN(wddata, fb00, ff00, CF_ISA_A);
6005 INSN(wdebug, fbc0, ffc0, CF_ISA_A);
6006 #endif
6007 INSN(move16_mem, f600, ffe0, M68040);
6008 INSN(move16_reg, f620, fff8, M68040);
6009 #undef INSN
6010 }
6011
m68k_tr_init_disas_context(DisasContextBase * dcbase,CPUState * cpu)6012 static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6013 {
6014 DisasContext *dc = container_of(dcbase, DisasContext, base);
6015 CPUM68KState *env = cpu_env(cpu);
6016
6017 dc->env = env;
6018 dc->pc = dc->base.pc_first;
6019 /* This value will always be filled in properly before m68k_tr_tb_stop. */
6020 dc->pc_prev = 0xdeadbeef;
6021 dc->cc_op = CC_OP_DYNAMIC;
6022 dc->cc_op_synced = 1;
6023 dc->done_mac = 0;
6024 dc->writeback_mask = 0;
6025
6026 dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
6027 /* If architectural single step active, limit to 1 */
6028 if (dc->ss_active) {
6029 dc->base.max_insns = 1;
6030 }
6031 }
6032
m68k_tr_tb_start(DisasContextBase * dcbase,CPUState * cpu)6033 static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
6034 {
6035 }
6036
m68k_tr_insn_start(DisasContextBase * dcbase,CPUState * cpu)6037 static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6038 {
6039 DisasContext *dc = container_of(dcbase, DisasContext, base);
6040 tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6041 }
6042
m68k_tr_translate_insn(DisasContextBase * dcbase,CPUState * cpu)6043 static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6044 {
6045 DisasContext *dc = container_of(dcbase, DisasContext, base);
6046 CPUM68KState *env = cpu_env(cpu);
6047 uint16_t insn = read_im16(env, dc);
6048
6049 opcode_table[insn](env, dc, insn);
6050 do_writebacks(dc);
6051
6052 dc->pc_prev = dc->base.pc_next;
6053 dc->base.pc_next = dc->pc;
6054
6055 if (dc->base.is_jmp == DISAS_NEXT) {
6056 /*
6057 * Stop translation when the next insn might touch a new page.
6058 * This ensures that prefetch aborts at the right place.
6059 *
6060 * We cannot determine the size of the next insn without
6061 * completely decoding it. However, the maximum insn size
6062 * is 32 bytes, so end if we do not have that much remaining.
6063 * This may produce several small TBs at the end of each page,
6064 * but they will all be linked with goto_tb.
6065 *
6066 * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
6067 * smaller than MC68020's.
6068 */
6069 target_ulong start_page_offset
6070 = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);
6071
6072 if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
6073 dc->base.is_jmp = DISAS_TOO_MANY;
6074 }
6075 }
6076 }
6077
m68k_tr_tb_stop(DisasContextBase * dcbase,CPUState * cpu)6078 static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
6079 {
6080 DisasContext *dc = container_of(dcbase, DisasContext, base);
6081
6082 switch (dc->base.is_jmp) {
6083 case DISAS_NORETURN:
6084 break;
6085 case DISAS_TOO_MANY:
6086 update_cc_op(dc);
6087 gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
6088 break;
6089 case DISAS_JUMP:
6090 /* We updated CC_OP and PC in gen_jmp/gen_jmp_im. */
6091 if (dc->ss_active) {
6092 gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6093 } else {
6094 tcg_gen_lookup_and_goto_ptr();
6095 }
6096 break;
6097 case DISAS_EXIT:
6098 /*
6099 * We updated CC_OP and PC in gen_exit_tb, but also modified
6100 * other state that may require returning to the main loop.
6101 */
6102 if (dc->ss_active) {
6103 gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6104 } else {
6105 tcg_gen_exit_tb(NULL, 0);
6106 }
6107 break;
6108 default:
6109 g_assert_not_reached();
6110 }
6111 }
6112
/* Hooks invoked by the generic translator loop (translator_loop). */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
};
6120
m68k_translate_code(CPUState * cpu,TranslationBlock * tb,int * max_insns,vaddr pc,void * host_pc)6121 void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
6122 int *max_insns, vaddr pc, void *host_pc)
6123 {
6124 DisasContext dc;
6125 translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
6126 }
6127
floatx80_to_double(CPUM68KState * env,uint16_t high,uint64_t low)6128 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6129 {
6130 floatx80 a = { .high = high, .low = low };
6131 union {
6132 float64 f64;
6133 double d;
6134 } u;
6135
6136 u.f64 = floatx80_to_float64(a, &env->fp_status);
6137 return u.d;
6138 }
6139
m68k_cpu_dump_state(CPUState * cs,FILE * f,int flags)6140 void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
6141 {
6142 CPUM68KState *env = cpu_env(cs);
6143 int i;
6144 uint16_t sr;
6145 for (i = 0; i < 8; i++) {
6146 qemu_fprintf(f, "D%d = %08x A%d = %08x "
6147 "F%d = %04x %016"PRIx64" (%12g)\n",
6148 i, env->dregs[i], i, env->aregs[i],
6149 i, env->fregs[i].l.upper, env->fregs[i].l.lower,
6150 floatx80_to_double(env, env->fregs[i].l.upper,
6151 env->fregs[i].l.lower));
6152 }
6153 qemu_fprintf(f, "PC = %08x ", env->pc);
6154 sr = env->sr | cpu_m68k_get_ccr(env);
6155 qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
6156 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
6157 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
6158 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
6159 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
6160 (sr & CCF_C) ? 'C' : '-');
6161 qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
6162 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
6163 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
6164 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
6165 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
6166 qemu_fprintf(f, "\n "
6167 "FPCR = %04x ", env->fpcr);
6168 switch (env->fpcr & FPCR_PREC_MASK) {
6169 case FPCR_PREC_X:
6170 qemu_fprintf(f, "X ");
6171 break;
6172 case FPCR_PREC_S:
6173 qemu_fprintf(f, "S ");
6174 break;
6175 case FPCR_PREC_D:
6176 qemu_fprintf(f, "D ");
6177 break;
6178 }
6179 switch (env->fpcr & FPCR_RND_MASK) {
6180 case FPCR_RND_N:
6181 qemu_fprintf(f, "RN ");
6182 break;
6183 case FPCR_RND_Z:
6184 qemu_fprintf(f, "RZ ");
6185 break;
6186 case FPCR_RND_M:
6187 qemu_fprintf(f, "RM ");
6188 break;
6189 case FPCR_RND_P:
6190 qemu_fprintf(f, "RP ");
6191 break;
6192 }
6193 qemu_fprintf(f, "\n");
6194 #ifndef CONFIG_USER_ONLY
6195 qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
6196 env->current_sp == M68K_SSP ? "->" : " ", env->sp[M68K_SSP],
6197 env->current_sp == M68K_USP ? "->" : " ", env->sp[M68K_USP],
6198 env->current_sp == M68K_ISP ? "->" : " ", env->sp[M68K_ISP]);
6199 qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
6200 qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
6201 qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
6202 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
6203 qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
6204 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
6205 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
6206 qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
6207 env->mmu.mmusr, env->mmu.ar);
6208 #endif /* !CONFIG_USER_ONLY */
6209 }
6210