xref: /qemu/target/m68k/translate.c (revision 73047c825e25a18127dddb89eff0c0bf97a26aed)
1 /*
2  *  m68k translation
3  *
4  *  Copyright (c) 2005-2007 CodeSourcery
5  *  Written by Paul Brook
6  *
7  * This library is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * This library is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "exec/translation-block.h"
25 #include "exec/target_page.h"
26 #include "tcg/tcg-op.h"
27 #include "qemu/log.h"
28 #include "qemu/qemu-print.h"
29 #include "exec/translator.h"
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 #include "exec/log.h"
33 #include "fpu/softfloat.h"
34 #include "semihosting/semihost.h"
35 
36 #define HELPER_H "helper.h"
37 #include "exec/helper-info.c.inc"
38 #undef  HELPER_H
39 
40 //#define DEBUG_DISPATCH 1
41 
/* One TCGv(_i64) global per entry in qregs.h.inc (QREG_PC, QREG_CC_*, ...).  */
#define DEFO32(name, offset) static TCGv QREG_##name;
#define DEFO64(name, offset) static TCGv_i64 QREG_##name;
#include "qregs.h.inc"
#undef DEFO32
#undef DEFO64

/* TCG views of CPUState fields; initialized in m68k_tcg_init().  */
static TCGv_i32 cpu_halted;
static TCGv_i32 cpu_exception_index;

/* Backing store for register names: "D0".."A7" (3 bytes each, including
   NUL) plus "ACC0".."ACC3" (5 bytes each).  */
static char cpu_reg_names[2 * 8 * 3 + 5 * 4];
static TCGv cpu_dregs[8];
static TCGv cpu_aregs[8];
static TCGv_i64 cpu_macc[4];

/* Extract the 3-bit register field at bit POS of INSN.  */
#define REG(insn, pos)  (((insn) >> (pos)) & 7)
#define DREG(insn, pos) cpu_dregs[REG(insn, pos)]
/* AREG honours pending delayed writebacks -- see get_areg().  */
#define AREG(insn, pos) get_areg(s, REG(insn, pos))
#define MACREG(acc)     cpu_macc[acc]
#define QREG_SP         get_areg(s, 7)

/* Sentinel returned for invalid addressing modes; only compared against
   via IS_NULL_QREG.  */
static TCGv NULL_QREG;
#define IS_NULL_QREG(t) (t == NULL_QREG)
/* Used to distinguish stores from bad addressing modes.  */
static TCGv store_dummy;
66 
67 void m68k_tcg_init(void)
68 {
69     char *p;
70     int i;
71 
72 #define DEFO32(name, offset) \
73     QREG_##name = tcg_global_mem_new_i32(tcg_env, \
74         offsetof(CPUM68KState, offset), #name);
75 #define DEFO64(name, offset) \
76     QREG_##name = tcg_global_mem_new_i64(tcg_env, \
77         offsetof(CPUM68KState, offset), #name);
78 #include "qregs.h.inc"
79 #undef DEFO32
80 #undef DEFO64
81 
82     cpu_halted = tcg_global_mem_new_i32(tcg_env,
83                                         -offsetof(M68kCPU, env) +
84                                         offsetof(CPUState, halted), "HALTED");
85     cpu_exception_index = tcg_global_mem_new_i32(tcg_env,
86                                                  -offsetof(M68kCPU, env) +
87                                                  offsetof(CPUState, exception_index),
88                                                  "EXCEPTION");
89 
90     p = cpu_reg_names;
91     for (i = 0; i < 8; i++) {
92         sprintf(p, "D%d", i);
93         cpu_dregs[i] = tcg_global_mem_new(tcg_env,
94                                           offsetof(CPUM68KState, dregs[i]), p);
95         p += 3;
96         sprintf(p, "A%d", i);
97         cpu_aregs[i] = tcg_global_mem_new(tcg_env,
98                                           offsetof(CPUM68KState, aregs[i]), p);
99         p += 3;
100     }
101     for (i = 0; i < 4; i++) {
102         sprintf(p, "ACC%d", i);
103         cpu_macc[i] = tcg_global_mem_new_i64(tcg_env,
104                                          offsetof(CPUM68KState, macc[i]), p);
105         p += 5;
106     }
107 
108     NULL_QREG = tcg_global_mem_new(tcg_env, -4, "NULL");
109     store_dummy = tcg_global_mem_new(tcg_env, -8, "NULL");
110 }
111 
/* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    CPUM68KState *env;
    target_ulong pc;        /* address of the next insn word to fetch */
    target_ulong pc_prev;   /* address of the previous insn */
    CCOp cc_op; /* Current CC operation */
    int cc_op_synced;       /* nonzero if env->cc_op matches cc_op */
    TCGv_i64 mactmp;        /* scratch i64 -- presumably for MAC insns; usage not in this chunk */
    int done_mac;           /* NOTE(review): usage not visible in this chunk */
    int writeback_mask;     /* bit N set: aregs[N] has a pending delayed writeback */
    TCGv writeback[8];      /* delayed new values for cpu_aregs[] */
    bool ss_active;         /* presumably single-step trace enable; usage not in this chunk */
} DisasContext;
126 
127 static TCGv get_areg(DisasContext *s, unsigned regno)
128 {
129     if (s->writeback_mask & (1 << regno)) {
130         return s->writeback[regno];
131     } else {
132         return cpu_aregs[regno];
133     }
134 }
135 
136 static void delay_set_areg(DisasContext *s, unsigned regno,
137                            TCGv val, bool give_temp)
138 {
139     if (s->writeback_mask & (1 << regno)) {
140         if (give_temp) {
141             s->writeback[regno] = val;
142         } else {
143             tcg_gen_mov_i32(s->writeback[regno], val);
144         }
145     } else {
146         s->writeback_mask |= 1 << regno;
147         if (give_temp) {
148             s->writeback[regno] = val;
149         } else {
150             TCGv tmp = tcg_temp_new();
151             s->writeback[regno] = tmp;
152             tcg_gen_mov_i32(tmp, val);
153         }
154     }
155 }
156 
157 static void do_writebacks(DisasContext *s)
158 {
159     unsigned mask = s->writeback_mask;
160     if (mask) {
161         s->writeback_mask = 0;
162         do {
163             unsigned regno = ctz32(mask);
164             tcg_gen_mov_i32(cpu_aregs[regno], s->writeback[regno]);
165             mask &= mask - 1;
166         } while (mask);
167     }
168 }
169 
/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_EXIT      DISAS_TARGET_1 /* cpu state was modified dynamically */

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
/* True when the supervisor bit recorded in the TB flags is clear.  */
#define IS_USER(s)   (!(s->base.tb->flags & TB_FLAGS_MSR_S))
/* MMU index selected by the S bit of SFC/DFC recorded in the TB flags.  */
#define SFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_SFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#define DFC_INDEX(s) ((s->base.tb->flags & TB_FLAGS_DFC_S) ? \
                      MMU_KERNEL_IDX : MMU_USER_IDX)
#endif
183 
/* Signature shared by every per-insn translation routine.  */
typedef void (*disas_proc)(CPUM68KState *env, DisasContext *s, uint16_t insn);

#ifdef DEBUG_DISPATCH
/* With DEBUG_DISPATCH, wrap each translator so it logs its name first.  */
#define DISAS_INSN(name)                                                \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn);                       \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)                             \
    {                                                                   \
        qemu_log("Dispatch " #name "\n");                               \
        real_disas_##name(env, s, insn);                                \
    }                                                                   \
    static void real_disas_##name(CPUM68KState *env, DisasContext *s,   \
                                  uint16_t insn)
#else
#define DISAS_INSN(name)                                                \
    static void disas_##name(CPUM68KState *env, DisasContext *s,        \
                             uint16_t insn)
#endif
203 
/*
 * For each lazy CC mode, the set of flag registers (QREG_CC_*) whose
 * values that mode still consumes.  set_cc_op() discards any flag
 * computation that the new mode no longer needs.  X and N are live in
 * every mode.
 */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_FLAGS] = CCF_C | CCF_V | CCF_Z | CCF_N | CCF_X,
    [CC_OP_ADDB ... CC_OP_ADDL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_SUBB ... CC_OP_SUBL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_CMPB ... CC_OP_CMPL] = CCF_X | CCF_N | CCF_V,
    [CC_OP_LOGIC] = CCF_X | CCF_N
};
212 
/*
 * Switch the translator's lazy flag state to OP.  Marks env->cc_op as
 * stale (cc_op_synced = 0) and discards any QREG_CC_* values the new
 * mode will not consume, so TCG can dead-code their producers.
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    CCOp old_op = s->cc_op;
    int dead;

    if (old_op == op) {
        return;
    }
    s->cc_op = op;
    s->cc_op_synced = 0;

    /*
     * Discard CC computation that will no longer be used.
     * Note that X and N are never dead.
     */
    dead = cc_op_live[old_op] & ~cc_op_live[op];
    if (dead & CCF_C) {
        tcg_gen_discard_i32(QREG_CC_C);
    }
    if (dead & CCF_Z) {
        tcg_gen_discard_i32(QREG_CC_Z);
    }
    if (dead & CCF_V) {
        tcg_gen_discard_i32(QREG_CC_V);
    }
}
239 
240 /* Update the CPU env CC_OP state.  */
241 static void update_cc_op(DisasContext *s)
242 {
243     if (!s->cc_op_synced) {
244         s->cc_op_synced = 1;
245         tcg_gen_movi_i32(QREG_CC_OP, s->cc_op);
246     }
247 }
248 
/* Generate a jump to an immediate address.  */
static void gen_jmp_im(DisasContext *s, uint32_t dest)
{
    /* env->cc_op must be in sync before control leaves the TB.  */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
256 
/* Generate a jump to the address in qreg DEST.  */
static void gen_jmp(DisasContext *s, TCGv dest)
{
    /* env->cc_op must be in sync before control leaves the TB.  */
    update_cc_op(s);
    tcg_gen_mov_i32(QREG_PC, dest);
    s->base.is_jmp = DISAS_JUMP;
}
264 
/* Emit a call to the raise_exception helper for exception number NR.  */
static void gen_raise_exception(int nr)
{
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(nr));
}
269 
/* Raise exception NR, recording THIS_PC for the Format $2 stack frame.  */
static void gen_raise_exception_format2(DisasContext *s, int nr,
                                        target_ulong this_pc)
{
    /*
     * Pass the address of the insn to the exception handler,
     * for recording in the Format $2 (6-word) stack frame.
     * Re-use mmu.ar for the purpose, since that's only valid
     * after tlb_fill.
     */
    tcg_gen_st_i32(tcg_constant_i32(this_pc), tcg_env,
                   offsetof(CPUM68KState, mmu.ar));
    gen_raise_exception(nr);
    s->base.is_jmp = DISAS_NORETURN;
}
284 
285 static void gen_exception(DisasContext *s, uint32_t dest, int nr)
286 {
287     update_cc_op(s);
288     tcg_gen_movi_i32(QREG_PC, dest);
289 
290     gen_raise_exception(nr);
291 
292     s->base.is_jmp = DISAS_NORETURN;
293 }
294 
/* Raise an address-error exception at the current insn's address.  */
static inline void gen_addr_fault(DisasContext *s)
{
    gen_exception(s, s->base.pc_next, EXCP_ADDRESS);
}
299 
300 /*
301  * Generate a load from the specified address.  Narrow values are
302  *  sign extended to full register width.
303  */
304 static inline TCGv gen_load(DisasContext *s, int opsize, TCGv addr,
305                             int sign, int index)
306 {
307     TCGv tmp = tcg_temp_new_i32();
308 
309     switch (opsize) {
310     case OS_BYTE:
311     case OS_WORD:
312     case OS_LONG:
313         tcg_gen_qemu_ld_tl(tmp, addr, index,
314                            opsize | (sign ? MO_SIGN : 0) | MO_TE);
315         break;
316     default:
317         g_assert_not_reached();
318     }
319     return tmp;
320 }
321 
322 /* Generate a store.  */
323 static inline void gen_store(DisasContext *s, int opsize, TCGv addr, TCGv val,
324                              int index)
325 {
326     switch (opsize) {
327     case OS_BYTE:
328     case OS_WORD:
329     case OS_LONG:
330         tcg_gen_qemu_st_tl(val, addr, index, opsize | MO_TE);
331         break;
332     default:
333         g_assert_not_reached();
334     }
335 }
336 
/* Access direction/signedness selector for gen_ldst()/gen_ea().  */
typedef enum {
    EA_STORE,   /* write the supplied value to the EA */
    EA_LOADU,   /* load, zero-extended to 32 bits */
    EA_LOADS    /* load, sign-extended to 32 bits */
} ea_what;
342 
343 /*
344  * Generate an unsigned load if VAL is 0 a signed load if val is -1,
345  * otherwise generate a store.
346  */
347 static TCGv gen_ldst(DisasContext *s, int opsize, TCGv addr, TCGv val,
348                      ea_what what, int index)
349 {
350     if (what == EA_STORE) {
351         gen_store(s, opsize, addr, val, index);
352         return store_dummy;
353     } else {
354         return gen_load(s, opsize, addr, what == EA_LOADS, index);
355     }
356 }
357 
358 /* Read a 16-bit immediate constant */
359 static inline uint16_t read_im16(CPUM68KState *env, DisasContext *s)
360 {
361     uint16_t im;
362     im = translator_lduw(env, &s->base, s->pc);
363     s->pc += 2;
364     return im;
365 }
366 
367 /* Read an 8-bit immediate constant */
368 static inline uint8_t read_im8(CPUM68KState *env, DisasContext *s)
369 {
370     return read_im16(env, s);
371 }
372 
373 /* Read a 32-bit immediate constant.  */
374 static inline uint32_t read_im32(CPUM68KState *env, DisasContext *s)
375 {
376     uint32_t im;
377     im = read_im16(env, s) << 16;
378     im |= 0xffff & read_im16(env, s);
379     return im;
380 }
381 
382 /* Read a 64-bit immediate constant.  */
383 static inline uint64_t read_im64(CPUM68KState *env, DisasContext *s)
384 {
385     uint64_t im;
386     im = (uint64_t)read_im32(env, s) << 32;
387     im |= (uint64_t)read_im32(env, s);
388     return im;
389 }
390 
/*
 * Calculate an address index from extension word EXT: a data or address
 * register, optionally sign-extended from 16 bits and scaled by 1, 2, 4
 * or 8.  TMP is a caller-supplied scratch; the result may alias it.
 */
static TCGv gen_addr_index(DisasContext *s, uint16_t ext, TCGv tmp)
{
    TCGv add;
    int scale;

    add = (ext & 0x8000) ? AREG(ext, 12) : DREG(ext, 12);
    if ((ext & 0x800) == 0) {
        /* Word-sized index: use only the low 16 bits, sign-extended.  */
        tcg_gen_ext16s_i32(tmp, add);
        add = tmp;
    }
    scale = (ext >> 9) & 3;
    if (scale != 0) {
        /* Scale by 2^scale.  */
        tcg_gen_shli_i32(tmp, add, scale);
        add = tmp;
    }
    return add;
}
409 
410 /*
411  * Handle a base + index + displacement effective address.
412  * A NULL_QREG base means pc-relative.
413  */
414 static TCGv gen_lea_indexed(CPUM68KState *env, DisasContext *s, TCGv base)
415 {
416     uint32_t offset;
417     uint16_t ext;
418     TCGv add;
419     TCGv tmp;
420     uint32_t bd, od;
421 
422     offset = s->pc;
423     ext = read_im16(env, s);
424 
425     if ((ext & 0x800) == 0 && !m68k_feature(s->env, M68K_FEATURE_WORD_INDEX))
426         return NULL_QREG;
427 
428     if (m68k_feature(s->env, M68K_FEATURE_M68K) &&
429         !m68k_feature(s->env, M68K_FEATURE_SCALED_INDEX)) {
430         ext &= ~(3 << 9);
431     }
432 
433     if (ext & 0x100) {
434         /* full extension word format */
435         if (!m68k_feature(s->env, M68K_FEATURE_EXT_FULL))
436             return NULL_QREG;
437 
438         if ((ext & 0x30) > 0x10) {
439             /* base displacement */
440             if ((ext & 0x30) == 0x20) {
441                 bd = (int16_t)read_im16(env, s);
442             } else {
443                 bd = read_im32(env, s);
444             }
445         } else {
446             bd = 0;
447         }
448         tmp = tcg_temp_new();
449         if ((ext & 0x44) == 0) {
450             /* pre-index */
451             add = gen_addr_index(s, ext, tmp);
452         } else {
453             add = NULL_QREG;
454         }
455         if ((ext & 0x80) == 0) {
456             /* base not suppressed */
457             if (IS_NULL_QREG(base)) {
458                 base = tcg_constant_i32(offset + bd);
459                 bd = 0;
460             }
461             if (!IS_NULL_QREG(add)) {
462                 tcg_gen_add_i32(tmp, add, base);
463                 add = tmp;
464             } else {
465                 add = base;
466             }
467         }
468         if (!IS_NULL_QREG(add)) {
469             if (bd != 0) {
470                 tcg_gen_addi_i32(tmp, add, bd);
471                 add = tmp;
472             }
473         } else {
474             add = tcg_constant_i32(bd);
475         }
476         if ((ext & 3) != 0) {
477             /* memory indirect */
478             base = gen_load(s, OS_LONG, add, 0, IS_USER(s));
479             if ((ext & 0x44) == 4) {
480                 add = gen_addr_index(s, ext, tmp);
481                 tcg_gen_add_i32(tmp, add, base);
482                 add = tmp;
483             } else {
484                 add = base;
485             }
486             if ((ext & 3) > 1) {
487                 /* outer displacement */
488                 if ((ext & 3) == 2) {
489                     od = (int16_t)read_im16(env, s);
490                 } else {
491                     od = read_im32(env, s);
492                 }
493             } else {
494                 od = 0;
495             }
496             if (od != 0) {
497                 tcg_gen_addi_i32(tmp, add, od);
498                 add = tmp;
499             }
500         }
501     } else {
502         /* brief extension word format */
503         tmp = tcg_temp_new();
504         add = gen_addr_index(s, ext, tmp);
505         if (!IS_NULL_QREG(base)) {
506             tcg_gen_add_i32(tmp, add, base);
507             if ((int8_t)ext)
508                 tcg_gen_addi_i32(tmp, tmp, (int8_t)ext);
509         } else {
510             tcg_gen_addi_i32(tmp, add, offset + (int8_t)ext);
511         }
512         add = tmp;
513     }
514     return add;
515 }
516 
517 /* Sign or zero extend a value.  */
518 
519 static inline void gen_ext(TCGv res, TCGv val, int opsize, int sign)
520 {
521     switch (opsize) {
522     case OS_BYTE:
523     case OS_WORD:
524     case OS_LONG:
525         tcg_gen_ext_i32(res, val, opsize | (sign ? MO_SIGN : 0));
526         break;
527     default:
528         g_assert_not_reached();
529     }
530 }
531 
/*
 * Evaluate all the CC flags.  Converts the lazy flag state selected by
 * s->cc_op into concrete values in QREG_CC_{X,N,Z,V,C} and switches to
 * CC_OP_FLAGS.  On entry, for ADD/SUB the result is in QREG_CC_N and
 * the second operand in QREG_CC_V; for CMP the two operands are in
 * QREG_CC_N and QREG_CC_V (see gen_update_cc_add/gen_update_cc_cmp).
 */

static void gen_flush_flags(DisasContext *s)
{
    TCGv t0, t1;

    switch (s->cc_op) {
    case CC_OP_FLAGS:
        /* Already materialized.  */
        return;

    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
        /* C is a copy of X; Z tracks the result value.  */
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for addition.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* Recover the first operand: src1 = result - src2.  */
        tcg_gen_sub_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_ADDB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, QREG_CC_V);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        /* V = (res ^ src2) & ~(src2 ^ src1).  */
        tcg_gen_andc_i32(QREG_CC_V, t1, QREG_CC_V);
        break;

    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
        tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        t1 = tcg_temp_new();
        /* Recover the first operand: src1 = result + src2.  */
        tcg_gen_add_i32(t0, QREG_CC_N, QREG_CC_V);
        gen_ext(t0, t0, s->cc_op - CC_OP_SUBB, 1);
        tcg_gen_xor_i32(t1, QREG_CC_N, t0);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, t0);
        /* V = (src2 ^ src1) & (res ^ src1).  */
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t1);
        break;

    case CC_OP_CMPB:
    case CC_OP_CMPW:
    case CC_OP_CMPL:
        /* N and V hold the two operands; compute N - V here.  */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_C, QREG_CC_N, QREG_CC_V);
        tcg_gen_sub_i32(QREG_CC_Z, QREG_CC_N, QREG_CC_V);
        gen_ext(QREG_CC_Z, QREG_CC_Z, s->cc_op - CC_OP_CMPB, 1);
        /* Compute signed overflow for subtraction.  */
        t0 = tcg_temp_new();
        tcg_gen_xor_i32(t0, QREG_CC_Z, QREG_CC_N);
        tcg_gen_xor_i32(QREG_CC_V, QREG_CC_V, QREG_CC_N);
        tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, t0);
        tcg_gen_mov_i32(QREG_CC_N, QREG_CC_Z);
        break;

    case CC_OP_LOGIC:
        /* Logical ops clear C and V; Z tracks the result in N.  */
        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
        tcg_gen_movi_i32(QREG_CC_C, 0);
        tcg_gen_movi_i32(QREG_CC_V, 0);
        break;

    case CC_OP_DYNAMIC:
        /* cc_op is only known at runtime (in QREG_CC_OP).  */
        gen_helper_flush_flags(tcg_env, QREG_CC_OP);
        s->cc_op_synced = 1;
        break;

    default:
        gen_helper_flush_flags(tcg_env, tcg_constant_i32(s->cc_op));
        s->cc_op_synced = 1;
        break;
    }

    /* Note that flush_flags also assigned to env->cc_op.  */
    s->cc_op = CC_OP_FLAGS;
}
606 
607 static inline TCGv gen_extend(DisasContext *s, TCGv val, int opsize, int sign)
608 {
609     TCGv tmp;
610 
611     if (opsize == OS_LONG) {
612         tmp = val;
613     } else {
614         tmp = tcg_temp_new();
615         gen_ext(tmp, val, opsize, sign);
616     }
617 
618     return tmp;
619 }
620 
/*
 * Record flags for a logical operation: N holds the sign-extended
 * result; Z/C/V are derived lazily by gen_flush_flags().
 */
static void gen_logic_cc(DisasContext *s, TCGv val, int opsize)
{
    gen_ext(QREG_CC_N, val, opsize, 1);
    set_cc_op(s, CC_OP_LOGIC);
}
626 
/*
 * Record a comparison for lazy flag evaluation: N = dest, V = src;
 * gen_flush_flags() computes the flags of dest - src from these.
 */
static void gen_update_cc_cmp(DisasContext *s, TCGv dest, TCGv src, int opsize)
{
    tcg_gen_mov_i32(QREG_CC_N, dest);
    tcg_gen_mov_i32(QREG_CC_V, src);
    set_cc_op(s, CC_OP_CMPB + opsize);
}
633 
/*
 * Record an add/sub for lazy flag evaluation: N = sign-extended result,
 * V = second operand.  The caller selects CC_OP_ADD*/CC_OP_SUB* via
 * set_cc_op().
 */
static void gen_update_cc_add(TCGv dest, TCGv src, int opsize)
{
    gen_ext(QREG_CC_N, dest, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_V, src);
}
639 
640 static inline int opsize_bytes(int opsize)
641 {
642     switch (opsize) {
643     case OS_BYTE: return 1;
644     case OS_WORD: return 2;
645     case OS_LONG: return 4;
646     case OS_SINGLE: return 4;
647     case OS_DOUBLE: return 8;
648     case OS_EXTENDED: return 12;
649     case OS_PACKED: return 12;
650     default:
651         g_assert_not_reached();
652     }
653 }
654 
655 static inline int insn_opsize(int insn)
656 {
657     switch ((insn >> 6) & 3) {
658     case 0: return OS_BYTE;
659     case 1: return OS_WORD;
660     case 2: return OS_LONG;
661     default:
662         g_assert_not_reached();
663     }
664 }
665 
666 static inline int ext_opsize(int ext, int pos)
667 {
668     switch ((ext >> pos) & 7) {
669     case 0: return OS_LONG;
670     case 1: return OS_SINGLE;
671     case 2: return OS_EXTENDED;
672     case 3: return OS_PACKED;
673     case 4: return OS_WORD;
674     case 5: return OS_DOUBLE;
675     case 6: return OS_BYTE;
676     default:
677         g_assert_not_reached();
678     }
679 }
680 
681 /*
682  * Assign value to a register.  If the width is less than the register width
683  * only the low part of the register is set.
684  */
685 static void gen_partset_reg(int opsize, TCGv reg, TCGv val)
686 {
687     switch (opsize) {
688     case OS_BYTE:
689         tcg_gen_deposit_i32(reg, reg, val, 0, 8);
690         break;
691     case OS_WORD:
692         tcg_gen_deposit_i32(reg, reg, val, 0, 16);
693         break;
694     case OS_LONG:
695     case OS_SINGLE:
696         tcg_gen_mov_i32(reg, val);
697         break;
698     default:
699         g_assert_not_reached();
700     }
701 }
702 
703 /*
704  * Generate code for an "effective address".  Does not adjust the base
705  * register for autoincrement addressing modes.
706  */
707 static TCGv gen_lea_mode(CPUM68KState *env, DisasContext *s,
708                          int mode, int reg0, int opsize)
709 {
710     TCGv reg;
711     TCGv tmp;
712     uint16_t ext;
713     uint32_t offset;
714 
715     switch (mode) {
716     case 0: /* Data register direct.  */
717     case 1: /* Address register direct.  */
718         return NULL_QREG;
719     case 3: /* Indirect postincrement.  */
720         if (opsize == OS_UNSIZED) {
721             return NULL_QREG;
722         }
723         /* fallthru */
724     case 2: /* Indirect register */
725         tmp = tcg_temp_new();
726         tcg_gen_mov_i32(tmp, get_areg(s, reg0));
727         return tmp;
728     case 4: /* Indirect predecrememnt.  */
729         if (opsize == OS_UNSIZED) {
730             return NULL_QREG;
731         }
732         reg = get_areg(s, reg0);
733         tmp = tcg_temp_new();
734         if (reg0 == 7 && opsize == OS_BYTE &&
735             m68k_feature(s->env, M68K_FEATURE_M68K)) {
736             tcg_gen_subi_i32(tmp, reg, 2);
737         } else {
738             tcg_gen_subi_i32(tmp, reg, opsize_bytes(opsize));
739         }
740         return tmp;
741     case 5: /* Indirect displacement.  */
742         reg = get_areg(s, reg0);
743         tmp = tcg_temp_new();
744         ext = read_im16(env, s);
745         tcg_gen_addi_i32(tmp, reg, (int16_t)ext);
746         return tmp;
747     case 6: /* Indirect index + displacement.  */
748         reg = get_areg(s, reg0);
749         return gen_lea_indexed(env, s, reg);
750     case 7: /* Other */
751         switch (reg0) {
752         case 0: /* Absolute short.  */
753             offset = (int16_t)read_im16(env, s);
754             break;
755         case 1: /* Absolute long.  */
756             offset = read_im32(env, s);
757             break;
758         case 2: /* pc displacement  */
759             offset = s->pc;
760             offset += (int16_t)read_im16(env, s);
761             break;
762         case 3: /* pc index+displacement.  */
763             return gen_lea_indexed(env, s, NULL_QREG);
764         case 4: /* Immediate.  */
765         default:
766             return NULL_QREG;
767         }
768         tmp = tcg_temp_new();
769         tcg_gen_movi_i32(tmp, offset);
770         return tmp;
771     }
772     /* Should never happen.  */
773     return NULL_QREG;
774 }
775 
776 static TCGv gen_lea(CPUM68KState *env, DisasContext *s, uint16_t insn,
777                     int opsize)
778 {
779     int mode = extract32(insn, 3, 3);
780     int reg0 = REG(insn, 0);
781     return gen_lea_mode(env, s, mode, reg0, opsize);
782 }
783 
784 /*
785  * Generate code to load/store a value from/into an EA.  If WHAT > 0 this is
786  * a write otherwise it is a read (0 == sign extend, -1 == zero extend).
787  * ADDRP is non-null for readwrite operands.
788  */
789 static TCGv gen_ea_mode(CPUM68KState *env, DisasContext *s, int mode, int reg0,
790                         int opsize, TCGv val, TCGv *addrp, ea_what what,
791                         int index)
792 {
793     TCGv reg, tmp, result;
794     int32_t offset;
795 
796     switch (mode) {
797     case 0: /* Data register direct.  */
798         reg = cpu_dregs[reg0];
799         if (what == EA_STORE) {
800             gen_partset_reg(opsize, reg, val);
801             return store_dummy;
802         } else {
803             return gen_extend(s, reg, opsize, what == EA_LOADS);
804         }
805     case 1: /* Address register direct.  */
806         reg = get_areg(s, reg0);
807         if (what == EA_STORE) {
808             tcg_gen_mov_i32(reg, val);
809             return store_dummy;
810         } else {
811             return gen_extend(s, reg, opsize, what == EA_LOADS);
812         }
813     case 2: /* Indirect register */
814         reg = get_areg(s, reg0);
815         return gen_ldst(s, opsize, reg, val, what, index);
816     case 3: /* Indirect postincrement.  */
817         reg = get_areg(s, reg0);
818         result = gen_ldst(s, opsize, reg, val, what, index);
819         if (what == EA_STORE || !addrp) {
820             tmp = tcg_temp_new();
821             if (reg0 == 7 && opsize == OS_BYTE &&
822                 m68k_feature(s->env, M68K_FEATURE_M68K)) {
823                 tcg_gen_addi_i32(tmp, reg, 2);
824             } else {
825                 tcg_gen_addi_i32(tmp, reg, opsize_bytes(opsize));
826             }
827             delay_set_areg(s, reg0, tmp, true);
828         }
829         return result;
830     case 4: /* Indirect predecrememnt.  */
831         if (addrp && what == EA_STORE) {
832             tmp = *addrp;
833         } else {
834             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
835             if (IS_NULL_QREG(tmp)) {
836                 return tmp;
837             }
838             if (addrp) {
839                 *addrp = tmp;
840             }
841         }
842         result = gen_ldst(s, opsize, tmp, val, what, index);
843         if (what == EA_STORE || !addrp) {
844             delay_set_areg(s, reg0, tmp, false);
845         }
846         return result;
847     case 5: /* Indirect displacement.  */
848     case 6: /* Indirect index + displacement.  */
849     do_indirect:
850         if (addrp && what == EA_STORE) {
851             tmp = *addrp;
852         } else {
853             tmp = gen_lea_mode(env, s, mode, reg0, opsize);
854             if (IS_NULL_QREG(tmp)) {
855                 return tmp;
856             }
857             if (addrp) {
858                 *addrp = tmp;
859             }
860         }
861         return gen_ldst(s, opsize, tmp, val, what, index);
862     case 7: /* Other */
863         switch (reg0) {
864         case 0: /* Absolute short.  */
865         case 1: /* Absolute long.  */
866         case 2: /* pc displacement  */
867         case 3: /* pc index+displacement.  */
868             goto do_indirect;
869         case 4: /* Immediate.  */
870             /* Sign extend values for consistency.  */
871             switch (opsize) {
872             case OS_BYTE:
873                 if (what == EA_LOADS) {
874                     offset = (int8_t)read_im8(env, s);
875                 } else {
876                     offset = read_im8(env, s);
877                 }
878                 break;
879             case OS_WORD:
880                 if (what == EA_LOADS) {
881                     offset = (int16_t)read_im16(env, s);
882                 } else {
883                     offset = read_im16(env, s);
884                 }
885                 break;
886             case OS_LONG:
887                 offset = read_im32(env, s);
888                 break;
889             default:
890                 g_assert_not_reached();
891             }
892             return tcg_constant_i32(offset);
893         default:
894             return NULL_QREG;
895         }
896     }
897     /* Should never happen.  */
898     return NULL_QREG;
899 }
900 
901 static TCGv gen_ea(CPUM68KState *env, DisasContext *s, uint16_t insn,
902                    int opsize, TCGv val, TCGv *addrp, ea_what what, int index)
903 {
904     int mode = extract32(insn, 3, 3);
905     int reg0 = REG(insn, 0);
906     return gen_ea_mode(env, s, mode, reg0, opsize, val, addrp, what, index);
907 }
908 
/* Return a host pointer temporary addressing env->fregs[freg].  */
static TCGv_ptr gen_fp_ptr(int freg)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, tcg_env, offsetof(CPUM68KState, fregs[freg]));
    return fp;
}
915 
/* Return a host pointer temporary addressing env->fp_result.  */
static TCGv_ptr gen_fp_result_ptr(void)
{
    TCGv_ptr fp = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(fp, tcg_env, offsetof(CPUM68KState, fp_result));
    return fp;
}
922 
923 static void gen_fp_move(TCGv_ptr dest, TCGv_ptr src)
924 {
925     TCGv t32;
926     TCGv_i64 t64;
927 
928     t32 = tcg_temp_new();
929     tcg_gen_ld16u_i32(t32, src, offsetof(FPReg, l.upper));
930     tcg_gen_st16_i32(t32, dest, offsetof(FPReg, l.upper));
931 
932     t64 = tcg_temp_new_i64();
933     tcg_gen_ld_i64(t64, src, offsetof(FPReg, l.lower));
934     tcg_gen_st_i64(t64, dest, offsetof(FPReg, l.lower));
935 }
936 
/*
 * Load a value of size OPSIZE from ADDR into the FPReg pointed to by FP,
 * converting via the ext* helpers.  OS_EXTENDED on ColdFire and
 * OS_PACKED everywhere raise EXCP_FP_UNIMP.
 */
static void gen_load_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                        int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        tcg_gen_qemu_ld_tl(tmp, addr, index, opsize | MO_SIGN | MO_TE);
        gen_helper_exts32(tcg_env, fp, tmp);
        break;
    case OS_SINGLE:
        tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
        gen_helper_extf32(tcg_env, fp, tmp);
        break;
    case OS_DOUBLE:
        tcg_gen_qemu_ld_i64(t64, addr, index, MO_TEUQ);
        gen_helper_extf64(tcg_env, fp, t64);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /* The 16-bit l.upper word sits in the high half of the first long. */
        tcg_gen_qemu_ld_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_shri_i32(tmp, tmp, 16);
        tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_qemu_ld_i64(t64, tmp, index, MO_TEUQ);
        tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
983 
/*
 * Convert the FP register pointed to by FP to size OPSIZE and store
 * it at ADDR.  INDEX is the MMU index for the access.
 */
static void gen_store_fp(DisasContext *s, int opsize, TCGv addr, TCGv_ptr fp,
                         int index)
{
    TCGv tmp;
    TCGv_i64 t64;

    t64 = tcg_temp_new_i64();
    tmp = tcg_temp_new();
    switch (opsize) {
    case OS_BYTE:
    case OS_WORD:
    case OS_LONG:
        /* Narrow via helper, then store with the operand size. */
        gen_helper_reds32(tmp, tcg_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, opsize | MO_TE);
        break;
    case OS_SINGLE:
        gen_helper_redf32(tmp, tcg_env, fp);
        tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
        break;
    case OS_DOUBLE:
        gen_helper_redf64(t64, tcg_env, fp);
        tcg_gen_qemu_st_i64(t64, addr, index, MO_TEUQ);
        break;
    case OS_EXTENDED:
        if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
            /* ColdFire FPUs have no extended-precision format. */
            gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
            break;
        }
        /*
         * Memory format: sign/exponent in the upper 16 bits of the
         * first longword, followed by the 64-bit mantissa.
         */
        tcg_gen_ld16u_i32(tmp, fp, offsetof(FPReg, l.upper));
        tcg_gen_shli_i32(tmp, tmp, 16);
        tcg_gen_qemu_st_i32(tmp, addr, index, MO_TEUL);
        tcg_gen_addi_i32(tmp, addr, 4);
        tcg_gen_ld_i64(t64, fp, offsetof(FPReg, l.lower));
        tcg_gen_qemu_st_i64(t64, tmp, index, MO_TEUQ);
        break;
    case OS_PACKED:
        /*
         * unimplemented data type on 68040/ColdFire
         * FIXME if needed for another FPU
         */
        gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
        break;
    default:
        g_assert_not_reached();
    }
}
1030 
1031 static void gen_ldst_fp(DisasContext *s, int opsize, TCGv addr,
1032                         TCGv_ptr fp, ea_what what, int index)
1033 {
1034     if (what == EA_STORE) {
1035         gen_store_fp(s, opsize, addr, fp, index);
1036     } else {
1037         gen_load_fp(s, opsize, addr, fp, index);
1038     }
1039 }
1040 
/*
 * Generate an FP load/store (per WHAT) for addressing mode MODE with
 * register field REG0.  Returns 0 on success, -1 when the EA
 * combination is invalid for this access.
 */
static int gen_ea_mode_fp(CPUM68KState *env, DisasContext *s, int mode,
                          int reg0, int opsize, TCGv_ptr fp, ea_what what,
                          int index)
{
    TCGv reg, addr, tmp;
    TCGv_i64 t64;

    switch (mode) {
    case 0: /* Data register direct.  */
        reg = cpu_dregs[reg0];
        if (what == EA_STORE) {
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                gen_helper_reds32(reg, tcg_env, fp);
                break;
            case OS_SINGLE:
                gen_helper_redf32(reg, tcg_env, fp);
                break;
            default:
                /* Double/extended do not fit in a data register. */
                g_assert_not_reached();
            }
        } else {
            tmp = tcg_temp_new();
            switch (opsize) {
            case OS_BYTE:
            case OS_WORD:
            case OS_LONG:
                tcg_gen_ext_i32(tmp, reg, opsize | MO_SIGN);
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_SINGLE:
                gen_helper_extf32(tcg_env, fp, reg);
                break;
            default:
                g_assert_not_reached();
            }
        }
        return 0;
    case 1: /* Address register direct.  */
        return -1;
    case 2: /* Indirect register */
        addr = get_areg(s, reg0);
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 3: /* Indirect postincrement.  */
        addr = cpu_aregs[reg0];
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        tcg_gen_addi_i32(addr, addr, opsize_bytes(opsize));
        return 0;
    case 4: /* Indirect predecrement.  */
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        /* Commit the decremented address only after the access. */
        tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        return 0;
    case 5: /* Indirect displacement.  */
    case 6: /* Indirect index + displacement.  */
    do_indirect:
        addr = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(addr)) {
            return -1;
        }
        gen_ldst_fp(s, opsize, addr, fp, what, index);
        return 0;
    case 7: /* Other */
        switch (reg0) {
        case 0: /* Absolute short.  */
        case 1: /* Absolute long.  */
        case 2: /* pc displacement  */
        case 3: /* pc index+displacement.  */
            goto do_indirect;
        case 4: /* Immediate.  */
            if (what == EA_STORE) {
                /* Cannot store to an immediate operand. */
                return -1;
            }
            switch (opsize) {
            case OS_BYTE:
                tmp = tcg_constant_i32((int8_t)read_im8(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_WORD:
                tmp = tcg_constant_i32((int16_t)read_im16(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_LONG:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_exts32(tcg_env, fp, tmp);
                break;
            case OS_SINGLE:
                tmp = tcg_constant_i32(read_im32(env, s));
                gen_helper_extf32(tcg_env, fp, tmp);
                break;
            case OS_DOUBLE:
                t64 = tcg_constant_i64(read_im64(env, s));
                gen_helper_extf64(tcg_env, fp, t64);
                break;
            case OS_EXTENDED:
                if (m68k_feature(s->env, M68K_FEATURE_CF_FPU)) {
                    gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                    break;
                }
                /* Immediate form of the 96-bit extended layout. */
                tmp = tcg_constant_i32(read_im32(env, s) >> 16);
                tcg_gen_st16_i32(tmp, fp, offsetof(FPReg, l.upper));
                t64 = tcg_constant_i64(read_im64(env, s));
                tcg_gen_st_i64(t64, fp, offsetof(FPReg, l.lower));
                break;
            case OS_PACKED:
                /*
                 * unimplemented data type on 68040/ColdFire
                 * FIXME if needed for another FPU
                 */
                gen_exception(s, s->base.pc_next, EXCP_FP_UNIMP);
                break;
            default:
                g_assert_not_reached();
            }
            return 0;
        default:
            return -1;
        }
    }
    return -1;
}
1168 
1169 static int gen_ea_fp(CPUM68KState *env, DisasContext *s, uint16_t insn,
1170                        int opsize, TCGv_ptr fp, ea_what what, int index)
1171 {
1172     int mode = extract32(insn, 3, 3);
1173     int reg0 = REG(insn, 0);
1174     return gen_ea_mode_fp(env, s, mode, reg0, opsize, fp, what, index);
1175 }
1176 
/* A comparison describing a condition: true iff "v1 <tcond> v2". */
typedef struct {
    TCGCond tcond;  /* TCG comparison to apply */
    TCGv v1;        /* left operand */
    TCGv v2;        /* right operand */
} DisasCompare;
1182 
/*
 * Build the DisasCompare for m68k condition code COND (0..15).
 * Conditions are handled in even/odd pairs: the even member of each
 * pair is produced by inverting the computed TCG condition at "done".
 * Fast paths answer the condition straight from the pending lazy cc
 * state; otherwise the flags are flushed to CC_OP_FLAGS first.
 */
static void gen_cc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv tmp, tmp2;
    TCGCond tcond;
    CCOp op = s->cc_op;

    /* The CC_OP_CMP form can handle most normal comparisons directly.  */
    if (op == CC_OP_CMPB || op == CC_OP_CMPW || op == CC_OP_CMPL) {
        c->v1 = QREG_CC_N;
        c->v2 = QREG_CC_V;
        switch (cond) {
        case 2: /* HI */
        case 3: /* LS */
            tcond = TCG_COND_LEU;
            goto done;
        case 4: /* CC */
        case 5: /* CS */
            tcond = TCG_COND_LTU;
            goto done;
        case 6: /* NE */
        case 7: /* EQ */
            tcond = TCG_COND_EQ;
            goto done;
        case 10: /* PL */
        case 11: /* MI */
            /* Recompute the sign of the difference at operand width. */
            c->v2 = tcg_constant_i32(0);
            c->v1 = tmp = tcg_temp_new();
            tcg_gen_sub_i32(tmp, QREG_CC_N, QREG_CC_V);
            gen_ext(tmp, tmp, op - CC_OP_CMPB, 1);
            /* fallthru */
        case 12: /* GE */
        case 13: /* LT */
            tcond = TCG_COND_LT;
            goto done;
        case 14: /* GT */
        case 15: /* LE */
            tcond = TCG_COND_LE;
            goto done;
        }
    }

    c->v2 = tcg_constant_i32(0);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
        c->v1 = c->v2;
        tcond = TCG_COND_NEVER;
        goto done;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        /*
         * Logic operations clear V, which simplifies LE to (Z || N),
         * and since Z and N are co-located, this becomes a normal
         * comparison vs N.
         */
        if (op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LE;
            goto done;
        }
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        /* Logic operations clear V, which simplifies this to N.  */
        if (op != CC_OP_LOGIC) {
            break;
        }
        /* fallthru */
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        /* Several cases represent N normally.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            c->v1 = QREG_CC_N;
            tcond = TCG_COND_LT;
            goto done;
        }
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        /* Some cases fold Z into N.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL ||
            op == CC_OP_LOGIC) {
            tcond = TCG_COND_EQ;
            c->v1 = QREG_CC_N;
            goto done;
        }
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        /* Some cases fold C into X.  */
        if (op == CC_OP_ADDB || op == CC_OP_ADDW || op == CC_OP_ADDL ||
            op == CC_OP_SUBB || op == CC_OP_SUBW || op == CC_OP_SUBL) {
            tcond = TCG_COND_NE;
            c->v1 = QREG_CC_X;
            goto done;
        }
        /* fallthru */
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        /* Logic operations clear V and C.  */
        if (op == CC_OP_LOGIC) {
            tcond = TCG_COND_NEVER;
            c->v1 = c->v2;
            goto done;
        }
        break;
    }

    /* Otherwise, flush flag state to CC_OP_FLAGS.  */
    gen_flush_flags(s);

    switch (cond) {
    case 0: /* T */
    case 1: /* F */
    default:
        /* Invalid, or handled above.  */
        abort();
    case 2: /* HI (!C && !Z) -> !(C || Z)*/
    case 3: /* LS (C || Z) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_setcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tcg_gen_or_i32(tmp, tmp, QREG_CC_C);
        tcond = TCG_COND_NE;
        break;
    case 4: /* CC (!C) */
    case 5: /* CS (C) */
        c->v1 = QREG_CC_C;
        tcond = TCG_COND_NE;
        break;
    case 6: /* NE (!Z) */
    case 7: /* EQ (Z) */
        c->v1 = QREG_CC_Z;
        tcond = TCG_COND_EQ;
        break;
    case 8: /* VC (!V) */
    case 9: /* VS (V) */
        c->v1 = QREG_CC_V;
        tcond = TCG_COND_LT;
        break;
    case 10: /* PL (!N) */
    case 11: /* MI (N) */
        c->v1 = QREG_CC_N;
        tcond = TCG_COND_LT;
        break;
    case 12: /* GE (!(N ^ V)) */
    case 13: /* LT (N ^ V) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_xor_i32(tmp, QREG_CC_N, QREG_CC_V);
        tcond = TCG_COND_LT;
        break;
    case 14: /* GT (!(Z || (N ^ V))) */
    case 15: /* LE (Z || (N ^ V)) */
        c->v1 = tmp = tcg_temp_new();
        tcg_gen_negsetcond_i32(TCG_COND_EQ, tmp, QREG_CC_Z, c->v2);
        tmp2 = tcg_temp_new();
        tcg_gen_xor_i32(tmp2, QREG_CC_N, QREG_CC_V);
        tcg_gen_or_i32(tmp, tmp, tmp2);
        tcond = TCG_COND_LT;
        break;
    }

 done:
    /* The even condition of each pair is the inverse of the odd one. */
    if ((cond & 1) == 0) {
        tcond = tcg_invert_cond(tcond);
    }
    c->tcond = tcond;
}
1354 
1355 static void gen_jmpcc(DisasContext *s, int cond, TCGLabel *l1)
1356 {
1357   DisasCompare c;
1358 
1359   gen_cc_cond(&c, s, cond);
1360   update_cc_op(s);
1361   tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
1362 }
1363 
/* Force a TB lookup after an instruction that changes the CPU state.  */
static void gen_exit_tb(DisasContext *s)
{
    /* Flush the lazy cc state, point PC past this insn, exit to the
       main loop so the new state takes effect. */
    update_cc_op(s);
    tcg_gen_movi_i32(QREG_PC, s->pc);
    s->base.is_jmp = DISAS_EXIT;
}
1371 
/*
 * Load a source operand of size OPSIZE from the EA encoded in INSN,
 * optionally returning the computed address via ADDRP.  On an invalid
 * EA, generate an address fault and return from the enclosing function.
 */
#define SRC_EA(env, result, opsize, op_sign, addrp) do {                \
        result = gen_ea(env, s, insn, opsize, NULL_QREG, addrp,         \
                        op_sign ? EA_LOADS : EA_LOADU, IS_USER(s));     \
        if (IS_NULL_QREG(result)) {                                     \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1380 
/*
 * Store VAL to the EA encoded in INSN, reusing a previously computed
 * address via ADDRP when non-NULL.  On an invalid EA, generate an
 * address fault and return from the enclosing function.
 */
#define DEST_EA(env, insn, opsize, val, addrp) do {                     \
        TCGv ea_result = gen_ea(env, s, insn, opsize, val, addrp,       \
                                EA_STORE, IS_USER(s));                  \
        if (IS_NULL_QREG(ea_result)) {                                  \
            gen_addr_fault(s);                                          \
            return;                                                     \
        }                                                               \
    } while (0)
1389 
/* Generate a jump to an immediate address.  */
static void gen_jmp_tb(DisasContext *s, int n, target_ulong dest,
                       target_ulong src)
{
    if (unlikely(s->ss_active)) {
        /* Single-stepping: raise a trace exception instead of jumping. */
        update_cc_op(s);
        tcg_gen_movi_i32(QREG_PC, dest);
        gen_raise_exception_format2(s, EXCP_TRACE, src);
    } else if (translator_use_goto_tb(&s->base, dest)) {
        /* Chain directly to the destination TB via slot N. */
        tcg_gen_goto_tb(n);
        tcg_gen_movi_i32(QREG_PC, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        /* Indirect jump: set PC and return to the main loop. */
        gen_jmp_im(s, dest);
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
1408 
1409 #ifndef CONFIG_USER_ONLY
1410 static bool semihosting_test(DisasContext *s)
1411 {
1412     uint32_t test;
1413 
1414     if (!semihosting_enabled(IS_USER(s))) {
1415         return false;
1416     }
1417 
1418     /*
1419      * "The semihosting instruction is immediately preceded by a
1420      * nop aligned to a 4-byte boundary..."
1421      * The preceding 2-byte (aligned) nop plus the 2-byte halt/bkpt
1422      * means that we have advanced 4 bytes from the required nop.
1423      */
1424     if (s->pc % 4 != 0) {
1425         return false;
1426     }
1427     test = translator_lduw(s->env, &s->base, s->pc - 4);
1428     if (test != 0x4e71) {
1429         return false;
1430     }
1431     /* "... and followed by an invalid sentinel instruction movec %sp,0." */
1432     test = translator_ldl(s->env, &s->base, s->pc);
1433     if (test != 0x4e7bf000) {
1434         return false;
1435     }
1436 
1437     /* Consume the sentinel. */
1438     s->pc += 4;
1439     return true;
1440 }
1441 #endif /* !CONFIG_USER_ONLY */
1442 
1443 DISAS_INSN(scc)
1444 {
1445     DisasCompare c;
1446     int cond;
1447     TCGv tmp;
1448 
1449     cond = (insn >> 8) & 0xf;
1450     gen_cc_cond(&c, s, cond);
1451 
1452     tmp = tcg_temp_new();
1453     tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
1454 
1455     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
1456 }
1457 
/*
 * DBcc Dn,<label>: if cc is false, decrement Dn.w and branch back
 * unless the counter reached -1.
 */
DISAS_INSN(dbcc)
{
    TCGLabel *l1;
    TCGv reg;
    TCGv tmp;
    int16_t offset;
    uint32_t base;

    reg = DREG(insn, 0);
    base = s->pc;  /* displacement is relative to the extension word */
    offset = (int16_t)read_im16(env, s);
    l1 = gen_new_label();
    /* Condition true: skip the decrement and fall through. */
    gen_jmpcc(s, (insn >> 8) & 0xf, l1);

    tmp = tcg_temp_new();
    tcg_gen_ext16s_i32(tmp, reg);
    tcg_gen_addi_i32(tmp, tmp, -1);
    gen_partset_reg(OS_WORD, reg, tmp);  /* only the low word is written */
    tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, -1, l1);
    gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
    gen_set_label(l1);
    gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
}
1481 
/* Unimplemented MAC opcode: raise the line-A exception. */
DISAS_INSN(undef_mac)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEA);
}
1486 
/* Unimplemented FPU opcode: raise the line-F exception. */
DISAS_INSN(undef_fpu)
{
    gen_exception(s, s->base.pc_next, EXCP_LINEF);
}
1491 
/* Log and raise the illegal-instruction exception for unhandled opcodes. */
DISAS_INSN(undef)
{
    /*
     * ??? This is both instructions that are as yet unimplemented
     * for the 680x0 series, as well as those that are implemented
     * but actually illegal for CPU32 or pre-68020.
     */
    qemu_log_mask(LOG_UNIMP, "Illegal instruction: %04x @ %" VADDR_PRIx "\n",
                  insn, s->base.pc_next);
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
1503 
1504 DISAS_INSN(mulw)
1505 {
1506     TCGv reg;
1507     TCGv tmp;
1508     TCGv src;
1509     int sign;
1510 
1511     sign = (insn & 0x100) != 0;
1512     reg = DREG(insn, 9);
1513     tmp = tcg_temp_new();
1514     if (sign)
1515         tcg_gen_ext16s_i32(tmp, reg);
1516     else
1517         tcg_gen_ext16u_i32(tmp, reg);
1518     SRC_EA(env, src, OS_WORD, sign, NULL);
1519     tcg_gen_mul_i32(tmp, tmp, src);
1520     tcg_gen_mov_i32(reg, tmp);
1521     gen_logic_cc(s, tmp, OS_LONG);
1522 }
1523 
/* Word divide: flags and the quotient/remainder are set by the helper. */
DISAS_INSN(divw)
{
    int sign;
    TCGv src;
    TCGv destr;
    TCGv ilen;

    /* divX.w <EA>,Dn    32/16 -> 16r:16q */

    sign = (insn & 0x100) != 0;

    /* dest.l / src.w */

    SRC_EA(env, src, OS_WORD, sign, NULL);
    destr = tcg_constant_i32(REG(insn, 9));
    /* insn length in bytes is passed to the helper — see divsw/divuw */
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsw(tcg_env, destr, src, ilen);
    } else {
        gen_helper_divuw(tcg_env, destr, src, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1548 
/*
 * Long divide.  Extension word: bit 11 selects signed, bit 10 selects
 * the 64-bit-dividend form (QUAD_MULDIV feature required).
 */
DISAS_INSN(divl)
{
    TCGv num, reg, den, ilen;
    int sign;
    uint16_t ext;

    ext = read_im16(env, s);

    sign = (ext & 0x0800) != 0;

    if (ext & 0x400) {
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        /* divX.l <EA>, Dr:Dq    64/32 -> 32r:32q */

        SRC_EA(env, den, OS_LONG, 0, NULL);
        num = tcg_constant_i32(REG(ext, 12));
        reg = tcg_constant_i32(REG(ext, 0));
        /* insn length in bytes is passed to the helper */
        ilen = tcg_constant_i32(s->pc - s->base.pc_next);
        if (sign) {
            gen_helper_divsll(tcg_env, num, reg, den, ilen);
        } else {
            gen_helper_divull(tcg_env, num, reg, den, ilen);
        }
        set_cc_op(s, CC_OP_FLAGS);
        return;
    }

    /* divX.l <EA>, Dq        32/32 -> 32q     */
    /* divXl.l <EA>, Dr:Dq    32/32 -> 32r:32q */

    SRC_EA(env, den, OS_LONG, 0, NULL);
    num = tcg_constant_i32(REG(ext, 12));
    reg = tcg_constant_i32(REG(ext, 0));
    ilen = tcg_constant_i32(s->pc - s->base.pc_next);
    if (sign) {
        gen_helper_divsl(tcg_env, num, reg, den, ilen);
    } else {
        gen_helper_divul(tcg_env, num, reg, den, ilen);
    }

    set_cc_op(s, CC_OP_FLAGS);
}
1595 
/*
 * Generate dest(BCD) = dest + src + X for two packed BCD digits per
 * byte, using the classic "add 0x66, then fix up digits without a
 * decimal carry" technique detailed below.
 */
static void bcd_add(TCGv dest, TCGv src)
{
    TCGv t0, t1;

    /*
     * dest10 = dest10 + src10 + X
     *
     *        t1 = src
     *        t2 = t1 + 0x066
     *        t3 = t2 + dest + X
     *        t4 = t2 ^ dest
     *        t5 = t3 ^ t4
     *        t6 = ~t5 & 0x110
     *        t7 = (t6 >> 2) | (t6 >> 3)
     *        return t3 - t7
     */

    /*
     * t1 = (src + 0x066) + dest + X
     *    = result with some possible exceeding 0x6
     */

    t0 = tcg_temp_new();
    tcg_gen_addi_i32(t0, src, 0x066);

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_add_i32(t1, t1, QREG_CC_X);

    /* we will remove exceeding 0x6 where there is no carry */

    /*
     * t0 = (src + 0x0066) ^ dest
     *    = t1 without carries
     */

    tcg_gen_xor_i32(t0, t0, dest);

    /*
     * extract the carries
     * t0 = t0 ^ t1
     *    = only the carries
     */

    tcg_gen_xor_i32(t0, t0, t1);

    /*
     * generate 0x1 where there is no carry
     * and for each 0x10, generate a 0x6
     */

    tcg_gen_shri_i32(t0, t0, 3);
    tcg_gen_not_i32(t0, t0);
    tcg_gen_andi_i32(t0, t0, 0x22);
    tcg_gen_add_i32(dest, t0, t0);
    tcg_gen_add_i32(dest, dest, t0);

    /*
     * remove the exceeding 0x6
     * for digits that have not generated a carry
     */

    tcg_gen_sub_i32(dest, t1, dest);
}
1660 
/*
 * Generate dest(BCD) = dest - src - X, reduced to the bcd_add scheme
 * via the complement identity shown below.
 */
static void bcd_sub(TCGv dest, TCGv src)
{
    TCGv t0, t1, t2;

    /*
     *  dest10 = dest10 - src10 - X
     *         = bcd_add(dest + 1 - X, 0x199 - src)
     */

    /* t0 = 0x066 + (0x199 - src) */

    t0 = tcg_temp_new();
    tcg_gen_subfi_i32(t0, 0x1ff, src);

    /* t1 = t0 + dest + 1 - X*/

    t1 = tcg_temp_new();
    tcg_gen_add_i32(t1, t0, dest);
    tcg_gen_addi_i32(t1, t1, 1);
    tcg_gen_sub_i32(t1, t1, QREG_CC_X);

    /* t2 = t0 ^ dest */

    t2 = tcg_temp_new();
    tcg_gen_xor_i32(t2, t0, dest);

    /* t0 = t1 ^ t2 */

    tcg_gen_xor_i32(t0, t1, t2);

    /*
     * t2 = ~t0 & 0x110
     * t0 = (t2 >> 2) | (t2 >> 3)
     *
     * to fit on 8bit operands, changed in:
     *
     * t2 = ~(t0 >> 3) & 0x22
     * t0 = t2 + t2
     * t0 = t0 + t2
     */

    tcg_gen_shri_i32(t2, t0, 3);
    tcg_gen_not_i32(t2, t2);
    tcg_gen_andi_i32(t2, t2, 0x22);
    tcg_gen_add_i32(t0, t2, t2);
    tcg_gen_add_i32(t0, t0, t2);

    /* return t1 - t0 */

    tcg_gen_sub_i32(dest, t1, t0);
}
1712 
/*
 * Update CC after a BCD operation on VAL (a 9-bit result):
 * C and X receive the carry out of bit 8; Z accumulates nonzero
 * result bits, so it is only cleared here, never set (sticky Z).
 */
static void bcd_flags(TCGv val)
{
    tcg_gen_andi_i32(QREG_CC_C, val, 0x0ff);
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_C);

    tcg_gen_extract_i32(QREG_CC_C, val, 8, 1);

    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);
}
1722 
1723 DISAS_INSN(abcd_reg)
1724 {
1725     TCGv src;
1726     TCGv dest;
1727 
1728     gen_flush_flags(s); /* !Z is sticky */
1729 
1730     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1731     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1732     bcd_add(dest, src);
1733     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1734 
1735     bcd_flags(dest);
1736 }
1737 
/* ABCD -(Ay),-(Ax): memory-to-memory BCD add with pre-decrement. */
DISAS_INSN(abcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_add(dest, src);

    /* Write back to the destination address computed by the load. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1758 
1759 DISAS_INSN(sbcd_reg)
1760 {
1761     TCGv src, dest;
1762 
1763     gen_flush_flags(s); /* !Z is sticky */
1764 
1765     src = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
1766     dest = gen_extend(s, DREG(insn, 9), OS_BYTE, 0);
1767 
1768     bcd_sub(dest, src);
1769 
1770     gen_partset_reg(OS_BYTE, DREG(insn, 9), dest);
1771 
1772     bcd_flags(dest);
1773 }
1774 
/* SBCD -(Ay),-(Ax): memory-to-memory BCD subtract with pre-decrement. */
DISAS_INSN(sbcd_mem)
{
    TCGv src, dest, addr;

    gen_flush_flags(s); /* !Z is sticky */

    /* Indirect pre-decrement load (mode 4) */

    src = gen_ea_mode(env, s, 4, REG(insn, 0), OS_BYTE,
                      NULL_QREG, NULL, EA_LOADU, IS_USER(s));
    dest = gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE,
                       NULL_QREG, &addr, EA_LOADU, IS_USER(s));

    bcd_sub(dest, src);

    /* Write back to the destination address computed by the load. */
    gen_ea_mode(env, s, 4, REG(insn, 9), OS_BYTE, dest, &addr,
                EA_STORE, IS_USER(s));

    bcd_flags(dest);
}
1795 
1796 DISAS_INSN(nbcd)
1797 {
1798     TCGv src, dest;
1799     TCGv addr;
1800 
1801     gen_flush_flags(s); /* !Z is sticky */
1802 
1803     SRC_EA(env, src, OS_BYTE, 0, &addr);
1804 
1805     dest = tcg_temp_new();
1806     tcg_gen_movi_i32(dest, 0);
1807     bcd_sub(dest, src);
1808 
1809     DEST_EA(env, insn, OS_BYTE, dest, &addr);
1810 
1811     bcd_flags(dest);
1812 }
1813 
/*
 * ADD/SUB in both directions.  Opcode bit 14 selects add vs sub;
 * bit 8 selects whether <ea> or Dn is the destination.
 */
DISAS_INSN(addsub)
{
    TCGv reg;
    TCGv dest;
    TCGv src;
    TCGv tmp;
    TCGv addr;
    int add;
    int opsize;

    add = (insn & 0x4000) != 0;
    opsize = insn_opsize(insn);
    reg = gen_extend(s, DREG(insn, 9), opsize, 1);
    dest = tcg_temp_new();
    if (insn & 0x100) {
        /* Dn op <ea> -> <ea> */
        SRC_EA(env, tmp, opsize, 1, &addr);
        src = reg;
    } else {
        /* <ea> op Dn -> Dn */
        tmp = reg;
        SRC_EA(env, src, opsize, 1, NULL);
    }
    if (add) {
        tcg_gen_add_i32(dest, tmp, src);
        /* X = carry out: unsigned result < addend */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, src);
        set_cc_op(s, CC_OP_ADDB + opsize);
    } else {
        /* X = borrow, computed from the operands before subtracting */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, tmp, src);
        tcg_gen_sub_i32(dest, tmp, src);
        set_cc_op(s, CC_OP_SUBB + opsize);
    }
    gen_update_cc_add(dest, src, opsize);
    if (insn & 0x100) {
        DEST_EA(env, insn, opsize, dest, &addr);
    } else {
        gen_partset_reg(opsize, DREG(insn, 9), dest);
    }
}
1851 
1852 /* Reverse the order of the bits in REG.  */
1853 DISAS_INSN(bitrev)
1854 {
1855     TCGv reg;
1856     reg = DREG(insn, 0);
1857     gen_helper_bitrev(reg, reg);
1858 }
1859 
1860 DISAS_INSN(bitop_reg)
1861 {
1862     int opsize;
1863     int op;
1864     TCGv src1;
1865     TCGv src2;
1866     TCGv tmp;
1867     TCGv addr;
1868     TCGv dest;
1869 
1870     if ((insn & 0x38) != 0)
1871         opsize = OS_BYTE;
1872     else
1873         opsize = OS_LONG;
1874     op = (insn >> 6) & 3;
1875     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
1876 
1877     gen_flush_flags(s);
1878     src2 = tcg_temp_new();
1879     if (opsize == OS_BYTE)
1880         tcg_gen_andi_i32(src2, DREG(insn, 9), 7);
1881     else
1882         tcg_gen_andi_i32(src2, DREG(insn, 9), 31);
1883 
1884     tmp = tcg_temp_new();
1885     tcg_gen_shl_i32(tmp, tcg_constant_i32(1), src2);
1886 
1887     tcg_gen_and_i32(QREG_CC_Z, src1, tmp);
1888 
1889     dest = tcg_temp_new();
1890     switch (op) {
1891     case 1: /* bchg */
1892         tcg_gen_xor_i32(dest, src1, tmp);
1893         break;
1894     case 2: /* bclr */
1895         tcg_gen_andc_i32(dest, src1, tmp);
1896         break;
1897     case 3: /* bset */
1898         tcg_gen_or_i32(dest, src1, tmp);
1899         break;
1900     default: /* btst */
1901         break;
1902     }
1903     if (op) {
1904         DEST_EA(env, insn, opsize, dest, &addr);
1905     }
1906 }
1907 
1908 DISAS_INSN(sats)
1909 {
1910     TCGv reg;
1911     reg = DREG(insn, 0);
1912     gen_flush_flags(s);
1913     gen_helper_sats(reg, reg, QREG_CC_V);
1914     gen_logic_cc(s, reg, OS_LONG);
1915 }
1916 
1917 static void gen_push(DisasContext *s, TCGv val)
1918 {
1919     TCGv tmp;
1920 
1921     tmp = tcg_temp_new();
1922     tcg_gen_subi_i32(tmp, QREG_SP, 4);
1923     gen_store(s, OS_LONG, tmp, val, IS_USER(s));
1924     tcg_gen_mov_i32(QREG_SP, tmp);
1925 }
1926 
1927 static TCGv mreg(int reg)
1928 {
1929     if (reg < 8) {
1930         /* Dx */
1931         return cpu_dregs[reg];
1932     }
1933     /* Ax */
1934     return cpu_aregs[reg & 7];
1935 }
1936 
/*
 * MOVEM: move multiple registers to/from memory, selected by the mask
 * in the extension word.  Opcode bit 10 selects load (memory->regs);
 * bit 6 selects long vs word operands.
 */
DISAS_INSN(movem)
{
    TCGv addr, incr, tmp, r[16];
    int is_load = (insn & 0x0400) != 0;
    int opsize = (insn & 0x40) != 0 ? OS_LONG : OS_WORD;
    uint16_t mask = read_im16(env, s);
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);
    int i;

    tmp = cpu_aregs[reg0];

    switch (mode) {
    case 0: /* data register direct */
    case 1: /* addr register direct */
    do_addr_fault:
        gen_addr_fault(s);
        return;

    case 2: /* indirect */
        break;

    case 3: /* indirect post-increment */
        if (!is_load) {
            /* post-increment is not allowed */
            goto do_addr_fault;
        }
        break;

    case 4: /* indirect pre-decrement */
        if (is_load) {
            /* pre-decrement is not allowed */
            goto do_addr_fault;
        }
        /*
         * We want a bare copy of the address reg, without any pre-decrement
         * adjustment, as gen_lea would provide.
         */
        break;

    default:
        tmp = gen_lea_mode(env, s, mode, reg0, opsize);
        if (IS_NULL_QREG(tmp)) {
            goto do_addr_fault;
        }
        break;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);
    incr = tcg_constant_i32(opsize_bytes(opsize));

    if (is_load) {
        /* memory to register */
        /* Two passes: issue all loads first, then write the registers. */
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                r[i] = gen_load(s, opsize, addr, 1, IS_USER(s));
                tcg_gen_add_i32(addr, addr, incr);
            }
        }
        for (i = 0; i < 16; i++) {
            if (mask & (1 << i)) {
                tcg_gen_mov_i32(mreg(i), r[i]);
            }
        }
        if (mode == 3) {
            /* post-increment: movem (An)+,X */
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        }
    } else {
        /* register to memory */
        if (mode == 4) {
            /* pre-decrement: movem X,-(An) */
            for (i = 15; i >= 0; i--) {
                if ((mask << i) & 0x8000) {
                    tcg_gen_sub_i32(addr, addr, incr);
                    if (reg0 + 8 == i &&
                        m68k_feature(s->env, M68K_FEATURE_EXT_FULL)) {
                        /*
                         * M68020+: if the addressing register is the
                         * register moved to memory, the value written
                         * is the initial value decremented by the size of
                         * the operation, regardless of how many actual
                         * stores have been performed until this point.
                         * M68000/M68010: the value is the initial value.
                         */
                        tmp = tcg_temp_new();
                        tcg_gen_sub_i32(tmp, cpu_aregs[reg0], incr);
                        gen_store(s, opsize, addr, tmp, IS_USER(s));
                    } else {
                        gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    }
                }
            }
            tcg_gen_mov_i32(cpu_aregs[reg0], addr);
        } else {
            for (i = 0; i < 16; i++) {
                if (mask & (1 << i)) {
                    gen_store(s, opsize, addr, mreg(i), IS_USER(s));
                    tcg_gen_add_i32(addr, addr, incr);
                }
            }
        }
    }
}
2042 
2043 DISAS_INSN(movep)
2044 {
2045     uint8_t i;
2046     int16_t displ;
2047     TCGv reg;
2048     TCGv addr;
2049     TCGv abuf;
2050     TCGv dbuf;
2051 
2052     displ = read_im16(env, s);
2053 
2054     addr = AREG(insn, 0);
2055     reg = DREG(insn, 9);
2056 
2057     abuf = tcg_temp_new();
2058     tcg_gen_addi_i32(abuf, addr, displ);
2059     dbuf = tcg_temp_new();
2060 
2061     if (insn & 0x40) {
2062         i = 4;
2063     } else {
2064         i = 2;
2065     }
2066 
2067     if (insn & 0x80) {
2068         for ( ; i > 0 ; i--) {
2069             tcg_gen_shri_i32(dbuf, reg, (i - 1) * 8);
2070             tcg_gen_qemu_st_i32(dbuf, abuf, IS_USER(s), MO_UB);
2071             if (i > 1) {
2072                 tcg_gen_addi_i32(abuf, abuf, 2);
2073             }
2074         }
2075     } else {
2076         for ( ; i > 0 ; i--) {
2077             tcg_gen_qemu_ld_tl(dbuf, abuf, IS_USER(s), MO_UB);
2078             tcg_gen_deposit_i32(reg, reg, dbuf, (i - 1) * 8, 8);
2079             if (i > 1) {
2080                 tcg_gen_addi_i32(abuf, abuf, 2);
2081             }
2082         }
2083     }
2084 }
2085 
2086 DISAS_INSN(bitop_im)
2087 {
2088     int opsize;
2089     int op;
2090     TCGv src1;
2091     uint32_t mask;
2092     int bitnum;
2093     TCGv tmp;
2094     TCGv addr;
2095 
2096     if ((insn & 0x38) != 0)
2097         opsize = OS_BYTE;
2098     else
2099         opsize = OS_LONG;
2100     op = (insn >> 6) & 3;
2101 
2102     bitnum = read_im16(env, s);
2103     if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
2104         if (bitnum & 0xfe00) {
2105             disas_undef(env, s, insn);
2106             return;
2107         }
2108     } else {
2109         if (bitnum & 0xff00) {
2110             disas_undef(env, s, insn);
2111             return;
2112         }
2113     }
2114 
2115     SRC_EA(env, src1, opsize, 0, op ? &addr: NULL);
2116 
2117     gen_flush_flags(s);
2118     if (opsize == OS_BYTE)
2119         bitnum &= 7;
2120     else
2121         bitnum &= 31;
2122     mask = 1 << bitnum;
2123 
2124    tcg_gen_andi_i32(QREG_CC_Z, src1, mask);
2125 
2126     if (op) {
2127         tmp = tcg_temp_new();
2128         switch (op) {
2129         case 1: /* bchg */
2130             tcg_gen_xori_i32(tmp, src1, mask);
2131             break;
2132         case 2: /* bclr */
2133             tcg_gen_andi_i32(tmp, src1, ~mask);
2134             break;
2135         case 3: /* bset */
2136             tcg_gen_ori_i32(tmp, src1, mask);
2137             break;
2138         default: /* btst */
2139             break;
2140         }
2141         DEST_EA(env, insn, opsize, tmp, &addr);
2142     }
2143 }
2144 
2145 static TCGv gen_get_ccr(DisasContext *s)
2146 {
2147     TCGv dest;
2148 
2149     update_cc_op(s);
2150     dest = tcg_temp_new();
2151     gen_helper_get_ccr(dest, tcg_env);
2152     return dest;
2153 }
2154 
2155 static TCGv gen_get_sr(DisasContext *s)
2156 {
2157     TCGv ccr;
2158     TCGv sr;
2159 
2160     ccr = gen_get_ccr(s);
2161     sr = tcg_temp_new();
2162     tcg_gen_andi_i32(sr, QREG_SR, 0xffe0);
2163     tcg_gen_or_i32(sr, sr, ccr);
2164     return sr;
2165 }
2166 
2167 static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only)
2168 {
2169     if (ccr_only) {
2170         tcg_gen_movi_i32(QREG_CC_C, val & CCF_C ? 1 : 0);
2171         tcg_gen_movi_i32(QREG_CC_V, val & CCF_V ? -1 : 0);
2172         tcg_gen_movi_i32(QREG_CC_Z, val & CCF_Z ? 0 : 1);
2173         tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0);
2174         tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0);
2175     } else {
2176         /* Must writeback before changing security state. */
2177         do_writebacks(s);
2178         gen_helper_set_sr(tcg_env, tcg_constant_i32(val));
2179     }
2180     set_cc_op(s, CC_OP_FLAGS);
2181 }
2182 
2183 static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only)
2184 {
2185     if (ccr_only) {
2186         gen_helper_set_ccr(tcg_env, val);
2187     } else {
2188         /* Must writeback before changing security state. */
2189         do_writebacks(s);
2190         gen_helper_set_sr(tcg_env, val);
2191     }
2192     set_cc_op(s, CC_OP_FLAGS);
2193 }
2194 
2195 static void gen_move_to_sr(CPUM68KState *env, DisasContext *s, uint16_t insn,
2196                            bool ccr_only)
2197 {
2198     if ((insn & 0x3f) == 0x3c) {
2199         uint16_t val;
2200         val = read_im16(env, s);
2201         gen_set_sr_im(s, val, ccr_only);
2202     } else {
2203         TCGv src;
2204         SRC_EA(env, src, OS_WORD, 0, NULL);
2205         gen_set_sr(s, src, ccr_only);
2206     }
2207 }
2208 
/*
 * Immediate arithmetic/logic: ORI, ANDI, SUBI, ADDI, EORI, CMPI.
 * When the effective-address field is 0x3c, ORI/ANDI/EORI operate
 * on CCR (byte size) or SR (word size) instead of memory.
 */
DISAS_INSN(arith_im)
{
    int op;
    TCGv im;
    TCGv src1;
    TCGv dest;
    TCGv addr;
    int opsize;
    bool with_SR = ((insn & 0x3f) == 0x3c);

    op = (insn >> 9) & 7;
    opsize = insn_opsize(insn);
    /* Fetch the immediate operand, sign-extended to 32 bits. */
    switch (opsize) {
    case OS_BYTE:
        im = tcg_constant_i32((int8_t)read_im8(env, s));
        break;
    case OS_WORD:
        im = tcg_constant_i32((int16_t)read_im16(env, s));
        break;
    case OS_LONG:
        im = tcg_constant_i32(read_im32(env, s));
        break;
    default:
        g_assert_not_reached();
    }

    if (with_SR) {
        /* SR/CCR can only be used with andi/eori/ori */
        if (op == 2 || op == 3 || op == 6) {
            disas_undef(env, s, insn);
            return;
        }
        switch (opsize) {
        case OS_BYTE:
            src1 = gen_get_ccr(s);
            break;
        case OS_WORD:
            /* Modifying SR is privileged. */
            if (IS_USER(s)) {
                gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
                return;
            }
            src1 = gen_get_sr(s);
            break;
        default:
            /* OS_LONG; others already g_assert_not_reached.  */
            disas_undef(env, s, insn);
            return;
        }
    } else {
        /* cmpi (op == 6) does not write back, so needs no address. */
        SRC_EA(env, src1, opsize, 1, (op == 6) ? NULL : &addr);
    }
    dest = tcg_temp_new();
    switch (op) {
    case 0: /* ori */
        tcg_gen_or_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 1: /* andi */
        tcg_gen_and_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 2: /* subi */
        /* X is the borrow: src1 unsigned-less-than the subtrahend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, src1, im);
        tcg_gen_sub_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        set_cc_op(s, CC_OP_SUBB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 3: /* addi */
        tcg_gen_add_i32(dest, src1, im);
        gen_update_cc_add(dest, im, opsize);
        /* X is the carry: result unsigned-less-than the addend. */
        tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, im);
        set_cc_op(s, CC_OP_ADDB + opsize);
        DEST_EA(env, insn, opsize, dest, &addr);
        break;
    case 5: /* eori */
        tcg_gen_xor_i32(dest, src1, im);
        if (with_SR) {
            gen_set_sr(s, dest, opsize == OS_BYTE);
            gen_exit_tb(s);
        } else {
            DEST_EA(env, insn, opsize, dest, &addr);
            gen_logic_cc(s, dest, opsize);
        }
        break;
    case 6: /* cmpi */
        gen_update_cc_cmp(s, src1, im, opsize);
        break;
    default:
        abort();
    }
}
2313 
/*
 * CAS: single compare-and-swap, cas Dc,Du,<ea>.  Implemented with a
 * host atomic cmpxchg on the effective address.
 */
DISAS_INSN(cas)
{
    int opsize;
    TCGv addr;
    uint16_t ext;
    TCGv load;
    TCGv cmp;
    MemOp opc;

    /* Bits 9-10 encode the operand size. */
    switch ((insn >> 9) & 3) {
    case 1:
        opsize = OS_BYTE;
        opc = MO_SB;
        break;
    case 2:
        opsize = OS_WORD;
        opc = MO_TESW;
        break;
    case 3:
        opsize = OS_LONG;
        opc = MO_TESL;
        break;
    default:
        g_assert_not_reached();
    }

    ext = read_im16(env, s);

    /* cas Dc,Du,<EA> */

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Compare operand Dc, sign-extended to the operand size. */
    cmp = gen_extend(s, DREG(ext, 0), opsize, 1);

    /*
     * if  <EA> == Dc then
     *     <EA> = Du
     *     Dc = <EA> (because <EA> == Dc)
     * else
     *     Dc = <EA>
     */

    load = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_i32(load, addr, cmp, DREG(ext, 6),
                               IS_USER(s), opc);
    /* update flags before setting cmp to load */
    gen_update_cc_cmp(s, load, cmp, opsize);
    gen_partset_reg(opsize, DREG(ext, 0), load);

    /* Commit any post-increment/pre-decrement side effect to An. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        tcg_gen_addi_i32(AREG(insn, 0), addr, opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
2376 
2377 DISAS_INSN(cas2w)
2378 {
2379     uint16_t ext1, ext2;
2380     TCGv addr1, addr2;
2381 
2382     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2383 
2384     ext1 = read_im16(env, s);
2385 
2386     if (ext1 & 0x8000) {
2387         /* Address Register */
2388         addr1 = AREG(ext1, 12);
2389     } else {
2390         /* Data Register */
2391         addr1 = DREG(ext1, 12);
2392     }
2393 
2394     ext2 = read_im16(env, s);
2395     if (ext2 & 0x8000) {
2396         /* Address Register */
2397         addr2 = AREG(ext2, 12);
2398     } else {
2399         /* Data Register */
2400         addr2 = DREG(ext2, 12);
2401     }
2402 
2403     /*
2404      * if (R1) == Dc1 && (R2) == Dc2 then
2405      *     (R1) = Du1
2406      *     (R2) = Du2
2407      * else
2408      *     Dc1 = (R1)
2409      *     Dc2 = (R2)
2410      */
2411 
2412     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2413         gen_helper_exit_atomic(tcg_env);
2414     } else {
2415         TCGv regs = tcg_constant_i32(REG(ext2, 6) |
2416                                      (REG(ext1, 6) << 3) |
2417                                      (REG(ext2, 0) << 6) |
2418                                      (REG(ext1, 0) << 9));
2419         gen_helper_cas2w(tcg_env, regs, addr1, addr2);
2420     }
2421 
2422     /* Note that cas2w also assigned to env->cc_op.  */
2423     s->cc_op = CC_OP_CMPW;
2424     s->cc_op_synced = 1;
2425 }
2426 
2427 DISAS_INSN(cas2l)
2428 {
2429     uint16_t ext1, ext2;
2430     TCGv addr1, addr2, regs;
2431 
2432     /* cas2 Dc1:Dc2,Du1:Du2,(Rn1):(Rn2) */
2433 
2434     ext1 = read_im16(env, s);
2435 
2436     if (ext1 & 0x8000) {
2437         /* Address Register */
2438         addr1 = AREG(ext1, 12);
2439     } else {
2440         /* Data Register */
2441         addr1 = DREG(ext1, 12);
2442     }
2443 
2444     ext2 = read_im16(env, s);
2445     if (ext2 & 0x8000) {
2446         /* Address Register */
2447         addr2 = AREG(ext2, 12);
2448     } else {
2449         /* Data Register */
2450         addr2 = DREG(ext2, 12);
2451     }
2452 
2453     /*
2454      * if (R1) == Dc1 && (R2) == Dc2 then
2455      *     (R1) = Du1
2456      *     (R2) = Du2
2457      * else
2458      *     Dc1 = (R1)
2459      *     Dc2 = (R2)
2460      */
2461 
2462     regs = tcg_constant_i32(REG(ext2, 6) |
2463                             (REG(ext1, 6) << 3) |
2464                             (REG(ext2, 0) << 6) |
2465                             (REG(ext1, 0) << 9));
2466     if (tb_cflags(s->base.tb) & CF_PARALLEL) {
2467         gen_helper_cas2l_parallel(tcg_env, regs, addr1, addr2);
2468     } else {
2469         gen_helper_cas2l(tcg_env, regs, addr1, addr2);
2470     }
2471 
2472     /* Note that cas2l also assigned to env->cc_op.  */
2473     s->cc_op = CC_OP_CMPL;
2474     s->cc_op_synced = 1;
2475 }
2476 
2477 DISAS_INSN(byterev)
2478 {
2479     TCGv reg;
2480 
2481     reg = DREG(insn, 0);
2482     tcg_gen_bswap32_i32(reg, reg);
2483 }
2484 
2485 DISAS_INSN(move)
2486 {
2487     TCGv src;
2488     TCGv dest;
2489     int op;
2490     int opsize;
2491 
2492     switch (insn >> 12) {
2493     case 1: /* move.b */
2494         opsize = OS_BYTE;
2495         break;
2496     case 2: /* move.l */
2497         opsize = OS_LONG;
2498         break;
2499     case 3: /* move.w */
2500         opsize = OS_WORD;
2501         break;
2502     default:
2503         abort();
2504     }
2505     SRC_EA(env, src, opsize, 1, NULL);
2506     op = (insn >> 6) & 7;
2507     if (op == 1) {
2508         /* movea */
2509         /* The value will already have been sign extended.  */
2510         dest = AREG(insn, 9);
2511         tcg_gen_mov_i32(dest, src);
2512     } else {
2513         /* normal move */
2514         uint16_t dest_ea;
2515         dest_ea = ((insn >> 9) & 7) | (op << 3);
2516         DEST_EA(env, dest_ea, opsize, src, NULL);
2517         /* This will be correct because loads sign extend.  */
2518         gen_logic_cc(s, src, opsize);
2519     }
2520 }
2521 
/* NEGX: negate with extend; computes -(src + X) and updates XNZVC. */
DISAS_INSN(negx)
{
    TCGv z;
    TCGv src;
    TCGv addr;
    int opsize;

    opsize = insn_opsize(insn);
    SRC_EA(env, src, opsize, 1, &addr);

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) =  -(src + X);
     */

    z = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, z, QREG_CC_X, z);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, z, z, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Only the low bit of the double-width borrow is meaningful. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /*
     * Compute signed-overflow for negation.  The normal formula for
     * subtraction is (res ^ src) & (src ^ dest), but with dest==0
     * this simplifies to res & src.
     */

    tcg_gen_and_i32(QREG_CC_V, QREG_CC_N, src);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */

    DEST_EA(env, insn, opsize, QREG_CC_N, &addr);
}
2564 
2565 DISAS_INSN(lea)
2566 {
2567     TCGv reg;
2568     TCGv tmp;
2569 
2570     reg = AREG(insn, 9);
2571     tmp = gen_lea(env, s, insn, OS_LONG);
2572     if (IS_NULL_QREG(tmp)) {
2573         gen_addr_fault(s);
2574         return;
2575     }
2576     tcg_gen_mov_i32(reg, tmp);
2577 }
2578 
2579 DISAS_INSN(clr)
2580 {
2581     int opsize;
2582     TCGv zero;
2583 
2584     zero = tcg_constant_i32(0);
2585     opsize = insn_opsize(insn);
2586     DEST_EA(env, insn, opsize, zero, NULL);
2587     gen_logic_cc(s, zero, opsize);
2588 }
2589 
2590 DISAS_INSN(move_from_ccr)
2591 {
2592     TCGv ccr;
2593 
2594     ccr = gen_get_ccr(s);
2595     DEST_EA(env, insn, OS_WORD, ccr, NULL);
2596 }
2597 
2598 DISAS_INSN(neg)
2599 {
2600     TCGv src1;
2601     TCGv dest;
2602     TCGv addr;
2603     int opsize;
2604 
2605     opsize = insn_opsize(insn);
2606     SRC_EA(env, src1, opsize, 1, &addr);
2607     dest = tcg_temp_new();
2608     tcg_gen_neg_i32(dest, src1);
2609     set_cc_op(s, CC_OP_SUBB + opsize);
2610     gen_update_cc_add(dest, src1, opsize);
2611     tcg_gen_setcondi_i32(TCG_COND_NE, QREG_CC_X, dest, 0);
2612     DEST_EA(env, insn, opsize, dest, &addr);
2613 }
2614 
DISAS_INSN(move_to_ccr)
{
    /* MOVE <ea>,CCR shares the MOVE-to-SR path with ccr_only set. */
    gen_move_to_sr(env, s, insn, true);
}
2619 
2620 DISAS_INSN(not)
2621 {
2622     TCGv src1;
2623     TCGv dest;
2624     TCGv addr;
2625     int opsize;
2626 
2627     opsize = insn_opsize(insn);
2628     SRC_EA(env, src1, opsize, 1, &addr);
2629     dest = tcg_temp_new();
2630     tcg_gen_not_i32(dest, src1);
2631     DEST_EA(env, insn, opsize, dest, &addr);
2632     gen_logic_cc(s, dest, opsize);
2633 }
2634 
2635 DISAS_INSN(swap)
2636 {
2637     TCGv src1;
2638     TCGv src2;
2639     TCGv reg;
2640 
2641     src1 = tcg_temp_new();
2642     src2 = tcg_temp_new();
2643     reg = DREG(insn, 0);
2644     tcg_gen_shli_i32(src1, reg, 16);
2645     tcg_gen_shri_i32(src2, reg, 16);
2646     tcg_gen_or_i32(reg, src1, src2);
2647     gen_logic_cc(s, reg, OS_LONG);
2648 }
2649 
/* BKPT: breakpoint instruction. */
DISAS_INSN(bkpt)
{
#if defined(CONFIG_USER_ONLY)
    /* In user-mode emulation, report BKPT as a debug exception. */
    gen_exception(s, s->base.pc_next, EXCP_DEBUG);
#else
    /* BKPT #0 is the alternate semihosting instruction. */
    if ((insn & 7) == 0 && semihosting_test(s)) {
        gen_exception(s, s->pc, EXCP_SEMIHOSTING);
        return;
    }
    /* Otherwise BKPT raises an illegal-instruction exception. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
#endif
}
2663 
2664 DISAS_INSN(pea)
2665 {
2666     TCGv tmp;
2667 
2668     tmp = gen_lea(env, s, insn, OS_LONG);
2669     if (IS_NULL_QREG(tmp)) {
2670         gen_addr_fault(s);
2671         return;
2672     }
2673     gen_push(s, tmp);
2674 }
2675 
2676 DISAS_INSN(ext)
2677 {
2678     int op;
2679     TCGv reg;
2680     TCGv tmp;
2681 
2682     reg = DREG(insn, 0);
2683     op = (insn >> 6) & 7;
2684     tmp = tcg_temp_new();
2685     if (op == 3)
2686         tcg_gen_ext16s_i32(tmp, reg);
2687     else
2688         tcg_gen_ext8s_i32(tmp, reg);
2689     if (op == 2)
2690         gen_partset_reg(OS_WORD, reg, tmp);
2691     else
2692         tcg_gen_mov_i32(reg, tmp);
2693     gen_logic_cc(s, tmp, OS_LONG);
2694 }
2695 
2696 DISAS_INSN(tst)
2697 {
2698     int opsize;
2699     TCGv tmp;
2700 
2701     opsize = insn_opsize(insn);
2702     SRC_EA(env, tmp, opsize, 1, NULL);
2703     gen_logic_cc(s, tmp, opsize);
2704 }
2705 
DISAS_INSN(pulse)
{
    /* Implemented as a NOP.  */
}
2710 
DISAS_INSN(illegal)
{
    /* The ILLEGAL opcode always raises an illegal-instruction trap. */
    gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
}
2715 
/*
 * TAS: test-and-set.  Sets the flags from the byte operand, then sets
 * its top bit; the memory form uses an atomic fetch-or.
 */
DISAS_INSN(tas)
{
    int mode = extract32(insn, 3, 3);
    int reg0 = REG(insn, 0);

    if (mode == 0) {
        /* data register direct */
        TCGv dest = cpu_dregs[reg0];
        gen_logic_cc(s, dest, OS_BYTE);
        tcg_gen_ori_tl(dest, dest, 0x80);
    } else {
        TCGv src1, addr;

        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
        if (IS_NULL_QREG(addr)) {
            gen_addr_fault(s);
            return;
        }
        /* Atomically set bit 7; flags come from the fetched old value. */
        src1 = tcg_temp_new();
        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
                                   IS_USER(s), MO_SB);
        gen_logic_cc(s, src1, OS_BYTE);

        /* Commit any post-increment/pre-decrement side effect to An. */
        switch (mode) {
        case 3: /* Indirect postincrement.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
            break;
        case 4: /* Indirect predecrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
            break;
        }
    }
}
2749 
/*
 * MULS.L/MULU.L: 32x32 multiply.  Bit 11 of the extension word selects
 * signed vs unsigned; bit 10 selects a 64-bit result (Dh:Dl), which
 * requires the QUAD_MULDIV feature.
 */
DISAS_INSN(mull)
{
    uint16_t ext;
    TCGv src1;
    int sign;

    ext = read_im16(env, s);

    sign = ext & 0x800;

    if (ext & 0x400) {
        /* 64-bit result form. */
        if (!m68k_feature(s->env, M68K_FEATURE_QUAD_MULDIV)) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }

        SRC_EA(env, src1, OS_LONG, 0, NULL);

        /* Low half into CC_Z, high half into CC_N (used for N/Z below). */
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        } else {
            tcg_gen_mulu2_i32(QREG_CC_Z, QREG_CC_N, src1, DREG(ext, 12));
        }
        /* if Dl == Dh, 68040 returns low word */
        tcg_gen_mov_i32(DREG(ext, 0), QREG_CC_N);
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_Z);
        /* Z is clear iff any bit of the 64-bit product is set. */
        tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N);

        tcg_gen_movi_i32(QREG_CC_V, 0);
        tcg_gen_movi_i32(QREG_CC_C, 0);

        set_cc_op(s, CC_OP_FLAGS);
        return;
    }
    SRC_EA(env, src1, OS_LONG, 0, NULL);
    if (m68k_feature(s->env, M68K_FEATURE_M68K)) {
        /* 680x0: V is set if the result does not fit in 32 bits. */
        tcg_gen_movi_i32(QREG_CC_C, 0);
        if (sign) {
            tcg_gen_muls2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != (QREG_CC_N >> 31)) */
            tcg_gen_sari_i32(QREG_CC_Z, QREG_CC_N, 31);
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_Z);
        } else {
            tcg_gen_mulu2_i32(QREG_CC_N, QREG_CC_V, src1, DREG(ext, 12));
            /* QREG_CC_V is -(QREG_CC_V != 0), use QREG_CC_C as 0 */
            tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V,
                                   QREG_CC_V, QREG_CC_C);
        }
        tcg_gen_mov_i32(DREG(ext, 12), QREG_CC_N);

        tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

        set_cc_op(s, CC_OP_FLAGS);
    } else {
        /*
         * The upper 32 bits of the product are discarded, so
         * muls.l and mulu.l are functionally equivalent.
         */
        tcg_gen_mul_i32(DREG(ext, 12), src1, DREG(ext, 12));
        gen_logic_cc(s, DREG(ext, 12), OS_LONG);
    }
}
2813 
2814 static void gen_link(DisasContext *s, uint16_t insn, int32_t offset)
2815 {
2816     TCGv reg;
2817     TCGv tmp;
2818 
2819     reg = AREG(insn, 0);
2820     tmp = tcg_temp_new();
2821     tcg_gen_subi_i32(tmp, QREG_SP, 4);
2822     gen_store(s, OS_LONG, tmp, reg, IS_USER(s));
2823     if ((insn & 7) != 7) {
2824         tcg_gen_mov_i32(reg, tmp);
2825     }
2826     tcg_gen_addi_i32(QREG_SP, tmp, offset);
2827 }
2828 
2829 DISAS_INSN(link)
2830 {
2831     int16_t offset;
2832 
2833     offset = read_im16(env, s);
2834     gen_link(s, insn, offset);
2835 }
2836 
2837 DISAS_INSN(linkl)
2838 {
2839     int32_t offset;
2840 
2841     offset = read_im32(env, s);
2842     gen_link(s, insn, offset);
2843 }
2844 
2845 DISAS_INSN(unlk)
2846 {
2847     TCGv src;
2848     TCGv reg;
2849     TCGv tmp;
2850 
2851     src = tcg_temp_new();
2852     reg = AREG(insn, 0);
2853     tcg_gen_mov_i32(src, reg);
2854     tmp = gen_load(s, OS_LONG, src, 0, IS_USER(s));
2855     tcg_gen_mov_i32(reg, tmp);
2856     tcg_gen_addi_i32(QREG_SP, src, 4);
2857 }
2858 
#if !defined(CONFIG_USER_ONLY)
/* RESET: privileged; delegated to a helper (system emulation only). */
DISAS_INSN(reset)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    gen_helper_reset(tcg_env);
}
#endif
2870 
DISAS_INSN(nop)
{
    /* No operation.  */
}
2874 
2875 DISAS_INSN(rtd)
2876 {
2877     TCGv tmp;
2878     int16_t offset = read_im16(env, s);
2879 
2880     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2881     tcg_gen_addi_i32(QREG_SP, QREG_SP, offset + 4);
2882     gen_jmp(s, tmp);
2883 }
2884 
2885 DISAS_INSN(rtr)
2886 {
2887     TCGv tmp;
2888     TCGv ccr;
2889     TCGv sp;
2890 
2891     sp = tcg_temp_new();
2892     ccr = gen_load(s, OS_WORD, QREG_SP, 0, IS_USER(s));
2893     tcg_gen_addi_i32(sp, QREG_SP, 2);
2894     tmp = gen_load(s, OS_LONG, sp, 0, IS_USER(s));
2895     tcg_gen_addi_i32(QREG_SP, sp, 4);
2896 
2897     gen_set_sr(s, ccr, true);
2898 
2899     gen_jmp(s, tmp);
2900 }
2901 
2902 DISAS_INSN(rts)
2903 {
2904     TCGv tmp;
2905 
2906     tmp = gen_load(s, OS_LONG, QREG_SP, 0, IS_USER(s));
2907     tcg_gen_addi_i32(QREG_SP, QREG_SP, 4);
2908     gen_jmp(s, tmp);
2909 }
2910 
2911 DISAS_INSN(jump)
2912 {
2913     TCGv tmp;
2914 
2915     /*
2916      * Load the target address first to ensure correct exception
2917      * behavior.
2918      */
2919     tmp = gen_lea(env, s, insn, OS_LONG);
2920     if (IS_NULL_QREG(tmp)) {
2921         gen_addr_fault(s);
2922         return;
2923     }
2924     if ((insn & 0x40) == 0) {
2925         /* jsr */
2926         gen_push(s, tcg_constant_i32(s->pc));
2927     }
2928     gen_jmp(s, tmp);
2929 }
2930 
/*
 * ADDQ/SUBQ: add or subtract a "quick" immediate 1..8 (encoded 0
 * means 8).  Bit 8 of the opcode selects subtract.
 */
DISAS_INSN(addsubq)
{
    TCGv src;
    TCGv dest;
    TCGv val;
    int imm;
    TCGv addr;
    int opsize;

    if ((insn & 070) == 010) {
        /* Operation on address register is always long.  */
        opsize = OS_LONG;
    } else {
        opsize = insn_opsize(insn);
    }
    SRC_EA(env, src, opsize, 1, &addr);
    imm = (insn >> 9) & 7;
    if (imm == 0) {
        imm = 8;
    }
    val = tcg_constant_i32(imm);
    dest = tcg_temp_new();
    tcg_gen_mov_i32(dest, src);
    /* Same address-register test as the octal 070/010 check above. */
    if ((insn & 0x38) == 0x08) {
        /*
         * Don't update condition codes if the destination is an
         * address register.
         */
        if (insn & 0x0100) {
            tcg_gen_sub_i32(dest, dest, val);
        } else {
            tcg_gen_add_i32(dest, dest, val);
        }
    } else {
        if (insn & 0x0100) {
            /* X is the borrow, computed before the subtract. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            tcg_gen_sub_i32(dest, dest, val);
            set_cc_op(s, CC_OP_SUBB + opsize);
        } else {
            tcg_gen_add_i32(dest, dest, val);
            /* X is the carry: result unsigned-less-than the addend. */
            tcg_gen_setcond_i32(TCG_COND_LTU, QREG_CC_X, dest, val);
            set_cc_op(s, CC_OP_ADDB + opsize);
        }
        gen_update_cc_add(dest, val, opsize);
    }
    DEST_EA(env, insn, opsize, dest, &addr);
}
2978 
/*
 * BRA/BSR/Bcc.  The 8-bit displacement lives in the opcode; the value
 * 0 selects a 16-bit displacement word and -1 (0xff) a 32-bit one.
 */
DISAS_INSN(branch)
{
    int32_t offset;
    uint32_t base;
    int op;

    /* Displacements are relative to the address after the opcode word. */
    base = s->pc;
    op = (insn >> 8) & 0xf;
    offset = (int8_t)insn;
    if (offset == 0) {
        offset = (int16_t)read_im16(env, s);
    } else if (offset == -1) {
        offset = read_im32(env, s);
    }
    if (op == 1) {
        /* bsr */
        gen_push(s, tcg_constant_i32(s->pc));
    }
    if (op > 1) {
        /* Bcc */
        /* Branch over the taken path when the inverted condition holds. */
        TCGLabel *l1 = gen_new_label();
        gen_jmpcc(s, ((insn >> 8) & 0xf) ^ 1, l1);
        gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
        gen_set_label(l1);
        gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
    } else {
        /* Unconditional branch.  */
        update_cc_op(s);
        gen_jmp_tb(s, 0, base + offset, s->base.pc_next);
    }
}
3010 
3011 DISAS_INSN(moveq)
3012 {
3013     tcg_gen_movi_i32(DREG(insn, 9), (int8_t)insn);
3014     gen_logic_cc(s, DREG(insn, 9), OS_LONG);
3015 }
3016 
3017 DISAS_INSN(mvzs)
3018 {
3019     int opsize;
3020     TCGv src;
3021     TCGv reg;
3022 
3023     if (insn & 0x40)
3024         opsize = OS_WORD;
3025     else
3026         opsize = OS_BYTE;
3027     SRC_EA(env, src, opsize, (insn & 0x80) == 0, NULL);
3028     reg = DREG(insn, 9);
3029     tcg_gen_mov_i32(reg, src);
3030     gen_logic_cc(s, src, opsize);
3031 }
3032 
3033 DISAS_INSN(or)
3034 {
3035     TCGv reg;
3036     TCGv dest;
3037     TCGv src;
3038     TCGv addr;
3039     int opsize;
3040 
3041     opsize = insn_opsize(insn);
3042     reg = gen_extend(s, DREG(insn, 9), opsize, 0);
3043     dest = tcg_temp_new();
3044     if (insn & 0x100) {
3045         SRC_EA(env, src, opsize, 0, &addr);
3046         tcg_gen_or_i32(dest, src, reg);
3047         DEST_EA(env, insn, opsize, dest, &addr);
3048     } else {
3049         SRC_EA(env, src, opsize, 0, NULL);
3050         tcg_gen_or_i32(dest, src, reg);
3051         gen_partset_reg(opsize, DREG(insn, 9), dest);
3052     }
3053     gen_logic_cc(s, dest, opsize);
3054 }
3055 
3056 DISAS_INSN(suba)
3057 {
3058     TCGv src;
3059     TCGv reg;
3060 
3061     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3062     reg = AREG(insn, 9);
3063     tcg_gen_sub_i32(reg, reg, src);
3064 }
3065 
/*
 * Generate subtract-with-extend: QREG_CC_N = dest - (src + X),
 * updating X/N/Z/V/C.  The caller writes QREG_CC_N back.
 */
static inline void gen_subx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform subtract with borrow.
     * (X, N) = dest - (src + X);
     */

    zero = tcg_constant_i32(0);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, src, zero, QREG_CC_X, zero);
    tcg_gen_sub2_i32(QREG_CC_N, QREG_CC_X, dest, zero, QREG_CC_N, QREG_CC_X);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    /* Only the low bit of the double-width borrow is meaningful. */
    tcg_gen_andi_i32(QREG_CC_X, QREG_CC_X, 1);

    /* Compute signed-overflow for subtract.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, dest);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_and_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3098 
3099 DISAS_INSN(subx_reg)
3100 {
3101     TCGv dest;
3102     TCGv src;
3103     int opsize;
3104 
3105     opsize = insn_opsize(insn);
3106 
3107     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3108     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3109 
3110     gen_subx(s, src, dest, opsize);
3111 
3112     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3113 }
3114 
/* SUBX -(Ay),-(Ax): memory predecrement form of subtract-with-extend. */
DISAS_INSN(subx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Decrement Ay in place, then load the source operand. */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Decrement Ax in place, then load the destination operand. */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_subx(s, src, dest, opsize);

    /* gen_subx leaves the result in QREG_CC_N. */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3137 
3138 DISAS_INSN(mov3q)
3139 {
3140     TCGv src;
3141     int val;
3142 
3143     val = (insn >> 9) & 7;
3144     if (val == 0) {
3145         val = -1;
3146     }
3147     src = tcg_constant_i32(val);
3148     gen_logic_cc(s, src, OS_LONG);
3149     DEST_EA(env, insn, OS_LONG, src, NULL);
3150 }
3151 
3152 DISAS_INSN(cmp)
3153 {
3154     TCGv src;
3155     TCGv reg;
3156     int opsize;
3157 
3158     opsize = insn_opsize(insn);
3159     SRC_EA(env, src, opsize, 1, NULL);
3160     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
3161     gen_update_cc_cmp(s, reg, src, opsize);
3162 }
3163 
3164 DISAS_INSN(cmpa)
3165 {
3166     int opsize;
3167     TCGv src;
3168     TCGv reg;
3169 
3170     if (insn & 0x100) {
3171         opsize = OS_LONG;
3172     } else {
3173         opsize = OS_WORD;
3174     }
3175     SRC_EA(env, src, opsize, 1, NULL);
3176     reg = AREG(insn, 9);
3177     gen_update_cc_cmp(s, reg, src, OS_LONG);
3178 }
3179 
3180 DISAS_INSN(cmpm)
3181 {
3182     int opsize = insn_opsize(insn);
3183     TCGv src, dst;
3184 
3185     /* Post-increment load (mode 3) from Ay.  */
3186     src = gen_ea_mode(env, s, 3, REG(insn, 0), opsize,
3187                       NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3188     /* Post-increment load (mode 3) from Ax.  */
3189     dst = gen_ea_mode(env, s, 3, REG(insn, 9), opsize,
3190                       NULL_QREG, NULL, EA_LOADS, IS_USER(s));
3191 
3192     gen_update_cc_cmp(s, dst, src, opsize);
3193 }
3194 
3195 DISAS_INSN(eor)
3196 {
3197     TCGv src;
3198     TCGv dest;
3199     TCGv addr;
3200     int opsize;
3201 
3202     opsize = insn_opsize(insn);
3203 
3204     SRC_EA(env, src, opsize, 0, &addr);
3205     dest = tcg_temp_new();
3206     tcg_gen_xor_i32(dest, src, DREG(insn, 9));
3207     gen_logic_cc(s, dest, opsize);
3208     DEST_EA(env, insn, opsize, dest, &addr);
3209 }
3210 
3211 static void do_exg(TCGv reg1, TCGv reg2)
3212 {
3213     TCGv temp = tcg_temp_new();
3214     tcg_gen_mov_i32(temp, reg1);
3215     tcg_gen_mov_i32(reg1, reg2);
3216     tcg_gen_mov_i32(reg2, temp);
3217 }
3218 
DISAS_INSN(exg_dd)
{
    /* EXG: exchange data registers Dx and Dy */
    do_exg(DREG(insn, 9), DREG(insn, 0));
}
3224 
DISAS_INSN(exg_aa)
{
    /* EXG: exchange address registers Ax and Ay */
    do_exg(AREG(insn, 9), AREG(insn, 0));
}
3230 
DISAS_INSN(exg_da)
{
    /* EXG: exchange data register Dx with address register Ay */
    do_exg(DREG(insn, 9), AREG(insn, 0));
}
3236 
3237 DISAS_INSN(and)
3238 {
3239     TCGv src;
3240     TCGv reg;
3241     TCGv dest;
3242     TCGv addr;
3243     int opsize;
3244 
3245     dest = tcg_temp_new();
3246 
3247     opsize = insn_opsize(insn);
3248     reg = DREG(insn, 9);
3249     if (insn & 0x100) {
3250         SRC_EA(env, src, opsize, 0, &addr);
3251         tcg_gen_and_i32(dest, src, reg);
3252         DEST_EA(env, insn, opsize, dest, &addr);
3253     } else {
3254         SRC_EA(env, src, opsize, 0, NULL);
3255         tcg_gen_and_i32(dest, src, reg);
3256         gen_partset_reg(opsize, reg, dest);
3257     }
3258     gen_logic_cc(s, dest, opsize);
3259 }
3260 
3261 DISAS_INSN(adda)
3262 {
3263     TCGv src;
3264     TCGv reg;
3265 
3266     SRC_EA(env, src, (insn & 0x100) ? OS_LONG : OS_WORD, 1, NULL);
3267     reg = AREG(insn, 9);
3268     tcg_gen_add_i32(reg, reg, src);
3269 }
3270 
/*
 * Emit code for add-with-extend: src + dest + X.
 * Both src and dest must already be sign-extended to 32 bits for
 * opsize (callers use gen_extend).  All CCR flags are updated; Z can
 * only be cleared, never set.  The full 32-bit result is left in
 * QREG_CC_N for the caller to store.
 */
static inline void gen_addx(DisasContext *s, TCGv src, TCGv dest, int opsize)
{
    TCGv tmp, zero;

    gen_flush_flags(s); /* compute old Z */

    /*
     * Perform addition with carry.
     * (X, N) = src + dest + X;
     */

    zero = tcg_constant_i32(0);
    /* Accumulate X + dest, then + src, as double-word (N, X) sums.  */
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_X, zero, dest, zero);
    tcg_gen_add2_i32(QREG_CC_N, QREG_CC_X, QREG_CC_N, QREG_CC_X, src, zero);
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);

    /* Compute signed-overflow for addition.  */

    tmp = tcg_temp_new();
    tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
    tcg_gen_xor_i32(tmp, dest, src);
    tcg_gen_andc_i32(QREG_CC_V, QREG_CC_V, tmp);

    /* Copy the rest of the results into place.  */
    tcg_gen_or_i32(QREG_CC_Z, QREG_CC_Z, QREG_CC_N); /* !Z is sticky */
    tcg_gen_mov_i32(QREG_CC_C, QREG_CC_X);

    set_cc_op(s, CC_OP_FLAGS);

    /* result is in QREG_CC_N */
}
3302 
3303 DISAS_INSN(addx_reg)
3304 {
3305     TCGv dest;
3306     TCGv src;
3307     int opsize;
3308 
3309     opsize = insn_opsize(insn);
3310 
3311     dest = gen_extend(s, DREG(insn, 9), opsize, 1);
3312     src = gen_extend(s, DREG(insn, 0), opsize, 1);
3313 
3314     gen_addx(s, src, dest, opsize);
3315 
3316     gen_partset_reg(opsize, DREG(insn, 9), QREG_CC_N);
3317 }
3318 
/* ADDX with memory operands: -(Ay),-(Ax), both predecremented.  */
DISAS_INSN(addx_mem)
{
    TCGv src;
    TCGv addr_src;
    TCGv dest;
    TCGv addr_dest;
    int opsize;

    opsize = insn_opsize(insn);

    /* Predecrement Ay, then load the source operand.  */
    addr_src = AREG(insn, 0);
    tcg_gen_subi_i32(addr_src, addr_src, opsize_bytes(opsize));
    src = gen_load(s, opsize, addr_src, 1, IS_USER(s));

    /* Predecrement Ax, then load the destination operand.  */
    addr_dest = AREG(insn, 9);
    tcg_gen_subi_i32(addr_dest, addr_dest, opsize_bytes(opsize));
    dest = gen_load(s, opsize, addr_dest, 1, IS_USER(s));

    gen_addx(s, src, dest, opsize);

    /* gen_addx left the result in QREG_CC_N; write it back to (Ax).  */
    gen_store(s, opsize, addr_dest, QREG_CC_N, IS_USER(s));
}
3341 
/*
 * Immediate-count shift (LSL/LSR/ASL/ASR #<count>,Dn).
 * Bits 9-11 encode the count 1..8 (0 means 8), bit 8 the direction
 * (set = left), bit 3 logical vs arithmetic.  Flags are fully
 * computed here and the result written back to Dn.
 */
static inline void shift_im(DisasContext *s, uint16_t insn, int opsize)
{
    int count = (insn >> 9) & 7;
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);

    /* A count field of 0 encodes a shift by 8.  */
    if (count == 0) {
        count = 8;
    }

    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the last bit shifted out.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, bits - count);
        tcg_gen_shli_i32(QREG_CC_N, reg, count);

        /*
         * Note that ColdFire always clears V (done above),
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation.
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            /* if shift count >= bits, V is (reg != 0) */
            if (count >= bits) {
                tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, reg, QREG_CC_V);
            } else {
                TCGv t0 = tcg_temp_new();
                /* Compare the sign bit against every bit shifted
                   through the sign position.  */
                tcg_gen_sari_i32(QREG_CC_V, reg, bits - 1);
                tcg_gen_sari_i32(t0, reg, bits - count - 1);
                tcg_gen_negsetcond_i32(TCG_COND_NE, QREG_CC_V, QREG_CC_V, t0);
            }
        }
    } else {
        /* C is the last bit shifted out.  */
        tcg_gen_shri_i32(QREG_CC_C, reg, count - 1);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, reg, count);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, reg, count);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3392 
/*
 * Register-count shift (LSL/LSR/ASL/ASR Dx,Dy).
 * The count is taken from Dx modulo 64; the shift is performed in a
 * 64-bit temporary so the last bit shifted out (the carry) is easy
 * to recover.  X is only updated when the count is non-zero.
 */
static inline void shift_reg(DisasContext *s, uint16_t insn, int opsize)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    int bits = opsize_bytes(opsize) * 8;
    TCGv reg = gen_extend(s, DREG(insn, 0), opsize, !logical);
    TCGv s32;
    TCGv_i64 t64, s64;

    t64 = tcg_temp_new_i64();
    s64 = tcg_temp_new_i64();
    s32 = tcg_temp_new();

    /*
     * Note that m68k truncates the shift count modulo 64, not 32.
     * In addition, a 64-bit shift makes it easy to find "the last
     * bit shifted out", for the carry flag.
     */
    tcg_gen_andi_i32(s32, DREG(insn, 9), 63);
    tcg_gen_extu_i32_i64(s64, s32);
    tcg_gen_extu_i32_i64(t64, reg);

    /* Optimistically set V=0.  Also used as a zero source below.  */
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        tcg_gen_shl_i64(t64, t64, s64);

        if (opsize == OS_LONG) {
            tcg_gen_extr_i64_i32(QREG_CC_N, QREG_CC_C, t64);
            /* Note that C=0 if shift count is 0, and we get that for free.  */
        } else {
            /* For byte/word, the carry bit sits inside the low word;
               force C=0 explicitly when the count is zero.  */
            TCGv zero = tcg_constant_i32(0);
            tcg_gen_extrl_i64_i32(QREG_CC_N, t64);
            tcg_gen_shri_i32(QREG_CC_C, QREG_CC_N, bits);
            tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                                s32, zero, zero, QREG_CC_C);
        }
        tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);

        /*
         * M68000 sets V if the most significant bit is changed at
         * any time during the shift operation.  Do this via creating
         * an extension of the sign bit, comparing, and discarding
         * the bits below the sign bit.  I.e.
         *     int64_t s = (intN_t)reg;
         *     int64_t t = (int64_t)(intN_t)reg << count;
         *     V = ((s ^ t) & (-1 << (bits - 1))) != 0
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            TCGv_i64 tt = tcg_constant_i64(32);
            /* if shift is greater than 32, use 32 */
            tcg_gen_movcond_i64(TCG_COND_GT, s64, s64, tt, tt, s64);
            /* Sign extend the input to 64 bits; re-do the shift.  */
            tcg_gen_ext_i32_i64(t64, reg);
            tcg_gen_shl_i64(s64, t64, s64);
            /* Clear all bits that are unchanged.  */
            tcg_gen_xor_i64(t64, t64, s64);
            /* Ignore the bits below the sign bit.  */
            tcg_gen_andi_i64(t64, t64, -1ULL << (bits - 1));
            /* If any bits remain set, we have overflow.  */
            tcg_gen_negsetcond_i64(TCG_COND_NE, t64, t64, tcg_constant_i64(0));
            tcg_gen_extrl_i64_i32(QREG_CC_V, t64);
        }
    } else {
        /* Shift the value into the high half first so the last bit
           shifted out lands in the low half (bit 31).  */
        tcg_gen_shli_i64(t64, t64, 32);
        if (logical) {
            tcg_gen_shr_i64(t64, t64, s64);
        } else {
            tcg_gen_sar_i64(t64, t64, s64);
        }
        tcg_gen_extr_i64_i32(QREG_CC_C, QREG_CC_N, t64);

        /* Note that C=0 if shift count is 0, and we get that for free.  */
        tcg_gen_shri_i32(QREG_CC_C, QREG_CC_C, 31);

        /* X = C, but only if the shift count was non-zero.  */
        tcg_gen_movcond_i32(TCG_COND_NE, QREG_CC_X, s32, QREG_CC_V,
                            QREG_CC_C, QREG_CC_X);
    }
    gen_ext(QREG_CC_N, QREG_CC_N, opsize, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);

    /* Write back the result.  */
    gen_partset_reg(opsize, DREG(insn, 0), QREG_CC_N);
    set_cc_op(s, CC_OP_FLAGS);
}
3483 
/* Size-specific entry points for the immediate-count shifts.  */
DISAS_INSN(shift8_im)
{
    shift_im(s, insn, OS_BYTE);
}

DISAS_INSN(shift16_im)
{
    shift_im(s, insn, OS_WORD);
}

DISAS_INSN(shift_im)
{
    shift_im(s, insn, OS_LONG);
}

/* Size-specific entry points for the register-count shifts.  */
DISAS_INSN(shift8_reg)
{
    shift_reg(s, insn, OS_BYTE);
}

DISAS_INSN(shift16_reg)
{
    shift_reg(s, insn, OS_WORD);
}

DISAS_INSN(shift_reg)
{
    shift_reg(s, insn, OS_LONG);
}
3513 
/*
 * Memory shift: always word-sized and always by exactly one bit.
 * Bit 8 selects direction (set = left), bit 3 logical vs arithmetic.
 */
DISAS_INSN(shift_mem)
{
    int logical = insn & 8;
    int left = insn & 0x100;
    TCGv src;
    TCGv addr;

    SRC_EA(env, src, OS_WORD, !logical, &addr);
    tcg_gen_movi_i32(QREG_CC_V, 0);
    if (left) {
        /* C is the bit shifted out (bit 15 of the word).  */
        tcg_gen_shri_i32(QREG_CC_C, src, 15);
        tcg_gen_shli_i32(QREG_CC_N, src, 1);

        /*
         * Note that ColdFire always clears V,
         * while M68000 sets if the most significant bit is changed at
         * any time during the shift operation
         */
        if (!logical && m68k_feature(s->env, M68K_FEATURE_M68K)) {
            src = gen_extend(s, src, OS_WORD, 1);
            tcg_gen_xor_i32(QREG_CC_V, QREG_CC_N, src);
        }
    } else {
        /* C is the bit shifted out (bit 0); masked to 1 below.  */
        tcg_gen_mov_i32(QREG_CC_C, src);
        if (logical) {
            tcg_gen_shri_i32(QREG_CC_N, src, 1);
        } else {
            tcg_gen_sari_i32(QREG_CC_N, src, 1);
        }
    }

    gen_ext(QREG_CC_N, QREG_CC_N, OS_WORD, 1);
    tcg_gen_andi_i32(QREG_CC_C, QREG_CC_C, 1);
    tcg_gen_mov_i32(QREG_CC_Z, QREG_CC_N);
    tcg_gen_mov_i32(QREG_CC_X, QREG_CC_C);

    DEST_EA(env, insn, OS_WORD, QREG_CC_N, &addr);
    set_cc_op(s, CC_OP_FLAGS);
}
3553 
/*
 * Emit a plain rotate (ROL/ROR, without X) of 'reg' by 'shift' bits
 * and compute N/Z/C/V.  8- and 16-bit inputs are replicated across
 * the 32-bit word so a single 32-bit rotate gives the right result,
 * then sign-extended back afterwards.  X is not affected; V is
 * always cleared.
 */
static void rotate(TCGv reg, TCGv shift, int left, int size)
{
    switch (size) {
    case 8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_i32(reg, reg);
        tcg_gen_muli_i32(reg, reg, 0x01010101);
        goto do_long;
    case 16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_i32(reg, reg, reg, 16, 16);
        goto do_long;
    do_long:
    default:
        if (left) {
            tcg_gen_rotl_i32(reg, reg, shift);
        } else {
            tcg_gen_rotr_i32(reg, reg, shift);
        }
    }

    /* compute flags */

    switch (size) {
    case 8:
        tcg_gen_ext8s_i32(reg, reg);
        break;
    case 16:
        tcg_gen_ext16s_i32(reg, reg);
        break;
    default:
        break;
    }

    /* QREG_CC_X is not affected */

    tcg_gen_mov_i32(QREG_CC_N, reg);
    tcg_gen_mov_i32(QREG_CC_Z, reg);

    /* C is the last bit rotated around: bit 0 for left, bit 31
       (the replicated sign bit for sub-long sizes) for right.  */
    if (left) {
        tcg_gen_andi_i32(QREG_CC_C, reg, 1);
    } else {
        tcg_gen_shri_i32(QREG_CC_C, reg, 31);
    }

    tcg_gen_movi_i32(QREG_CC_V, 0); /* always cleared */
}
3601 
3602 static void rotate_x_flags(TCGv reg, TCGv X, int size)
3603 {
3604     switch (size) {
3605     case 8:
3606         tcg_gen_ext8s_i32(reg, reg);
3607         break;
3608     case 16:
3609         tcg_gen_ext16s_i32(reg, reg);
3610         break;
3611     default:
3612         break;
3613     }
3614     tcg_gen_mov_i32(QREG_CC_N, reg);
3615     tcg_gen_mov_i32(QREG_CC_Z, reg);
3616     tcg_gen_mov_i32(QREG_CC_X, X);
3617     tcg_gen_mov_i32(QREG_CC_C, X);
3618     tcg_gen_movi_i32(QREG_CC_V, 0);
3619 }
3620 
/* Result of rotate_x() is valid if 0 <= shift <= size */
/*
 * Rotate-through-X (ROXL/ROXR) for 8- and 16-bit operands, treating
 * the value plus the X bit as a (size+1)-bit quantity.  The rotated
 * value is left in 'reg'; the new X bit is returned (caller feeds it
 * to rotate_x_flags).
 */
static TCGv rotate_x(TCGv reg, TCGv shift, int left, int size)
{
    TCGv X, shl, shr, shx, sz, zero;

    sz = tcg_constant_i32(size);

    shr = tcg_temp_new();
    shl = tcg_temp_new();
    shx = tcg_temp_new();
    if (left) {
        tcg_gen_mov_i32(shl, shift);      /* shl = shift */
        tcg_gen_movi_i32(shr, size + 1);
        tcg_gen_sub_i32(shr, shr, shift); /* shr = size + 1 - shift */
        tcg_gen_subi_i32(shx, shift, 1);  /* shx = shift - 1 */
        /* shx = shx < 0 ? size : shx; */
        zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, shx, shx, zero, sz, shx);
    } else {
        tcg_gen_mov_i32(shr, shift);      /* shr = shift */
        tcg_gen_movi_i32(shl, size + 1);
        tcg_gen_sub_i32(shl, shl, shift); /* shl = size + 1 - shift */
        tcg_gen_sub_i32(shx, sz, shift); /* shx = size - shift */
    }

    /* reg = (reg << shl) | (reg >> shr) | (x << shx); */

    tcg_gen_shl_i32(shl, reg, shl);
    tcg_gen_shr_i32(shr, reg, shr);
    tcg_gen_or_i32(reg, shl, shr);
    tcg_gen_shl_i32(shx, QREG_CC_X, shx);
    tcg_gen_or_i32(reg, reg, shx);

    /* X = (reg >> size) & 1 */

    X = tcg_temp_new();
    tcg_gen_extract_i32(X, reg, size, 1);

    return X;
}
3661 
/* Result of rotate32_x() is valid if 0 <= shift < 33 */
/*
 * 32-bit rotate-through-X (ROXL.L/ROXR.L): the register plus the X
 * bit form a 33-bit quantity, rotated inside a 64-bit temporary.
 * The rotated value is written back to 'reg' and the new X bit is
 * returned; both are left unchanged when shift == 0.
 */
static TCGv rotate32_x(TCGv reg, TCGv shift, int left)
{
    TCGv_i64 t0, shift64;
    TCGv X, lo, hi, zero;

    shift64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(shift64, shift);

    t0 = tcg_temp_new_i64();

    X = tcg_temp_new();
    lo = tcg_temp_new();
    hi = tcg_temp_new();

    if (left) {
        /* create [reg:X:..] */

        tcg_gen_shli_i32(lo, QREG_CC_X, 31);
        tcg_gen_concat_i32_i64(t0, lo, reg);

        /* rotate */

        tcg_gen_rotl_i64(t0, t0, shift64);

        /* result is [reg:..:reg:X] */

        tcg_gen_extr_i64_i32(lo, hi, t0);
        tcg_gen_andi_i32(X, lo, 1);

        tcg_gen_shri_i32(lo, lo, 1);
    } else {
        /* create [..:X:reg] */

        tcg_gen_concat_i32_i64(t0, reg, QREG_CC_X);

        tcg_gen_rotr_i64(t0, t0, shift64);

        /* result is value: [X:reg:..:reg] */

        tcg_gen_extr_i64_i32(lo, hi, t0);

        /* extract X */

        tcg_gen_shri_i32(X, hi, 31);

        /* extract result */

        tcg_gen_shli_i32(hi, hi, 1);
    }
    /* Recombine the two halves into the rotated value.  */
    tcg_gen_or_i32(lo, lo, hi);

    /* if shift == 0, register and X are not affected */

    zero = tcg_constant_i32(0);
    tcg_gen_movcond_i32(TCG_COND_EQ, X, shift, zero, QREG_CC_X, X);
    tcg_gen_movcond_i32(TCG_COND_EQ, reg, shift, zero, reg, lo);

    return X;
}
3722 
3723 DISAS_INSN(rotate_im)
3724 {
3725     TCGv shift;
3726     int tmp;
3727     int left = (insn & 0x100);
3728 
3729     tmp = (insn >> 9) & 7;
3730     if (tmp == 0) {
3731         tmp = 8;
3732     }
3733 
3734     shift = tcg_constant_i32(tmp);
3735     if (insn & 8) {
3736         rotate(DREG(insn, 0), shift, left, 32);
3737     } else {
3738         TCGv X = rotate32_x(DREG(insn, 0), shift, left);
3739         rotate_x_flags(DREG(insn, 0), X, 32);
3740     }
3741 
3742     set_cc_op(s, CC_OP_FLAGS);
3743 }
3744 
3745 DISAS_INSN(rotate8_im)
3746 {
3747     int left = (insn & 0x100);
3748     TCGv reg;
3749     TCGv shift;
3750     int tmp;
3751 
3752     reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
3753 
3754     tmp = (insn >> 9) & 7;
3755     if (tmp == 0) {
3756         tmp = 8;
3757     }
3758 
3759     shift = tcg_constant_i32(tmp);
3760     if (insn & 8) {
3761         rotate(reg, shift, left, 8);
3762     } else {
3763         TCGv X = rotate_x(reg, shift, left, 8);
3764         rotate_x_flags(reg, X, 8);
3765     }
3766     gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
3767     set_cc_op(s, CC_OP_FLAGS);
3768 }
3769 
3770 DISAS_INSN(rotate16_im)
3771 {
3772     int left = (insn & 0x100);
3773     TCGv reg;
3774     TCGv shift;
3775     int tmp;
3776 
3777     reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
3778     tmp = (insn >> 9) & 7;
3779     if (tmp == 0) {
3780         tmp = 8;
3781     }
3782 
3783     shift = tcg_constant_i32(tmp);
3784     if (insn & 8) {
3785         rotate(reg, shift, left, 16);
3786     } else {
3787         TCGv X = rotate_x(reg, shift, left, 16);
3788         rotate_x_flags(reg, X, 16);
3789     }
3790     gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
3791     set_cc_op(s, CC_OP_FLAGS);
3792 }
3793 
/*
 * Long rotate with a register count.  The raw count is taken modulo
 * 64 (t0) for the zero test, while the effective rotation uses
 * modulo 32 (plain rotate) or modulo 33 (rotate through X).
 */
DISAS_INSN(rotate_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = DREG(insn, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 31);
        rotate(reg, t1, left, 32);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 33 */
        tcg_gen_movi_i32(t1, 33);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate32_x(DREG(insn, 0), t1, left);
        rotate_x_flags(DREG(insn, 0), X, 32);
    }
    set_cc_op(s, CC_OP_FLAGS);
}
3824 
/*
 * Byte rotate with a register count.  The raw count is taken modulo
 * 64 (t0) for the zero test, while the effective rotation uses
 * modulo 8 (plain rotate) or modulo 9 (rotate through X).
 */
DISAS_INSN(rotate8_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_BYTE, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 7);
        rotate(reg, t1, left, 8);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 9 */
        tcg_gen_movi_i32(t1, 9);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 8);
        rotate_x_flags(reg, X, 8);
    }
    gen_partset_reg(OS_BYTE, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3856 
/*
 * Word rotate with a register count.  The raw count is taken modulo
 * 64 (t0) for the zero test, while the effective rotation uses
 * modulo 16 (plain rotate) or modulo 17 (rotate through X).
 */
DISAS_INSN(rotate16_reg)
{
    TCGv reg;
    TCGv src;
    TCGv t0, t1;
    int left = (insn & 0x100);

    reg = gen_extend(s, DREG(insn, 0), OS_WORD, 0);
    src = DREG(insn, 9);
    /* shift in [0..63] */
    t0 = tcg_temp_new_i32();
    tcg_gen_andi_i32(t0, src, 63);
    t1 = tcg_temp_new_i32();
    if (insn & 8) {
        tcg_gen_andi_i32(t1, src, 15);
        rotate(reg, t1, left, 16);
        /* if shift == 0, clear C */
        tcg_gen_movcond_i32(TCG_COND_EQ, QREG_CC_C,
                            t0, QREG_CC_V /* 0 */,
                            QREG_CC_V /* 0 */, QREG_CC_C);
    } else {
        TCGv X;
        /* modulo 17 */
        tcg_gen_movi_i32(t1, 17);
        tcg_gen_remu_i32(t1, t0, t1);
        X = rotate_x(reg, t1, left, 16);
        rotate_x_flags(reg, X, 16);
    }
    gen_partset_reg(OS_WORD, DREG(insn, 0), reg);
    set_cc_op(s, CC_OP_FLAGS);
}
3888 
3889 DISAS_INSN(rotate_mem)
3890 {
3891     TCGv src;
3892     TCGv addr;
3893     TCGv shift;
3894     int left = (insn & 0x100);
3895 
3896     SRC_EA(env, src, OS_WORD, 0, &addr);
3897 
3898     shift = tcg_constant_i32(1);
3899     if (insn & 0x0200) {
3900         rotate(src, shift, left, 16);
3901     } else {
3902         TCGv X = rotate_x(src, shift, left, 16);
3903         rotate_x_flags(src, X, 16);
3904     }
3905     DEST_EA(env, insn, OS_WORD, src, &addr);
3906     set_cc_op(s, CC_OP_FLAGS);
3907 }
3908 
/*
 * BFEXTU/BFEXTS with a data-register operand.  The extension word
 * supplies the field offset (bits 6-10, or Dn if bit 11 set) and
 * width (bits 0-4, or Dn if bit 5 set; 0 encodes 32).  N is set from
 * the sign-extended field for the logic condition codes.
 */
DISAS_INSN(bfext_reg)
{
    int ext = read_im16(env, s);
    int is_sign = insn & 0x200;
    TCGv src = DREG(insn, 0);
    TCGv dst = DREG(ext, 12);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp = tcg_temp_new();
    TCGv shift;

    /*
     * In general, we're going to rotate the field so that it's at the
     * top of the word and then right-shift by the complement of the
     * width to extend the field.
     */
    if (ext & 0x20) {
        /* Variable width.  */
        if (ext & 0x800) {
            /* Variable offset.  */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
        } else {
            tcg_gen_rotli_i32(tmp, src, ofs);
        }

        /* shift = 32 - width, modulo 32 (width 0 encodes 32).  */
        shift = tcg_temp_new();
        tcg_gen_neg_i32(shift, DREG(ext, 0));
        tcg_gen_andi_i32(shift, shift, 31);
        tcg_gen_sar_i32(QREG_CC_N, tmp, shift);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_shr_i32(dst, tmp, shift);
        }
    } else {
        /* Immediate width.  */
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(tmp, src, tmp);
            src = tmp;
            pos = 32 - len;
        } else {
            /*
             * Immediate offset.  If the field doesn't wrap around the
             * end of the word, rely on (s)extract completely.
             */
            if (pos < 0) {
                tcg_gen_rotli_i32(tmp, src, ofs);
                src = tmp;
                pos = 32 - len;
            }
        }

        tcg_gen_sextract_i32(QREG_CC_N, src, pos, len);
        if (is_sign) {
            tcg_gen_mov_i32(dst, QREG_CC_N);
        } else {
            tcg_gen_extract_i32(dst, src, pos, len);
        }
    }

    set_cc_op(s, CC_OP_LOGIC);
}
3975 
3976 DISAS_INSN(bfext_mem)
3977 {
3978     int ext = read_im16(env, s);
3979     int is_sign = insn & 0x200;
3980     TCGv dest = DREG(ext, 12);
3981     TCGv addr, len, ofs;
3982 
3983     addr = gen_lea(env, s, insn, OS_UNSIZED);
3984     if (IS_NULL_QREG(addr)) {
3985         gen_addr_fault(s);
3986         return;
3987     }
3988 
3989     if (ext & 0x20) {
3990         len = DREG(ext, 0);
3991     } else {
3992         len = tcg_constant_i32(extract32(ext, 0, 5));
3993     }
3994     if (ext & 0x800) {
3995         ofs = DREG(ext, 6);
3996     } else {
3997         ofs = tcg_constant_i32(extract32(ext, 6, 5));
3998     }
3999 
4000     if (is_sign) {
4001         gen_helper_bfexts_mem(dest, tcg_env, addr, ofs, len);
4002         tcg_gen_mov_i32(QREG_CC_N, dest);
4003     } else {
4004         TCGv_i64 tmp = tcg_temp_new_i64();
4005         gen_helper_bfextu_mem(tmp, tcg_env, addr, ofs, len);
4006         tcg_gen_extr_i64_i32(dest, QREG_CC_N, tmp);
4007     }
4008     set_cc_op(s, CC_OP_LOGIC);
4009 }
4010 
/*
 * BFCHG/BFCLR/BFFFO/BFSET/BFTST with a data-register operand.
 * QREG_CC_N is set to the field value left-justified in the word
 * (for the logic condition codes), and 'mask' is built with 0s in
 * the field and 1s elsewhere, rotated into the field's position.
 * tofs/tlen carry the offset and width to the bfffo helper.
 */
DISAS_INSN(bfop_reg)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(insn, 0);
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    TCGv mask, tofs = NULL, tlen = NULL;
    bool is_bfffo = (insn & 0x0f00) == 0x0d00;

    if ((ext & 0x820) == 0) {
        /* Immediate width and offset.  */
        uint32_t maski = 0x7fffffffu >> (len - 1);
        if (ofs + len <= 32) {
            tcg_gen_shli_i32(QREG_CC_N, src, ofs);
        } else {
            /* The field wraps around the end of the word.  */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
        }
        tcg_gen_andi_i32(QREG_CC_N, QREG_CC_N, ~maski);

        mask = tcg_constant_i32(ror32(maski, ofs));
        if (is_bfffo) {
            tofs = tcg_constant_i32(ofs);
            tlen = tcg_constant_i32(len);
        }
    } else {
        TCGv tmp = tcg_temp_new();

        mask = tcg_temp_new();
        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(tmp, DREG(ext, 0), 1);
            tcg_gen_andi_i32(tmp, tmp, 31);
            tcg_gen_shr_i32(mask, tcg_constant_i32(0x7fffffffu), tmp);
            if (is_bfffo) {
                tlen = tcg_temp_new();
                tcg_gen_addi_i32(tlen, tmp, 1);
            }
        } else {
            /* Immediate width */
            tcg_gen_movi_i32(mask, 0x7fffffffu >> (len - 1));
            if (is_bfffo) {
                tlen = tcg_constant_i32(len);
            }
        }

        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_andi_i32(tmp, DREG(ext, 6), 31);
            tcg_gen_rotl_i32(QREG_CC_N, src, tmp);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotr_i32(mask, mask, tmp);
            if (is_bfffo) {
                tofs = tmp;
            }
        } else {
            /* Immediate offset (and variable width) */
            tcg_gen_rotli_i32(QREG_CC_N, src, ofs);
            tcg_gen_andc_i32(QREG_CC_N, QREG_CC_N, mask);
            tcg_gen_rotri_i32(mask, mask, ofs);
            if (is_bfffo) {
                tofs = tcg_constant_i32(ofs);
            }
        }
    }
    set_cc_op(s, CC_OP_LOGIC);

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        /* eqv with the inverted mask flips exactly the field bits.  */
        tcg_gen_eqv_i32(src, src, mask);
        break;
    case 0x0c00: /* bfclr */
        tcg_gen_and_i32(src, src, mask);
        break;
    case 0x0d00: /* bfffo */
        gen_helper_bfffo_reg(DREG(ext, 12), QREG_CC_N, tofs, tlen);
        break;
    case 0x0e00: /* bfset */
        tcg_gen_orc_i32(src, src, mask);
        break;
    case 0x0800: /* bftst */
        /* flags already set; no other work to do.  */
        break;
    default:
        g_assert_not_reached();
    }
}
4097 
/*
 * BFCHG/BFCLR/BFFFO/BFSET/BFTST with a memory operand; the actual
 * bit-field access is delegated to helpers, which return the
 * left-justified field in QREG_CC_N for the logic condition codes.
 */
DISAS_INSN(bfop_mem)
{
    int ext = read_im16(env, s);
    TCGv addr, len, ofs;
    TCGv_i64 t64;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Width and offset each come from a register or the extension word.  */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    switch (insn & 0x0f00) {
    case 0x0a00: /* bfchg */
        gen_helper_bfchg_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0c00: /* bfclr */
        gen_helper_bfclr_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0d00: /* bfffo */
        /* The helper packs the flags in the high half and the bfffo
           result in the low half of its 64-bit return value.  */
        t64 = tcg_temp_new_i64();
        gen_helper_bfffo_mem(t64, tcg_env, addr, ofs, len);
        tcg_gen_extr_i64_i32(DREG(ext, 12), QREG_CC_N, t64);
        break;
    case 0x0e00: /* bfset */
        gen_helper_bfset_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    case 0x0800: /* bftst */
        gen_helper_bfexts_mem(QREG_CC_N, tcg_env, addr, ofs, len);
        break;
    default:
        g_assert_not_reached();
    }
    set_cc_op(s, CC_OP_LOGIC);
}
4144 
/*
 * BFINS with a data-register destination: insert the low 'len' bits of
 * the source register at big-endian bit offset 'ofs' within Dn.  Flags
 * are computed from the inserted field left-aligned in a 32-bit word.
 */
DISAS_INSN(bfins_reg)
{
    int ext = read_im16(env, s);
    TCGv dst = DREG(insn, 0);
    TCGv src = DREG(ext, 12);
    /* Width encoding: 0 means 32 bits. */
    int len = ((extract32(ext, 0, 5) - 1) & 31) + 1;
    int ofs = extract32(ext, 6, 5);  /* big bit-endian */
    int pos = 32 - ofs - len;        /* little bit-endian */
    TCGv tmp;

    tmp = tcg_temp_new();

    /* Set CC_N from the field shifted to the top of the word. */
    if (ext & 0x20) {
        /* Variable width */
        tcg_gen_neg_i32(tmp, DREG(ext, 0));
        tcg_gen_andi_i32(tmp, tmp, 31);
        tcg_gen_shl_i32(QREG_CC_N, src, tmp);
    } else {
        /* Immediate width */
        tcg_gen_shli_i32(QREG_CC_N, src, 32 - len);
    }
    set_cc_op(s, CC_OP_LOGIC);

    /* Immediate width and offset */
    if ((ext & 0x820) == 0) {
        /* Check for suitability for deposit.  */
        if (pos >= 0) {
            tcg_gen_deposit_i32(dst, dst, src, pos, len);
        } else {
            /* Field wraps past bit 0: insert via mask-and-rotate. */
            uint32_t maski = -2U << (len - 1);
            uint32_t roti = (ofs + len) & 31;
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_rotri_i32(tmp, tmp, roti);
            tcg_gen_andi_i32(dst, dst, ror32(maski, roti));
            tcg_gen_or_i32(dst, dst, tmp);
        }
    } else {
        /* At least one of width/offset is dynamic: build mask at runtime. */
        TCGv mask = tcg_temp_new();
        TCGv rot = tcg_temp_new();

        if (ext & 0x20) {
            /* Variable width */
            tcg_gen_subi_i32(rot, DREG(ext, 0), 1);
            tcg_gen_andi_i32(rot, rot, 31);
            tcg_gen_movi_i32(mask, -2);
            tcg_gen_shl_i32(mask, mask, rot);
            tcg_gen_mov_i32(rot, DREG(ext, 0));
            tcg_gen_andc_i32(tmp, src, mask);
        } else {
            /* Immediate width (variable offset) */
            uint32_t maski = -2U << (len - 1);
            tcg_gen_andi_i32(tmp, src, ~maski);
            tcg_gen_movi_i32(mask, maski);
            tcg_gen_movi_i32(rot, len & 31);
        }
        if (ext & 0x800) {
            /* Variable offset */
            tcg_gen_add_i32(rot, rot, DREG(ext, 6));
        } else {
            /* Immediate offset (variable width) */
            tcg_gen_addi_i32(rot, rot, ofs);
        }
        tcg_gen_andi_i32(rot, rot, 31);
        /* Rotate mask and field into place, then merge into dst. */
        tcg_gen_rotr_i32(mask, mask, rot);
        tcg_gen_rotr_i32(tmp, tmp, rot);
        tcg_gen_and_i32(dst, dst, mask);
        tcg_gen_or_i32(dst, dst, tmp);
    }
}
4214 
/* BFINS with a memory operand; the insertion itself is done in a helper. */
DISAS_INSN(bfins_mem)
{
    int ext = read_im16(env, s);
    TCGv src = DREG(ext, 12);
    TCGv addr, len, ofs;

    addr = gen_lea(env, s, insn, OS_UNSIZED);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    /* Bit 5: width comes from a data register rather than the insn. */
    if (ext & 0x20) {
        len = DREG(ext, 0);
    } else {
        len = tcg_constant_i32(extract32(ext, 0, 5));
    }
    /* Bit 11: offset comes from a data register rather than the insn. */
    if (ext & 0x800) {
        ofs = DREG(ext, 6);
    } else {
        ofs = tcg_constant_i32(extract32(ext, 6, 5));
    }

    gen_helper_bfins_mem(QREG_CC_N, tcg_env, addr, src, ofs, len);
    set_cc_op(s, CC_OP_LOGIC);
}
4241 
4242 DISAS_INSN(ff1)
4243 {
4244     TCGv reg;
4245     reg = DREG(insn, 0);
4246     gen_logic_cc(s, reg, OS_LONG);
4247     gen_helper_ff1(reg, reg);
4248 }
4249 
4250 DISAS_INSN(chk)
4251 {
4252     TCGv src, reg;
4253     int opsize;
4254 
4255     switch ((insn >> 7) & 3) {
4256     case 3:
4257         opsize = OS_WORD;
4258         break;
4259     case 2:
4260         if (m68k_feature(env, M68K_FEATURE_CHK2)) {
4261             opsize = OS_LONG;
4262             break;
4263         }
4264         /* fallthru */
4265     default:
4266         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4267         return;
4268     }
4269     SRC_EA(env, src, opsize, 1, NULL);
4270     reg = gen_extend(s, DREG(insn, 9), opsize, 1);
4271 
4272     gen_flush_flags(s);
4273     gen_helper_chk(tcg_env, reg, src);
4274 }
4275 
/*
 * CHK2: compare a register against a pair of bounds loaded from
 * consecutive memory locations; the helper raises the exception
 * when out of range.
 */
DISAS_INSN(chk2)
{
    uint16_t ext;
    TCGv addr1, addr2, bound1, bound2, reg;
    int opsize;

    switch ((insn >> 9) & 3) {
    case 0:
        opsize = OS_BYTE;
        break;
    case 1:
        opsize = OS_WORD;
        break;
    case 2:
        opsize = OS_LONG;
        break;
    default:
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    ext = read_im16(env, s);
    /* Only the form with bit 11 set is accepted here. */
    if ((ext & 0x0800) == 0) {
        gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
        return;
    }

    /* The two bounds live at EA and EA + operand size. */
    addr1 = gen_lea(env, s, insn, OS_UNSIZED);
    addr2 = tcg_temp_new();
    tcg_gen_addi_i32(addr2, addr1, opsize_bytes(opsize));

    bound1 = gen_load(s, opsize, addr1, 1, IS_USER(s));
    bound2 = gen_load(s, opsize, addr2, 1, IS_USER(s));

    reg = tcg_temp_new();
    /* Bit 15 of the extension word selects An (used as-is) vs Dn
       (sign-extended to the operand size). */
    if (ext & 0x8000) {
        tcg_gen_mov_i32(reg, AREG(ext, 12));
    } else {
        gen_ext(reg, DREG(ext, 12), opsize, 1);
    }

    gen_flush_flags(s);
    gen_helper_chk2(tcg_env, reg, bound1, bound2);
}
4320 
4321 static void m68k_copy_line(TCGv dst, TCGv src, int index)
4322 {
4323     TCGv addr;
4324     TCGv_i64 t0, t1;
4325 
4326     addr = tcg_temp_new();
4327 
4328     t0 = tcg_temp_new_i64();
4329     t1 = tcg_temp_new_i64();
4330 
4331     tcg_gen_andi_i32(addr, src, ~15);
4332     tcg_gen_qemu_ld_i64(t0, addr, index, MO_TEUQ);
4333     tcg_gen_addi_i32(addr, addr, 8);
4334     tcg_gen_qemu_ld_i64(t1, addr, index, MO_TEUQ);
4335 
4336     tcg_gen_andi_i32(addr, dst, ~15);
4337     tcg_gen_qemu_st_i64(t0, addr, index, MO_TEUQ);
4338     tcg_gen_addi_i32(addr, addr, 8);
4339     tcg_gen_qemu_st_i64(t1, addr, index, MO_TEUQ);
4340 }
4341 
4342 DISAS_INSN(move16_reg)
4343 {
4344     int index = IS_USER(s);
4345     TCGv tmp;
4346     uint16_t ext;
4347 
4348     ext = read_im16(env, s);
4349     if ((ext & (1 << 15)) == 0) {
4350         gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
4351     }
4352 
4353     m68k_copy_line(AREG(ext, 12), AREG(insn, 0), index);
4354 
4355     /* Ax can be Ay, so save Ay before incrementing Ax */
4356     tmp = tcg_temp_new();
4357     tcg_gen_mov_i32(tmp, AREG(ext, 12));
4358     tcg_gen_addi_i32(AREG(insn, 0), AREG(insn, 0), 16);
4359     tcg_gen_addi_i32(AREG(ext, 12), tmp, 16);
4360 }
4361 
4362 DISAS_INSN(move16_mem)
4363 {
4364     int index = IS_USER(s);
4365     TCGv reg, addr;
4366 
4367     reg = AREG(insn, 0);
4368     addr = tcg_constant_i32(read_im32(env, s));
4369 
4370     if ((insn >> 3) & 1) {
4371         /* MOVE16 (xxx).L, (Ay) */
4372         m68k_copy_line(reg, addr, index);
4373     } else {
4374         /* MOVE16 (Ay), (xxx).L */
4375         m68k_copy_line(addr, reg, index);
4376     }
4377 
4378     if (((insn >> 3) & 2) == 0) {
4379         /* (Ay)+ */
4380         tcg_gen_addi_i32(reg, reg, 16);
4381     }
4382 }
4383 
/*
 * STRLDSR: store SR then load a new SR from an immediate, as one
 * atomic two-word sequence.  Privileged; the new value must keep
 * the supervisor bit set.
 */
DISAS_INSN(strldsr)
{
    uint16_t ext;
    uint32_t addr;

    /* Remember the insn address for any exception we raise. */
    addr = s->pc - 2;
    ext = read_im16(env, s);
    /* The second opcode word must be exactly 0x46fc. */
    if (ext != 0x46FC) {
        gen_exception(s, addr, EXCP_ILLEGAL);
        return;
    }
    ext = read_im16(env, s);
    if (IS_USER(s) || (ext & SR_S) == 0) {
        gen_exception(s, addr, EXCP_PRIVILEGE);
        return;
    }
    /* Push the old SR, install the new one, and end the TB. */
    gen_push(s, gen_get_sr(s));
    gen_set_sr_im(s, ext, 0);
    gen_exit_tb(s);
}
4404 
/*
 * MOVE from SR.  Privileged only when the CPU has the
 * MOVEFROMSR_PRIV feature; otherwise allowed in user mode.
 */
DISAS_INSN(move_from_sr)
{
    TCGv sr;

    if (IS_USER(s) && m68k_feature(env, M68K_FEATURE_MOVEFROMSR_PRIV)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    sr = gen_get_sr(s);
    DEST_EA(env, insn, OS_WORD, sr, NULL);
}
4416 
4417 #if !defined(CONFIG_USER_ONLY)
/*
 * MOVES: move to/from an alternate address space (privileged); the
 * memory access uses the SFC/DFC function-code index instead of the
 * current privilege level.
 */
DISAS_INSN(moves)
{
    int opsize;
    uint16_t ext;
    TCGv reg;
    TCGv addr;
    int extend;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    opsize = insn_opsize(insn);

    if (ext & 0x8000) {
        /* address register */
        reg = AREG(ext, 12);
        extend = 1;
    } else {
        /* data register */
        reg = DREG(ext, 12);
        extend = 0;
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    if (ext & 0x0800) {
        /* from reg to ea */
        gen_store(s, opsize, addr, reg, DFC_INDEX(s));
    } else {
        /* from ea to reg */
        TCGv tmp = gen_load(s, opsize, addr, 0, SFC_INDEX(s));
        if (extend) {
            gen_ext(reg, tmp, opsize, 1);
        } else {
            gen_partset_reg(opsize, reg, tmp);
        }
    }
    /* Writeback for the auto-increment/decrement addressing modes. */
    switch (extract32(insn, 3, 3)) {
    case 3: /* Indirect postincrement.  */
        /* A byte access through A7 keeps the stack word-aligned. */
        tcg_gen_addi_i32(AREG(insn, 0), addr,
                         REG(insn, 0) == 7 && opsize == OS_BYTE
                         ? 2
                         : opsize_bytes(opsize));
        break;
    case 4: /* Indirect predecrement.  */
        tcg_gen_mov_i32(AREG(insn, 0), addr);
        break;
    }
}
4475 
4476 DISAS_INSN(move_to_sr)
4477 {
4478     if (IS_USER(s)) {
4479         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4480         return;
4481     }
4482     gen_move_to_sr(env, s, insn, false);
4483     gen_exit_tb(s);
4484 }
4485 
4486 DISAS_INSN(move_from_usp)
4487 {
4488     if (IS_USER(s)) {
4489         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4490         return;
4491     }
4492     tcg_gen_ld_i32(AREG(insn, 0), tcg_env,
4493                    offsetof(CPUM68KState, sp[M68K_USP]));
4494 }
4495 
4496 DISAS_INSN(move_to_usp)
4497 {
4498     if (IS_USER(s)) {
4499         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4500         return;
4501     }
4502     tcg_gen_st_i32(AREG(insn, 0), tcg_env,
4503                    offsetof(CPUM68KState, sp[M68K_USP]));
4504 }
4505 
/* HALT (privileged); may instead be a semihosting trap. */
DISAS_INSN(halt)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    if (semihosting_test(s)) {
        gen_exception(s, s->pc, EXCP_SEMIHOSTING);
        return;
    }
    /* Set the halted flag and leave the execution loop via EXCP_HLT. */
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4519 
/* STOP #imm (privileged): load SR from the immediate, then halt. */
DISAS_INSN(stop)
{
    uint16_t ext;

    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }

    ext = read_im16(env, s);

    gen_set_sr_im(s, ext, 0);
    /* Set the halted flag and leave the execution loop via EXCP_HLT. */
    tcg_gen_movi_i32(cpu_halted, 1);
    gen_exception(s, s->pc, EXCP_HLT);
}
4535 
4536 DISAS_INSN(rte)
4537 {
4538     if (IS_USER(s)) {
4539         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4540         return;
4541     }
4542     gen_exception(s, s->base.pc_next, EXCP_RTE);
4543 }
4544 
4545 DISAS_INSN(cf_movec)
4546 {
4547     uint16_t ext;
4548     TCGv reg;
4549 
4550     if (IS_USER(s)) {
4551         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4552         return;
4553     }
4554 
4555     ext = read_im16(env, s);
4556 
4557     if (ext & 0x8000) {
4558         reg = AREG(ext, 12);
4559     } else {
4560         reg = DREG(ext, 12);
4561     }
4562     gen_helper_cf_movec_to(tcg_env, tcg_constant_i32(ext & 0xfff), reg);
4563     gen_exit_tb(s);
4564 }
4565 
4566 DISAS_INSN(m68k_movec)
4567 {
4568     uint16_t ext;
4569     TCGv reg, creg;
4570 
4571     if (IS_USER(s)) {
4572         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4573         return;
4574     }
4575 
4576     ext = read_im16(env, s);
4577 
4578     if (ext & 0x8000) {
4579         reg = AREG(ext, 12);
4580     } else {
4581         reg = DREG(ext, 12);
4582     }
4583     creg = tcg_constant_i32(ext & 0xfff);
4584     if (insn & 1) {
4585         gen_helper_m68k_movec_to(tcg_env, creg, reg);
4586     } else {
4587         gen_helper_m68k_movec_from(reg, tcg_env, creg);
4588     }
4589     gen_exit_tb(s);
4590 }
4591 
4592 DISAS_INSN(intouch)
4593 {
4594     if (IS_USER(s)) {
4595         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4596         return;
4597     }
4598     /* ICache fetch.  Implement as no-op.  */
4599 }
4600 
4601 DISAS_INSN(cpushl)
4602 {
4603     if (IS_USER(s)) {
4604         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4605         return;
4606     }
4607     /* Cache push/invalidate.  Implement as no-op.  */
4608 }
4609 
4610 DISAS_INSN(cpush)
4611 {
4612     if (IS_USER(s)) {
4613         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4614         return;
4615     }
4616     /* Cache push/invalidate.  Implement as no-op.  */
4617 }
4618 
4619 DISAS_INSN(cinv)
4620 {
4621     if (IS_USER(s)) {
4622         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4623         return;
4624     }
4625     /* Invalidate cache line.  Implement as no-op.  */
4626 }
4627 
4628 #if !defined(CONFIG_USER_ONLY)
4629 DISAS_INSN(pflush)
4630 {
4631     TCGv opmode;
4632 
4633     if (IS_USER(s)) {
4634         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4635         return;
4636     }
4637 
4638     opmode = tcg_constant_i32((insn >> 3) & 3);
4639     gen_helper_pflush(tcg_env, AREG(insn, 0), opmode);
4640 }
4641 
4642 DISAS_INSN(ptest)
4643 {
4644     TCGv is_read;
4645 
4646     if (IS_USER(s)) {
4647         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
4648         return;
4649     }
4650     is_read = tcg_constant_i32((insn >> 5) & 1);
4651     gen_helper_ptest(tcg_env, AREG(insn, 0), is_read);
4652 }
4653 #endif
4654 
/* WDDATA: not implemented; unconditionally raise a privilege violation. */
DISAS_INSN(wddata)
{
    gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
}
4659 
/* WDEBUG (privileged): unimplemented; aborts emulation when executed. */
DISAS_INSN(wdebug)
{
    if (IS_USER(s)) {
        gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
        return;
    }
    /* TODO: Implement wdebug.  */
    cpu_abort(env_cpu(env), "WDEBUG not implemented");
}
4669 #endif
4670 
4671 DISAS_INSN(trap)
4672 {
4673     gen_exception(s, s->pc, EXCP_TRAP0 + (insn & 0xf));
4674 }
4675 
/*
 * Emit a conditional trap: when the condition in @c holds, set PC to
 * the next insn and raise a format-2 TRAPcc exception; otherwise fall
 * through.  NEVER emits nothing; ALWAYS skips the branch.
 */
static void do_trapcc(DisasContext *s, DisasCompare *c)
{
    if (c->tcond != TCG_COND_NEVER) {
        TCGLabel *over = NULL;

        update_cc_op(s);

        if (c->tcond != TCG_COND_ALWAYS) {
            /* Jump over if !c. */
            over = gen_new_label();
            tcg_gen_brcond_i32(tcg_invert_cond(c->tcond), c->v1, c->v2, over);
        }

        tcg_gen_movi_i32(QREG_PC, s->pc);
        gen_raise_exception_format2(s, EXCP_TRAPCC, s->base.pc_next);

        if (over != NULL) {
            gen_set_label(over);
            /* Un-end the TB for the not-taken path. */
            s->base.is_jmp = DISAS_NEXT;
        }
    }
}
4698 
/* TRAPcc / TRAPcc.W / TRAPcc.L: trap when the condition holds. */
DISAS_INSN(trapcc)
{
    DisasCompare c;

    /* Consume and discard the immediate operand. */
    switch (extract32(insn, 0, 3)) {
    case 2: /* trapcc.w */
        (void)read_im16(env, s);
        break;
    case 3: /* trapcc.l */
        (void)read_im32(env, s);
        break;
    case 4: /* trapcc (no operand) */
        break;
    default:
        /* trapcc registered with only valid opmodes */
        g_assert_not_reached();
    }

    /* Condition code is in bits 8-11 of the opcode. */
    gen_cc_cond(&c, s, extract32(insn, 8, 4));
    do_trapcc(s, &c);
}
4721 
/* TRAPV: trap when the overflow condition (cc code 9) holds. */
DISAS_INSN(trapv)
{
    DisasCompare c;

    gen_cc_cond(&c, s, 9); /* V set */
    do_trapcc(s, &c);
}
4729 
4730 static void gen_load_fcr(DisasContext *s, TCGv res, int reg)
4731 {
4732     switch (reg) {
4733     case M68K_FPIAR:
4734         tcg_gen_movi_i32(res, 0);
4735         break;
4736     case M68K_FPSR:
4737         gen_helper_get_fpsr(res, tcg_env);
4738         break;
4739     case M68K_FPCR:
4740         tcg_gen_ld_i32(res, tcg_env, offsetof(CPUM68KState, fpcr));
4741         break;
4742     }
4743 }
4744 
4745 static void gen_store_fcr(DisasContext *s, TCGv val, int reg)
4746 {
4747     switch (reg) {
4748     case M68K_FPIAR:
4749         break;
4750     case M68K_FPSR:
4751         gen_helper_set_fpsr(tcg_env, val);
4752         break;
4753     case M68K_FPCR:
4754         gen_helper_set_fpcr(tcg_env, val);
4755         break;
4756     }
4757 }
4758 
4759 static void gen_qemu_store_fcr(DisasContext *s, TCGv addr, int reg)
4760 {
4761     int index = IS_USER(s);
4762     TCGv tmp;
4763 
4764     tmp = tcg_temp_new();
4765     gen_load_fcr(s, tmp, reg);
4766     tcg_gen_qemu_st_tl(tmp, addr, index, MO_TEUL);
4767 }
4768 
4769 static void gen_qemu_load_fcr(DisasContext *s, TCGv addr, int reg)
4770 {
4771     int index = IS_USER(s);
4772     TCGv tmp;
4773 
4774     tmp = tcg_temp_new();
4775     tcg_gen_qemu_ld_tl(tmp, addr, index, MO_TEUL);
4776     gen_store_fcr(s, tmp, reg);
4777 }
4778 
4779 
/*
 * FMOVE(M) to/from the FP control registers.  'mask' selects the
 * registers involved (see the table below); 'is_write' means the
 * transfer goes from control register to the effective address.
 * Register and immediate operands permit only a single register.
 */
static void gen_op_fmove_fcr(CPUM68KState *env, DisasContext *s,
                             uint32_t insn, uint32_t ext)
{
    int mask = (ext >> 10) & 7;
    int is_write = (ext >> 13) & 1;
    int mode = extract32(insn, 3, 3);
    int i;
    TCGv addr, tmp;

    switch (mode) {
    case 0: /* Dn */
        /* A data register can hold exactly one control register. */
        if (mask != M68K_FPIAR && mask != M68K_FPSR && mask != M68K_FPCR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, DREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, DREG(insn, 0), mask);
        }
        return;
    case 1: /* An, only with FPIAR */
        if (mask != M68K_FPIAR) {
            gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
            return;
        }
        if (is_write) {
            gen_load_fcr(s, AREG(insn, 0), mask);
        } else {
            gen_store_fcr(s, AREG(insn, 0), mask);
        }
        return;
    case 7: /* Immediate */
        if (REG(insn, 0) == 4) {
            /* Only a single-register load from an immediate is legal. */
            if (is_write ||
                (mask != M68K_FPIAR && mask != M68K_FPSR &&
                 mask != M68K_FPCR)) {
                gen_exception(s, s->base.pc_next, EXCP_ILLEGAL);
                return;
            }
            tmp = tcg_constant_i32(read_im32(env, s));
            gen_store_fcr(s, tmp, mask);
            return;
        }
        break;
    default:
        break;
    }

    /* Memory operand: transfer each selected register in turn. */
    tmp = gen_lea(env, s, insn, OS_LONG);
    if (IS_NULL_QREG(tmp)) {
        gen_addr_fault(s);
        return;
    }

    addr = tcg_temp_new();
    tcg_gen_mov_i32(addr, tmp);

    /*
     * mask:
     *
     * 0b100 Floating-Point Control Register
     * 0b010 Floating-Point Status Register
     * 0b001 Floating-Point Instruction Address Register
     *
     */

    if (is_write && mode == 4) {
        /* -(An): store FPCR first, walking the address downwards. */
        for (i = 2; i >= 0; i--, mask >>= 1) {
            if (mask & 1) {
                gen_qemu_store_fcr(s, addr, 1 << i);
                if (mask != 1) {
                    tcg_gen_subi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
       }
       tcg_gen_mov_i32(AREG(insn, 0), addr);
    } else {
        /* All other modes walk the address upwards, FPIAR first. */
        for (i = 0; i < 3; i++, mask >>= 1) {
            if (mask & 1) {
                if (is_write) {
                    gen_qemu_store_fcr(s, addr, 1 << i);
                } else {
                    gen_qemu_load_fcr(s, addr, 1 << i);
                }
                if (mask != 1 || mode == 3) {
                    tcg_gen_addi_i32(addr, addr, opsize_bytes(OS_LONG));
                }
            }
        }
        /* (An)+: write the final address back. */
        if (mode == 3) {
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
4875 
/*
 * FMOVEM: move multiple FP data registers to/from memory.  The register
 * list is either static (low byte of the extension word) or dynamic (in
 * a data register).  Helpers perform the transfers and return the final
 * address, used for the -(An)/(An)+ writeback.
 */
static void gen_op_fmovem(CPUM68KState *env, DisasContext *s,
                          uint32_t insn, uint32_t ext)
{
    int opsize;
    TCGv addr, tmp;
    int mode = (ext >> 11) & 0x3;
    int is_load = ((ext & 0x2000) == 0);

    if (m68k_feature(s->env, M68K_FEATURE_FPU)) {
        opsize = OS_EXTENDED;
    } else {
        opsize = OS_DOUBLE;  /* FIXME */
    }

    addr = gen_lea(env, s, insn, opsize);
    if (IS_NULL_QREG(addr)) {
        gen_addr_fault(s);
        return;
    }

    tmp = tcg_temp_new();
    if (mode & 0x1) {
        /* Dynamic register list */
        tcg_gen_ext8u_i32(tmp, DREG(ext, 4));
    } else {
        /* Static register list */
        tcg_gen_movi_i32(tmp, ext & 0xff);
    }

    if (!is_load && (mode & 2) == 0) {
        /*
         * predecrement addressing mode
         * only available to store register to memory
         */
        if (opsize == OS_EXTENDED) {
            gen_helper_fmovemx_st_predec(tmp, tcg_env, addr, tmp);
        } else {
            gen_helper_fmovemd_st_predec(tmp, tcg_env, addr, tmp);
        }
    } else {
        /* postincrement addressing mode */
        if (opsize == OS_EXTENDED) {
            if (is_load) {
                gen_helper_fmovemx_ld_postinc(tmp, tcg_env, addr, tmp);
            } else {
                gen_helper_fmovemx_st_postinc(tmp, tcg_env, addr, tmp);
            }
        } else {
            if (is_load) {
                gen_helper_fmovemd_ld_postinc(tmp, tcg_env, addr, tmp);
            } else {
                gen_helper_fmovemd_st_postinc(tmp, tcg_env, addr, tmp);
            }
        }
    }
    /* Write the updated address back for -(An) and (An)+ modes. */
    if ((insn & 070) == 030 || (insn & 070) == 040) {
        tcg_gen_mov_i32(AREG(insn, 0), tmp);
    }
}
4935 
4936 /*
4937  * ??? FP exceptions are not implemented.  Most exceptions are deferred until
4938  * immediately before the next FP instruction is executed.
4939  */
DISAS_INSN(fpu)
{
    uint16_t ext;
    int opmode;
    int opsize;
    TCGv_ptr cpu_src, cpu_dest;

    ext = read_im16(env, s);
    opmode = ext & 0x7f;
    /* Bits 13-15 of the extension word select the instruction class. */
    switch ((ext >> 13) & 7) {
    case 0:
        break;
    case 1:
        goto undef;
    case 2:
        if (insn == 0xf200 && (ext & 0xfc00) == 0x5c00) {
            /* fmovecr */
            TCGv rom_offset = tcg_constant_i32(opmode);
            cpu_dest = gen_fp_ptr(REG(ext, 7));
            gen_helper_fconst(tcg_env, cpu_dest, rom_offset);
            return;
        }
        break;
    case 3: /* fmove out */
        cpu_src = gen_fp_ptr(REG(ext, 7));
        opsize = ext_opsize(ext, 10);
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_STORE, IS_USER(s)) == -1) {
            gen_addr_fault(s);
        }
        gen_helper_ftst(tcg_env, cpu_src);
        return;
    case 4: /* fmove to control register.  */
    case 5: /* fmove from control register.  */
        gen_op_fmove_fcr(env, s, insn, ext);
        return;
    case 6: /* fmovem */
    case 7:
        if ((ext & 0x1000) == 0 && !m68k_feature(s->env, M68K_FEATURE_FPU)) {
            goto undef;
        }
        gen_op_fmovem(env, s, insn, ext);
        return;
    }
    /* Arithmetic class: fetch the source operand... */
    if (ext & (1 << 14)) {
        /* Source effective address.  */
        opsize = ext_opsize(ext, 10);
        cpu_src = gen_fp_result_ptr();
        if (gen_ea_fp(env, s, insn, opsize, cpu_src,
                      EA_LOADS, IS_USER(s)) == -1) {
            gen_addr_fault(s);
            return;
        }
    } else {
        /* Source register.  */
        opsize = OS_EXTENDED;
        cpu_src = gen_fp_ptr(REG(ext, 10));
    }
    cpu_dest = gen_fp_ptr(REG(ext, 7));
    /* ... and dispatch on the 7-bit opmode. */
    switch (opmode) {
    case 0: /* fmove */
        gen_fp_move(cpu_dest, cpu_src);
        break;
    case 0x40: /* fsmove */
        gen_helper_fsround(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x44: /* fdmove */
        gen_helper_fdround(tcg_env, cpu_dest, cpu_src);
        break;
    case 1: /* fint */
        gen_helper_firound(tcg_env, cpu_dest, cpu_src);
        break;
    case 2: /* fsinh */
        gen_helper_fsinh(tcg_env, cpu_dest, cpu_src);
        break;
    case 3: /* fintrz */
        gen_helper_fitrunc(tcg_env, cpu_dest, cpu_src);
        break;
    case 4: /* fsqrt */
        gen_helper_fsqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x41: /* fssqrt */
        gen_helper_fssqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x45: /* fdsqrt */
        gen_helper_fdsqrt(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x06: /* flognp1 */
        gen_helper_flognp1(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x08: /* fetoxm1 */
        gen_helper_fetoxm1(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x09: /* ftanh */
        gen_helper_ftanh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0a: /* fatan */
        gen_helper_fatan(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0c: /* fasin */
        gen_helper_fasin(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0d: /* fatanh */
        gen_helper_fatanh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0e: /* fsin */
        gen_helper_fsin(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x0f: /* ftan */
        gen_helper_ftan(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x10: /* fetox */
        gen_helper_fetox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x11: /* ftwotox */
        gen_helper_ftwotox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x12: /* ftentox */
        gen_helper_ftentox(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x14: /* flogn */
        gen_helper_flogn(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x15: /* flog10 */
        gen_helper_flog10(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x16: /* flog2 */
        gen_helper_flog2(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x18: /* fabs */
        gen_helper_fabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x58: /* fsabs */
        gen_helper_fsabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5c: /* fdabs */
        gen_helper_fdabs(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x19: /* fcosh */
        gen_helper_fcosh(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1a: /* fneg */
        gen_helper_fneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5a: /* fsneg */
        gen_helper_fsneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x5e: /* fdneg */
        gen_helper_fdneg(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1c: /* facos */
        gen_helper_facos(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1d: /* fcos */
        gen_helper_fcos(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1e: /* fgetexp */
        gen_helper_fgetexp(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x1f: /* fgetman */
        gen_helper_fgetman(tcg_env, cpu_dest, cpu_src);
        break;
    case 0x20: /* fdiv */
        gen_helper_fdiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x60: /* fsdiv */
        gen_helper_fsdiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x64: /* fddiv */
        gen_helper_fddiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x21: /* fmod */
        gen_helper_fmod(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x22: /* fadd */
        gen_helper_fadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x62: /* fsadd */
        gen_helper_fsadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x66: /* fdadd */
        gen_helper_fdadd(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x23: /* fmul */
        gen_helper_fmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x63: /* fsmul */
        gen_helper_fsmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x67: /* fdmul */
        gen_helper_fdmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x24: /* fsgldiv */
        gen_helper_fsgldiv(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x25: /* frem */
        gen_helper_frem(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x26: /* fscale */
        gen_helper_fscale(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x27: /* fsglmul */
        gen_helper_fsglmul(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x28: /* fsub */
        gen_helper_fsub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x68: /* fssub */
        gen_helper_fssub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x6c: /* fdsub */
        gen_helper_fdsub(tcg_env, cpu_dest, cpu_src, cpu_dest);
        break;
    case 0x30: case 0x31: case 0x32:
    case 0x33: case 0x34: case 0x35:
    case 0x36: case 0x37: {
            /* fsincos: the low 3 opmode bits name the cosine register. */
            TCGv_ptr cpu_dest2 = gen_fp_ptr(REG(ext, 0));
            gen_helper_fsincos(tcg_env, cpu_dest, cpu_dest2, cpu_src);
        }
        break;
    case 0x38: /* fcmp */
        gen_helper_fcmp(tcg_env, cpu_src, cpu_dest);
        return;
    case 0x3a: /* ftst */
        gen_helper_ftst(tcg_env, cpu_src);
        return;
    default:
        goto undef;
    }
    /* Update the FP condition codes from the result. */
    gen_helper_ftst(tcg_env, cpu_dest);
    return;
undef:
    /* FIXME: Is this right for offset addressing modes?  */
    s->pc -= 2;
    disas_undef_fpu(env, s, insn);
}
5176 
/*
 * Convert an FPU conditional (the low 6 bits of the insn or extension
 * word) into a DisasCompare to be evaluated as "v1 <tcond> v2".
 *
 * Most conditions reduce to a test-under-mask of the FPSR condition-code
 * bits (N, Z, A) via TCG_COND_TSTNE / TCG_COND_TSTEQ, with the mask in
 * 'imm'.  Conditions that mix polarities first build a modified copy of
 * FPSR in c->v1 — shifting the A (or Z) bit up into the N position and
 * or-ing/inverting — so that a single test-under-mask still suffices.
 *
 * Conditions 16-31 are the "signaling" variants, which are supposed to
 * additionally raise BSUN on an unordered result (not yet implemented;
 * see the TODO below).
 */
static void gen_fcc_cond(DisasCompare *c, DisasContext *s, int cond)
{
    TCGv fpsr;
    int imm = 0;

    /* TODO: Raise BSUN exception.  */
    fpsr = tcg_temp_new();
    gen_load_fcr(s, fpsr, M68K_FPSR);
    c->v1 = fpsr;

    switch (cond) {
    case 0:  /* False */
    case 16: /* Signaling False */
        c->tcond = TCG_COND_NEVER;
        break;
    case 1:  /* EQual Z */
    case 17: /* Signaling EQual Z */
        imm = FPSR_CC_Z;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 2:  /* Ordered Greater Than !(A || Z || N) */
    case 18: /* Greater Than !(A || Z || N) */
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 3:  /* Ordered Greater than or Equal Z || !(A || N) */
    case 19: /* Greater than or Equal Z || !(A || N) */
        c->v1 = tcg_temp_new();
        /* Move the A bit into the N position and fold it into FPSR... */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A));
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        /* ...then invert N, so TSTNE(Z | N) tests Z || !(A || N). */
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        imm = FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 4:  /* Ordered Less Than !(!N || A || Z); */
    case 20: /* Less Than !(!N || A || Z); */
        c->v1 = tcg_temp_new();
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        imm = FPSR_CC_N | FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 5:  /* Ordered Less than or Equal Z || (N && !A) */
    case 21: /* Less than or Equal Z || (N && !A) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_A);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_A))
;
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        imm = FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 6:  /* Ordered Greater or Less than !(A || Z) */
    case 22: /* Greater or Less than !(A || Z) */
        imm = FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 7:  /* Ordered !A */
    case 23: /* Greater, Less or Equal !A */
        imm = FPSR_CC_A;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 8:  /* Unordered A */
    case 24: /* Not Greater, Less or Equal A */
        imm = FPSR_CC_A;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 9:  /* Unordered or Equal A || Z */
    case 25: /* Not Greater or Less then A || Z */
        imm = FPSR_CC_A | FPSR_CC_Z;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 10: /* Unordered or Greater Than A || !(N || Z)) */
    case 26: /* Not Less or Equal A || !(N || Z)) */
        c->v1 = tcg_temp_new();
        /* Same trick as above, with Z moved into the N position. */
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_or_i32(c->v1, c->v1, fpsr);
        tcg_gen_xori_i32(c->v1, c->v1, FPSR_CC_N);
        imm = FPSR_CC_A | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 11: /* Unordered or Greater or Equal A || Z || !N */
    case 27: /* Not Less Than A || Z || !N */
        c->v1 = tcg_temp_new();
        tcg_gen_xori_i32(c->v1, fpsr, FPSR_CC_N);
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 12: /* Unordered or Less Than A || (N && !Z) */
    case 28: /* Not Greater than or Equal A || (N && !Z) */
        c->v1 = tcg_temp_new();
        tcg_gen_andi_i32(c->v1, fpsr, FPSR_CC_Z);
        tcg_gen_shli_i32(c->v1, c->v1, ctz32(FPSR_CC_N) - ctz32(FPSR_CC_Z));
        tcg_gen_andc_i32(c->v1, fpsr, c->v1);
        imm = FPSR_CC_A | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 13: /* Unordered or Less or Equal A || Z || N */
    case 29: /* Not Greater Than A || Z || N */
        imm = FPSR_CC_A | FPSR_CC_Z | FPSR_CC_N;
        c->tcond = TCG_COND_TSTNE;
        break;
    case 14: /* Not Equal !Z */
    case 30: /* Signaling Not Equal !Z */
        imm = FPSR_CC_Z;
        c->tcond = TCG_COND_TSTEQ;
        break;
    case 15: /* True */
    case 31: /* Signaling True */
        c->tcond = TCG_COND_ALWAYS;
        break;
    }
    c->v2 = tcg_constant_i32(imm);
}
5291 
5292 static void gen_fjmpcc(DisasContext *s, int cond, TCGLabel *l1)
5293 {
5294     DisasCompare c;
5295 
5296     gen_fcc_cond(&c, s, cond);
5297     update_cc_op(s);
5298     tcg_gen_brcond_i32(c.tcond, c.v1, c.v2, l1);
5299 }
5300 
5301 DISAS_INSN(fbcc)
5302 {
5303     uint32_t offset;
5304     uint32_t base;
5305     TCGLabel *l1;
5306 
5307     base = s->pc;
5308     offset = (int16_t)read_im16(env, s);
5309     if (insn & (1 << 6)) {
5310         offset = (offset << 16) | read_im16(env, s);
5311     }
5312 
5313     l1 = gen_new_label();
5314     update_cc_op(s);
5315     gen_fjmpcc(s, insn & 0x3f, l1);
5316     gen_jmp_tb(s, 0, s->pc, s->base.pc_next);
5317     gen_set_label(l1);
5318     gen_jmp_tb(s, 1, base + offset, s->base.pc_next);
5319 }
5320 
5321 DISAS_INSN(fscc)
5322 {
5323     DisasCompare c;
5324     int cond;
5325     TCGv tmp;
5326     uint16_t ext;
5327 
5328     ext = read_im16(env, s);
5329     cond = ext & 0x3f;
5330     gen_fcc_cond(&c, s, cond);
5331 
5332     tmp = tcg_temp_new();
5333     tcg_gen_negsetcond_i32(c.tcond, tmp, c.v1, c.v2);
5334 
5335     DEST_EA(env, insn, OS_BYTE, tmp, NULL);
5336 }
5337 
5338 DISAS_INSN(ftrapcc)
5339 {
5340     DisasCompare c;
5341     uint16_t ext;
5342     int cond;
5343 
5344     ext = read_im16(env, s);
5345     cond = ext & 0x3f;
5346 
5347     /* Consume and discard the immediate operand. */
5348     switch (extract32(insn, 0, 3)) {
5349     case 2: /* ftrapcc.w */
5350         (void)read_im16(env, s);
5351         break;
5352     case 3: /* ftrapcc.l */
5353         (void)read_im32(env, s);
5354         break;
5355     case 4: /* ftrapcc (no operand) */
5356         break;
5357     default:
5358         /* ftrapcc registered with only valid opmodes */
5359         g_assert_not_reached();
5360     }
5361 
5362     gen_fcc_cond(&c, s, cond);
5363     do_trapcc(s, &c);
5364 }
5365 
5366 #if !defined(CONFIG_USER_ONLY)
5367 DISAS_INSN(frestore)
5368 {
5369     TCGv addr;
5370 
5371     if (IS_USER(s)) {
5372         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5373         return;
5374     }
5375     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5376         SRC_EA(env, addr, OS_LONG, 0, NULL);
5377         /* FIXME: check the state frame */
5378     } else {
5379         disas_undef(env, s, insn);
5380     }
5381 }
5382 
5383 DISAS_INSN(fsave)
5384 {
5385     if (IS_USER(s)) {
5386         gen_exception(s, s->base.pc_next, EXCP_PRIVILEGE);
5387         return;
5388     }
5389 
5390     if (m68k_feature(s->env, M68K_FEATURE_M68040)) {
5391         /* always write IDLE */
5392         TCGv idle = tcg_constant_i32(0x41000000);
5393         DEST_EA(env, insn, OS_LONG, idle, NULL);
5394     } else {
5395         disas_undef(env, s, insn);
5396     }
5397 }
5398 #endif
5399 
5400 static inline TCGv gen_mac_extract_word(DisasContext *s, TCGv val, int upper)
5401 {
5402     TCGv tmp = tcg_temp_new();
5403     if (s->env->macsr & MACSR_FI) {
5404         if (upper)
5405             tcg_gen_andi_i32(tmp, val, 0xffff0000);
5406         else
5407             tcg_gen_shli_i32(tmp, val, 16);
5408     } else if (s->env->macsr & MACSR_SU) {
5409         if (upper)
5410             tcg_gen_sari_i32(tmp, val, 16);
5411         else
5412             tcg_gen_ext16s_i32(tmp, val);
5413     } else {
5414         if (upper)
5415             tcg_gen_shri_i32(tmp, val, 16);
5416         else
5417             tcg_gen_ext16u_i32(tmp, val);
5418     }
5419     return tmp;
5420 }
5421 
/* Clear the MACSR result flags (V, Z, N, EV) ahead of a new MAC operation. */
static void gen_mac_clear_flags(void)
{
    tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR,
                     ~(MACSR_V | MACSR_Z | MACSR_N | MACSR_EV));
}
5427 
/*
 * ColdFire (E)MAC multiply-accumulate, optionally fused with a memory
 * load ("MAC with load", insn bits 4-5 nonzero).  The extension word
 * selects word/long operands, the accumulator, a post-multiply shift,
 * and (EMAC B only) a second, dual accumulation.
 */
DISAS_INSN(mac)
{
    TCGv rx;
    TCGv ry;
    uint16_t ext;
    int acc;
    TCGv tmp;
    TCGv addr;
    TCGv loadval;
    int dual;
    TCGv saved_flags;

    if (!s->done_mac) {
        s->mactmp = tcg_temp_new_i64();
        s->done_mac = 1;
    }

    ext = read_im16(env, s);

    /* Accumulator index: insn bit 7 is the low bit, ext bit 4 the high bit. */
    acc = ((insn >> 7) & 1) | ((ext >> 3) & 2);
    /* Dual accumulate: a load EA is present and ext[1:0] is nonzero. */
    dual = ((insn & 0x30) != 0 && (ext & 3) != 0);
    if (dual && !m68k_feature(s->env, M68K_FEATURE_CF_EMAC_B)) {
        disas_undef(env, s, insn);
        return;
    }
    if (insn & 0x30) {
        /* MAC with load.  */
        tmp = gen_lea(env, s, insn, OS_LONG);
        addr = tcg_temp_new();
        tcg_gen_and_i32(addr, tmp, QREG_MAC_MASK);
        /*
         * Load the value now to ensure correct exception behavior.
         * Perform writeback after reading the MAC inputs.
         */
        loadval = gen_load(s, OS_LONG, addr, 0, IS_USER(s));

        acc ^= 1;
        rx = (ext & 0x8000) ? AREG(ext, 12) : DREG(insn, 12);
        ry = (ext & 8) ? AREG(ext, 0) : DREG(ext, 0);
    } else {
        loadval = addr = NULL_QREG;
        rx = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        ry = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
    }

    gen_mac_clear_flags();
#if 0
    l1 = -1;
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && !dual) {
        /* Skip the multiply if we know we will ignore it.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        tcg_gen_andi_i32(tmp, QREG_MACSR, 1 << (acc + 8));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if ((ext & 0x0800) == 0) {
        /* Word.  */
        rx = gen_mac_extract_word(s, rx, (ext & 0x80) != 0);
        ry = gen_mac_extract_word(s, ry, (ext & 0x40) != 0);
    }
    if (s->env->macsr & MACSR_FI) {
        gen_helper_macmulf(s->mactmp, tcg_env, rx, ry);
    } else {
        if (s->env->macsr & MACSR_SU)
            gen_helper_macmuls(s->mactmp, tcg_env, rx, ry);
        else
            gen_helper_macmulu(s->mactmp, tcg_env, rx, ry);
        /* Optional post-multiply shift from ext bits 9-10. */
        switch ((ext >> 9) & 3) {
        case 1:
            tcg_gen_shli_i64(s->mactmp, s->mactmp, 1);
            break;
        case 3:
            tcg_gen_shri_i64(s->mactmp, s->mactmp, 1);
            break;
        }
    }

    if (dual) {
        /* Save the overflow flag from the multiply.  */
        saved_flags = tcg_temp_new();
        tcg_gen_mov_i32(saved_flags, QREG_MACSR);
    } else {
        saved_flags = NULL_QREG;
    }

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if ((s->env->macsr & MACSR_OMC) != 0 && dual) {
        /* Skip the accumulate if the value is already saturated.  */
        l1 = gen_new_label();
        tmp = tcg_temp_new();
        gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
        gen_op_jmp_nz32(tmp, l1);
    }
#endif

    if (insn & 0x100)
        tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp)
;
    else
        tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);

    if (s->env->macsr & MACSR_FI)
        gen_helper_macsatf(tcg_env, tcg_constant_i32(acc));
    else if (s->env->macsr & MACSR_SU)
        gen_helper_macsats(tcg_env, tcg_constant_i32(acc));
    else
        gen_helper_macsatu(tcg_env, tcg_constant_i32(acc));

#if 0
    /* Disabled because conditional branches clobber temporary vars.  */
    if (l1 != -1)
        gen_set_label(l1);
#endif

    if (dual) {
        /* Dual accumulate variant.  */
        acc = (ext >> 2) & 3;
        /* Restore the overflow flag from the multiplier.  */
        tcg_gen_mov_i32(QREG_MACSR, saved_flags);
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if ((s->env->macsr & MACSR_OMC) != 0) {
            /* Skip the accumulate if the value is already saturated.  */
            l1 = gen_new_label();
            tmp = tcg_temp_new();
            gen_op_and32(tmp, QREG_MACSR, tcg_constant_i32(MACSR_PAV0 << acc));
            gen_op_jmp_nz32(tmp, l1);
        }
#endif
        if (ext & 2)
            tcg_gen_sub_i64(MACREG(acc), MACREG(acc), s->mactmp);
        else
            tcg_gen_add_i64(MACREG(acc), MACREG(acc), s->mactmp);
        if (s->env->macsr & MACSR_FI)
            gen_helper_macsatf(tcg_env, tcg_constant_i32(acc));
        else if (s->env->macsr & MACSR_SU)
            gen_helper_macsats(tcg_env, tcg_constant_i32(acc));
        else
            gen_helper_macsatu(tcg_env, tcg_constant_i32(acc));
#if 0
        /* Disabled because conditional branches clobber temporary vars.  */
        if (l1 != -1)
            gen_set_label(l1);
#endif
    }
    gen_helper_mac_set_flags(tcg_env, tcg_constant_i32(acc));

    if (insn & 0x30) {
        /* MAC with load: perform the register and address writeback. */
        TCGv rw;
        rw = (insn & 0x40) ? AREG(insn, 9) : DREG(insn, 9);
        tcg_gen_mov_i32(rw, loadval);
        /*
         * FIXME: Should address writeback happen with the masked or
         * unmasked value?
         */
        switch ((insn >> 3) & 7) {
        case 3: /* Post-increment.  */
            tcg_gen_addi_i32(AREG(insn, 0), addr, 4);
            break;
        case 4: /* Pre-decrement.  */
            tcg_gen_mov_i32(AREG(insn, 0), addr);
        }
    }
}
5595 
5596 DISAS_INSN(from_mac)
5597 {
5598     TCGv rx;
5599     TCGv_i64 acc;
5600     int accnum;
5601 
5602     rx = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5603     accnum = (insn >> 9) & 3;
5604     acc = MACREG(accnum);
5605     if (s->env->macsr & MACSR_FI) {
5606         gen_helper_get_macf(rx, tcg_env, acc);
5607     } else if ((s->env->macsr & MACSR_OMC) == 0) {
5608         tcg_gen_extrl_i64_i32(rx, acc);
5609     } else if (s->env->macsr & MACSR_SU) {
5610         gen_helper_get_macs(rx, acc);
5611     } else {
5612         gen_helper_get_macu(rx, acc);
5613     }
5614     if (insn & 0x40) {
5615         tcg_gen_movi_i64(acc, 0);
5616         tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5617     }
5618 }
5619 
5620 DISAS_INSN(move_mac)
5621 {
5622     /* FIXME: This can be done without a helper.  */
5623     int src;
5624     TCGv dest;
5625     src = insn & 3;
5626     dest = tcg_constant_i32((insn >> 9) & 3);
5627     gen_helper_mac_move(tcg_env, dest, tcg_constant_i32(src));
5628     gen_mac_clear_flags();
5629     gen_helper_mac_set_flags(tcg_env, dest);
5630 }
5631 
5632 DISAS_INSN(from_macsr)
5633 {
5634     TCGv reg;
5635 
5636     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5637     tcg_gen_mov_i32(reg, QREG_MACSR);
5638 }
5639 
5640 DISAS_INSN(from_mask)
5641 {
5642     TCGv reg;
5643     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5644     tcg_gen_mov_i32(reg, QREG_MAC_MASK);
5645 }
5646 
5647 DISAS_INSN(from_mext)
5648 {
5649     TCGv reg;
5650     TCGv acc;
5651     reg = (insn & 8) ? AREG(insn, 0) : DREG(insn, 0);
5652     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5653     if (s->env->macsr & MACSR_FI)
5654         gen_helper_get_mac_extf(reg, tcg_env, acc);
5655     else
5656         gen_helper_get_mac_exti(reg, tcg_env, acc);
5657 }
5658 
5659 DISAS_INSN(macsr_to_ccr)
5660 {
5661     TCGv tmp = tcg_temp_new();
5662 
5663     /* Note that X and C are always cleared. */
5664     tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
5665     gen_helper_set_ccr(tcg_env, tmp);
5666     set_cc_op(s, CC_OP_FLAGS);
5667 }
5668 
5669 DISAS_INSN(to_mac)
5670 {
5671     TCGv_i64 acc;
5672     TCGv val;
5673     int accnum;
5674     accnum = (insn >> 9) & 3;
5675     acc = MACREG(accnum);
5676     SRC_EA(env, val, OS_LONG, 0, NULL);
5677     if (s->env->macsr & MACSR_FI) {
5678         tcg_gen_ext_i32_i64(acc, val);
5679         tcg_gen_shli_i64(acc, acc, 8);
5680     } else if (s->env->macsr & MACSR_SU) {
5681         tcg_gen_ext_i32_i64(acc, val);
5682     } else {
5683         tcg_gen_extu_i32_i64(acc, val);
5684     }
5685     tcg_gen_andi_i32(QREG_MACSR, QREG_MACSR, ~(MACSR_PAV0 << accnum));
5686     gen_mac_clear_flags();
5687     gen_helper_mac_set_flags(tcg_env, tcg_constant_i32(accnum));
5688 }
5689 
5690 DISAS_INSN(to_macsr)
5691 {
5692     TCGv val;
5693     SRC_EA(env, val, OS_LONG, 0, NULL);
5694     gen_helper_set_macsr(tcg_env, val);
5695     gen_exit_tb(s);
5696 }
5697 
5698 DISAS_INSN(to_mask)
5699 {
5700     TCGv val;
5701     SRC_EA(env, val, OS_LONG, 0, NULL);
5702     tcg_gen_ori_i32(QREG_MAC_MASK, val, 0xffff0000);
5703 }
5704 
5705 DISAS_INSN(to_mext)
5706 {
5707     TCGv val;
5708     TCGv acc;
5709     SRC_EA(env, val, OS_LONG, 0, NULL);
5710     acc = tcg_constant_i32((insn & 0x400) ? 2 : 0);
5711     if (s->env->macsr & MACSR_FI)
5712         gen_helper_set_mac_extf(tcg_env, val, acc);
5713     else if (s->env->macsr & MACSR_SU)
5714         gen_helper_set_mac_exts(tcg_env, val, acc);
5715     else
5716         gen_helper_set_mac_extu(tcg_env, val, acc);
5717 }
5718 
5719 static disas_proc opcode_table[65536];
5720 
5721 static void
5722 register_opcode (disas_proc proc, uint16_t opcode, uint16_t mask)
5723 {
5724   int i;
5725   int from;
5726   int to;
5727 
5728   /* Sanity check.  All set bits must be included in the mask.  */
5729   if (opcode & ~mask) {
5730       fprintf(stderr,
5731               "qemu internal error: bogus opcode definition %04x/%04x\n",
5732               opcode, mask);
5733       abort();
5734   }
5735   /*
5736    * This could probably be cleverer.  For now just optimize the case where
5737    * the top bits are known.
5738    */
5739   /* Find the first zero bit in the mask.  */
5740   i = 0x8000;
5741   while ((i & mask) != 0)
5742       i >>= 1;
5743   /* Iterate over all combinations of this and lower bits.  */
5744   if (i == 0)
5745       i = 1;
5746   else
5747       i <<= 1;
5748   from = opcode & ~(i - 1);
5749   to = from + i;
5750   for (i = from; i < to; i++) {
5751       if ((i & mask) == opcode)
5752           opcode_table[i] = proc;
5753   }
5754 }
5755 
5756 /*
5757  * Register m68k opcode handlers.  Order is important.
5758  * Later insn override earlier ones.
5759  */
void register_m68k_insns (CPUM68KState *env)
{
    /*
     * Build the opcode table only once to avoid
     * multithreading issues.
     */
    if (opcode_table[0] != NULL) {
        return;
    }

    /*
     * use BASE() for instruction available
     * for CF_ISA_A and M68000.
     */
#define BASE(name, opcode, mask) \
    register_opcode(disas_##name, 0x##opcode, 0x##mask)
#define INSN(name, opcode, mask, feature) do { \
    if (m68k_feature(env, M68K_FEATURE_##feature)) \
        BASE(name, opcode, mask); \
    } while(0)
    /* Default everything to 'undef'; later registrations override it. */
    BASE(undef,     0000, 0000);
    INSN(arith_im,  0080, fff8, CF_ISA_A);
    INSN(arith_im,  0000, ff00, M68K);
    INSN(chk2,      00c0, f9c0, CHK2);
    INSN(bitrev,    00c0, fff8, CF_ISA_APLUSC);
    BASE(bitop_reg, 0100, f1c0);
    BASE(bitop_reg, 0140, f1c0);
    BASE(bitop_reg, 0180, f1c0);
    BASE(bitop_reg, 01c0, f1c0);
    INSN(movep,     0108, f138, MOVEP);
    INSN(arith_im,  0280, fff8, CF_ISA_A);
    INSN(arith_im,  0200, ff00, M68K);
    INSN(undef,     02c0, ffc0, M68K);
    INSN(byterev,   02c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0480, fff8, CF_ISA_A);
    INSN(arith_im,  0400, ff00, M68K);
    INSN(undef,     04c0, ffc0, M68K);
    INSN(arith_im,  0600, ff00, M68K);
    INSN(undef,     06c0, ffc0, M68K);
    INSN(ff1,       04c0, fff8, CF_ISA_APLUSC);
    INSN(arith_im,  0680, fff8, CF_ISA_A);
    INSN(arith_im,  0c00, ff38, CF_ISA_A);
    INSN(arith_im,  0c00, ff00, M68K);
    BASE(bitop_im,  0800, ffc0);
    BASE(bitop_im,  0840, ffc0);
    BASE(bitop_im,  0880, ffc0);
    BASE(bitop_im,  08c0, ffc0);
    INSN(arith_im,  0a80, fff8, CF_ISA_A);
    INSN(arith_im,  0a00, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(moves,     0e00, ff00, M68K);
#endif
    INSN(cas,       0ac0, ffc0, CAS);
    INSN(cas,       0cc0, ffc0, CAS);
    INSN(cas,       0ec0, ffc0, CAS);
    INSN(cas2w,     0cfc, ffff, CAS);
    INSN(cas2l,     0efc, ffff, CAS);
    BASE(move,      1000, f000);
    BASE(move,      2000, f000);
    BASE(move,      3000, f000);
    INSN(chk,       4000, f040, M68K);
    INSN(strldsr,   40e7, ffff, CF_ISA_APLUSC);
    INSN(negx,      4080, fff8, CF_ISA_A);
    INSN(negx,      4000, ff00, M68K);
    INSN(undef,     40c0, ffc0, M68K);
    INSN(move_from_sr, 40c0, fff8, CF_ISA_A);
    INSN(move_from_sr, 40c0, ffc0, M68K);
    BASE(lea,       41c0, f1c0);
    BASE(clr,       4200, ff00);
    BASE(undef,     42c0, ffc0);
    INSN(move_from_ccr, 42c0, fff8, CF_ISA_A);
    INSN(move_from_ccr, 42c0, ffc0, M68K);
    INSN(neg,       4480, fff8, CF_ISA_A);
    INSN(neg,       4400, ff00, M68K);
    INSN(undef,     44c0, ffc0, M68K);
    BASE(move_to_ccr, 44c0, ffc0);
    INSN(not,       4680, fff8, CF_ISA_A);
    INSN(not,       4600, ff00, M68K);
#if !defined(CONFIG_USER_ONLY)
    BASE(move_to_sr, 46c0, ffc0);
#endif
    INSN(nbcd,      4800, ffc0, M68K);
    INSN(linkl,     4808, fff8, M68K);
    BASE(pea,       4840, ffc0);
    BASE(swap,      4840, fff8);
    INSN(bkpt,      4848, fff8, BKPT);
    INSN(movem,     48d0, fbf8, CF_ISA_A);
    INSN(movem,     48e8, fbf8, CF_ISA_A);
    INSN(movem,     4880, fb80, M68K);
    BASE(ext,       4880, fff8);
    BASE(ext,       48c0, fff8);
    BASE(ext,       49c0, fff8);
    BASE(tst,       4a00, ff00);
    INSN(tas,       4ac0, ffc0, CF_ISA_B);
    INSN(tas,       4ac0, ffc0, M68K);
#if !defined(CONFIG_USER_ONLY)
    INSN(halt,      4ac8, ffff, CF_ISA_A);
    INSN(halt,      4ac8, ffff, M68K);
#endif
    INSN(pulse,     4acc, ffff, CF_ISA_A);
    BASE(illegal,   4afc, ffff);
    INSN(mull,      4c00, ffc0, CF_ISA_A);
    INSN(mull,      4c00, ffc0, LONG_MULDIV);
    INSN(divl,      4c40, ffc0, CF_ISA_A);
    INSN(divl,      4c40, ffc0, LONG_MULDIV);
    INSN(sats,      4c80, fff8, CF_ISA_B);
    BASE(trap,      4e40, fff0);
    BASE(link,      4e50, fff8);
    BASE(unlk,      4e58, fff8);
#if !defined(CONFIG_USER_ONLY)
    INSN(move_to_usp, 4e60, fff8, USP);
    INSN(move_from_usp, 4e68, fff8, USP);
    INSN(reset,     4e70, ffff, M68K);
    BASE(stop,      4e72, ffff);
    BASE(rte,       4e73, ffff);
    INSN(cf_movec,  4e7b, ffff, CF_ISA_A);
    INSN(m68k_movec, 4e7a, fffe, MOVEC);
#endif
    BASE(nop,       4e71, ffff);
    INSN(rtd,       4e74, ffff, RTD);
    BASE(rts,       4e75, ffff);
    INSN(trapv,     4e76, ffff, M68K);
    INSN(rtr,       4e77, ffff, M68K);
    BASE(jump,      4e80, ffc0);
    BASE(jump,      4ec0, ffc0);
    INSN(addsubq,   5000, f080, M68K);
    BASE(addsubq,   5080, f0c0);
    INSN(scc,       50c0, f0f8, CF_ISA_A); /* Scc.B Dx   */
    INSN(scc,       50c0, f0c0, M68K);     /* Scc.B <EA> */
    INSN(dbcc,      50c8, f0f8, M68K);
    INSN(trapcc,    50fa, f0fe, TRAPCC);   /* opmode 010, 011 */
    INSN(trapcc,    50fc, f0ff, TRAPCC);   /* opmode 100 */
    INSN(trapcc,    51fa, fffe, CF_ISA_A); /* TPF (trapf) opmode 010, 011 */
    INSN(trapcc,    51fc, ffff, CF_ISA_A); /* TPF (trapf) opmode 100 */

    /* Branch instructions.  */
    BASE(branch,    6000, f000);
    /* Disable long branch instructions, then add back the ones we want.  */
    BASE(undef,     60ff, f0ff); /* All long branches.  */
    INSN(branch,    60ff, f0ff, CF_ISA_B);
    INSN(undef,     60ff, ffff, CF_ISA_B); /* bra.l */
    INSN(branch,    60ff, ffff, BRAL);
    INSN(branch,    60ff, f0ff, BCCL);

    BASE(moveq,     7000, f100);
    INSN(mvzs,      7100, f100, CF_ISA_B);
    BASE(or,        8000, f000);
    BASE(divw,      80c0, f0c0);
    INSN(sbcd_reg,  8100, f1f8, M68K);
    INSN(sbcd_mem,  8108, f1f8, M68K);
    BASE(addsub,    9000, f000);
    INSN(undef,     90c0, f0c0, CF_ISA_A);
    INSN(subx_reg,  9180, f1f8, CF_ISA_A);
    INSN(subx_reg,  9100, f138, M68K);
    INSN(subx_mem,  9108, f138, M68K);
    INSN(suba,      91c0, f1c0, CF_ISA_A);
    INSN(suba,      90c0, f0c0, M68K);

    /* Line-A: ColdFire EMAC instructions. */
    BASE(undef_mac, a000, f000);
    INSN(mac,       a000, f100, CF_EMAC);
    INSN(from_mac,  a180, f9b0, CF_EMAC);
    INSN(move_mac,  a110, f9fc, CF_EMAC);
    INSN(from_macsr,a980, f9f0, CF_EMAC);
    INSN(from_mask, ad80, fff0, CF_EMAC);
    INSN(from_mext, ab80, fbf0, CF_EMAC);
    INSN(macsr_to_ccr, a9c0, ffff, CF_EMAC);
    INSN(to_mac,    a100, f9c0, CF_EMAC);
    INSN(to_macsr,  a900, ffc0, CF_EMAC);
    INSN(to_mext,   ab00, fbc0, CF_EMAC);
    INSN(to_mask,   ad00, ffc0, CF_EMAC);

    INSN(mov3q,     a140, f1c0, CF_ISA_B);
    INSN(cmp,       b000, f1c0, CF_ISA_B); /* cmp.b */
    INSN(cmp,       b040, f1c0, CF_ISA_B); /* cmp.w */
    INSN(cmpa,      b0c0, f1c0, CF_ISA_B); /* cmpa.w */
    INSN(cmp,       b080, f1c0, CF_ISA_A);
    INSN(cmpa,      b1c0, f1c0, CF_ISA_A);
    INSN(cmp,       b000, f100, M68K);
    INSN(eor,       b100, f100, M68K);
    INSN(cmpm,      b108, f138, M68K);
    INSN(cmpa,      b0c0, f0c0, M68K);
    INSN(eor,       b180, f1c0, CF_ISA_A);
    BASE(and,       c000, f000);
    INSN(exg_dd,    c140, f1f8, M68K);
    INSN(exg_aa,    c148, f1f8, M68K);
    INSN(exg_da,    c188, f1f8, M68K);
    BASE(mulw,      c0c0, f0c0);
    INSN(abcd_reg,  c100, f1f8, M68K);
    INSN(abcd_mem,  c108, f1f8, M68K);
    BASE(addsub,    d000, f000);
    INSN(undef,     d0c0, f0c0, CF_ISA_A);
    INSN(addx_reg,      d180, f1f8, CF_ISA_A);
    INSN(addx_reg,  d100, f138, M68K);
    INSN(addx_mem,  d108, f138, M68K);
    INSN(adda,      d1c0, f1c0, CF_ISA_A);
    INSN(adda,      d0c0, f0c0, M68K);
    INSN(shift_im,  e080, f0f0, CF_ISA_A);
    INSN(shift_reg, e0a0, f0f0, CF_ISA_A);
    INSN(shift8_im, e000, f0f0, M68K);
    INSN(shift16_im, e040, f0f0, M68K);
    INSN(shift_im,  e080, f0f0, M68K);
    INSN(shift8_reg, e020, f0f0, M68K);
    INSN(shift16_reg, e060, f0f0, M68K);
    INSN(shift_reg, e0a0, f0f0, M68K);
    INSN(shift_mem, e0c0, fcc0, M68K);
    INSN(rotate_im, e090, f0f0, M68K);
    INSN(rotate8_im, e010, f0f0, M68K);
    INSN(rotate16_im, e050, f0f0, M68K);
    INSN(rotate_reg, e0b0, f0f0, M68K);
    INSN(rotate8_reg, e030, f0f0, M68K);
    INSN(rotate16_reg, e070, f0f0, M68K);
    INSN(rotate_mem, e4c0, fcc0, M68K);
    INSN(bfext_mem, e9c0, fdc0, BITFIELD);  /* bfextu & bfexts */
    INSN(bfext_reg, e9c0, fdf8, BITFIELD);
    INSN(bfins_mem, efc0, ffc0, BITFIELD);
    INSN(bfins_reg, efc0, fff8, BITFIELD);
    INSN(bfop_mem, eac0, ffc0, BITFIELD);   /* bfchg */
    INSN(bfop_reg, eac0, fff8, BITFIELD);   /* bfchg */
    INSN(bfop_mem, ecc0, ffc0, BITFIELD);   /* bfclr */
    INSN(bfop_reg, ecc0, fff8, BITFIELD);   /* bfclr */
    INSN(bfop_mem, edc0, ffc0, BITFIELD);   /* bfffo */
    INSN(bfop_reg, edc0, fff8, BITFIELD);   /* bfffo */
    INSN(bfop_mem, eec0, ffc0, BITFIELD);   /* bfset */
    INSN(bfop_reg, eec0, fff8, BITFIELD);   /* bfset */
    INSN(bfop_mem, e8c0, ffc0, BITFIELD);   /* bftst */
    INSN(bfop_reg, e8c0, fff8, BITFIELD);   /* bftst */
    /* Line-F: FPU and system control instructions. */
    BASE(undef_fpu, f000, f000);
    INSN(fpu,       f200, ffc0, CF_FPU);
    INSN(fbcc,      f280, ffc0, CF_FPU);
    INSN(fpu,       f200, ffc0, FPU);
    INSN(fscc,      f240, ffc0, FPU);
    INSN(ftrapcc,   f27a, fffe, FPU);       /* opmode 010, 011 */
    INSN(ftrapcc,   f27c, ffff, FPU);       /* opmode 100 */
    INSN(fbcc,      f280, ff80, FPU);
#if !defined(CONFIG_USER_ONLY)
    INSN(frestore,  f340, ffc0, CF_FPU);
    INSN(fsave,     f300, ffc0, CF_FPU);
    INSN(frestore,  f340, ffc0, FPU);
    INSN(fsave,     f300, ffc0, FPU);
    INSN(intouch,   f340, ffc0, CF_ISA_A);
    INSN(cpushl,    f428, ff38, CF_ISA_A);
    INSN(cpush,     f420, ff20, M68040);
    INSN(cinv,      f400, ff20, M68040);
    INSN(pflush,    f500, ffe0, M68040);
    INSN(ptest,     f548, ffd8, M68040);
    INSN(wddata,    fb00, ff00, CF_ISA_A);
    INSN(wdebug,    fbc0, ffc0, CF_ISA_A);
#endif
    INSN(move16_mem, f600, ffe0, M68040);
    INSN(move16_reg, f620, fff8, M68040);
#undef INSN
}
6012 
6013 static void m68k_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
6014 {
6015     DisasContext *dc = container_of(dcbase, DisasContext, base);
6016     CPUM68KState *env = cpu_env(cpu);
6017 
6018     dc->env = env;
6019     dc->pc = dc->base.pc_first;
6020     /* This value will always be filled in properly before m68k_tr_tb_stop. */
6021     dc->pc_prev = 0xdeadbeef;
6022     dc->cc_op = CC_OP_DYNAMIC;
6023     dc->cc_op_synced = 1;
6024     dc->done_mac = 0;
6025     dc->writeback_mask = 0;
6026 
6027     dc->ss_active = (M68K_SR_TRACE(env->sr) == M68K_SR_TRACE_ANY_INS);
6028     /* If architectural single step active, limit to 1 */
6029     if (dc->ss_active) {
6030         dc->base.max_insns = 1;
6031     }
6032 }
6033 
static void m68k_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    /* No per-TB setup is needed for m68k. */
}
6037 
6038 static void m68k_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
6039 {
6040     DisasContext *dc = container_of(dcbase, DisasContext, base);
6041     tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
6042 }
6043 
6044 static void m68k_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
6045 {
6046     DisasContext *dc = container_of(dcbase, DisasContext, base);
6047     CPUM68KState *env = cpu_env(cpu);
6048     uint16_t insn = read_im16(env, dc);
6049 
6050     opcode_table[insn](env, dc, insn);
6051     do_writebacks(dc);
6052 
6053     dc->pc_prev = dc->base.pc_next;
6054     dc->base.pc_next = dc->pc;
6055 
6056     if (dc->base.is_jmp == DISAS_NEXT) {
6057         /*
6058          * Stop translation when the next insn might touch a new page.
6059          * This ensures that prefetch aborts at the right place.
6060          *
6061          * We cannot determine the size of the next insn without
6062          * completely decoding it.  However, the maximum insn size
6063          * is 32 bytes, so end if we do not have that much remaining.
6064          * This may produce several small TBs at the end of each page,
6065          * but they will all be linked with goto_tb.
6066          *
6067          * ??? ColdFire maximum is 4 bytes; MC68000's maximum is also
6068          * smaller than MC68020's.
6069          */
6070         target_ulong start_page_offset
6071             = dc->pc - (dc->base.pc_first & TARGET_PAGE_MASK);
6072 
6073         if (start_page_offset >= TARGET_PAGE_SIZE - 32) {
6074             dc->base.is_jmp = DISAS_TOO_MANY;
6075         }
6076     }
6077 }
6078 
6079 static void m68k_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
6080 {
6081     DisasContext *dc = container_of(dcbase, DisasContext, base);
6082 
6083     switch (dc->base.is_jmp) {
6084     case DISAS_NORETURN:
6085         break;
6086     case DISAS_TOO_MANY:
6087         update_cc_op(dc);
6088         gen_jmp_tb(dc, 0, dc->pc, dc->pc_prev);
6089         break;
6090     case DISAS_JUMP:
6091         /* We updated CC_OP and PC in gen_jmp/gen_jmp_im.  */
6092         if (dc->ss_active) {
6093             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6094         } else {
6095             tcg_gen_lookup_and_goto_ptr();
6096         }
6097         break;
6098     case DISAS_EXIT:
6099         /*
6100          * We updated CC_OP and PC in gen_exit_tb, but also modified
6101          * other state that may require returning to the main loop.
6102          */
6103         if (dc->ss_active) {
6104             gen_raise_exception_format2(dc, EXCP_TRACE, dc->pc_prev);
6105         } else {
6106             tcg_gen_exit_tb(NULL, 0);
6107         }
6108         break;
6109     default:
6110         g_assert_not_reached();
6111     }
6112 }
6113 
/* Hooks invoked by the generic translator_loop for each phase. */
static const TranslatorOps m68k_tr_ops = {
    .init_disas_context = m68k_tr_init_disas_context,
    .tb_start           = m68k_tr_tb_start,
    .insn_start         = m68k_tr_insn_start,
    .translate_insn     = m68k_tr_translate_insn,
    .tb_stop            = m68k_tr_tb_stop,
};
6121 
6122 void m68k_translate_code(CPUState *cpu, TranslationBlock *tb,
6123                          int *max_insns, vaddr pc, void *host_pc)
6124 {
6125     DisasContext dc;
6126     translator_loop(cpu, tb, max_insns, pc, host_pc, &m68k_tr_ops, &dc.base);
6127 }
6128 
6129 static double floatx80_to_double(CPUM68KState *env, uint16_t high, uint64_t low)
6130 {
6131     floatx80 a = { .high = high, .low = low };
6132     union {
6133         float64 f64;
6134         double d;
6135     } u;
6136 
6137     u.f64 = floatx80_to_float64(a, &env->fp_status);
6138     return u.d;
6139 }
6140 
/* Print the CPU register state (monitor "info registers" / -d cpu log). */
void m68k_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    CPUM68KState *env = cpu_env(cs);
    int i;
    uint16_t sr;
    /* Data, address, and FP registers; FP shown raw and as a double. */
    for (i = 0; i < 8; i++) {
        qemu_fprintf(f, "D%d = %08x   A%d = %08x   "
                     "F%d = %04x %016"PRIx64"  (%12g)\n",
                     i, env->dregs[i], i, env->aregs[i],
                     i, env->fregs[i].l.upper, env->fregs[i].l.lower,
                     floatx80_to_double(env, env->fregs[i].l.upper,
                                        env->fregs[i].l.lower));
    }
    qemu_fprintf(f, "PC = %08x   ", env->pc);
    /* Fold the dynamically-computed CCR flags into the stored SR. */
    sr = env->sr | cpu_m68k_get_ccr(env);
    qemu_fprintf(f, "SR = %04x T:%x I:%x %c%c %c%c%c%c%c\n",
                 sr, (sr & SR_T) >> SR_T_SHIFT, (sr & SR_I) >> SR_I_SHIFT,
                 (sr & SR_S) ? 'S' : 'U', (sr & SR_M) ? '%' : 'I',
                 (sr & CCF_X) ? 'X' : '-', (sr & CCF_N) ? 'N' : '-',
                 (sr & CCF_Z) ? 'Z' : '-', (sr & CCF_V) ? 'V' : '-',
                 (sr & CCF_C) ? 'C' : '-');
    qemu_fprintf(f, "FPSR = %08x %c%c%c%c ", env->fpsr,
                 (env->fpsr & FPSR_CC_A) ? 'A' : '-',
                 (env->fpsr & FPSR_CC_I) ? 'I' : '-',
                 (env->fpsr & FPSR_CC_Z) ? 'Z' : '-',
                 (env->fpsr & FPSR_CC_N) ? 'N' : '-');
    qemu_fprintf(f, "\n                                "
                 "FPCR =     %04x ", env->fpcr);
    /* Decode FPCR rounding-precision field. */
    switch (env->fpcr & FPCR_PREC_MASK) {
    case FPCR_PREC_X:
        qemu_fprintf(f, "X ");
        break;
    case FPCR_PREC_S:
        qemu_fprintf(f, "S ");
        break;
    case FPCR_PREC_D:
        qemu_fprintf(f, "D ");
        break;
    }
    /* Decode FPCR rounding-mode field. */
    switch (env->fpcr & FPCR_RND_MASK) {
    case FPCR_RND_N:
        qemu_fprintf(f, "RN ");
        break;
    case FPCR_RND_Z:
        qemu_fprintf(f, "RZ ");
        break;
    case FPCR_RND_M:
        qemu_fprintf(f, "RM ");
        break;
    case FPCR_RND_P:
        qemu_fprintf(f, "RP ");
        break;
    }
    qemu_fprintf(f, "\n");
#ifndef CONFIG_USER_ONLY
    /* System-mode extras: stack pointers (arrow marks the active one),
       control registers, and MMU state. */
    qemu_fprintf(f, "%sA7(MSP) = %08x %sA7(USP) = %08x %sA7(ISP) = %08x\n",
                 env->current_sp == M68K_SSP ? "->" : "  ", env->sp[M68K_SSP],
                 env->current_sp == M68K_USP ? "->" : "  ", env->sp[M68K_USP],
                 env->current_sp == M68K_ISP ? "->" : "  ", env->sp[M68K_ISP]);
    qemu_fprintf(f, "VBR = 0x%08x\n", env->vbr);
    qemu_fprintf(f, "SFC = %x DFC %x\n", env->sfc, env->dfc);
    qemu_fprintf(f, "SSW %08x TCR %08x URP %08x SRP %08x\n",
                 env->mmu.ssw, env->mmu.tcr, env->mmu.urp, env->mmu.srp);
    qemu_fprintf(f, "DTTR0/1: %08x/%08x ITTR0/1: %08x/%08x\n",
                 env->mmu.ttr[M68K_DTTR0], env->mmu.ttr[M68K_DTTR1],
                 env->mmu.ttr[M68K_ITTR0], env->mmu.ttr[M68K_ITTR1]);
    qemu_fprintf(f, "MMUSR %08x, fault at %08x\n",
                 env->mmu.mmusr, env->mmu.ar);
#endif /* !CONFIG_USER_ONLY */
}
6211