xref: /qemu/target/mips/tcg/op_helper.c (revision 0debf1400c000154948e8a6fcb89c3149d4e0880)
1 /*
2  *  MIPS emulation helpers for qemu.
3  *
4  *  Copyright (c) 2004-2005 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  *
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/memop.h"
27 #include "fpu_helper.h"
28 
29 /*****************************************************************************/
30 /* Exceptions processing helpers */
31 
/*
 * Raise a guest exception with an error code.
 * The retaddr of 0 skips TCG unwinding — callers are expected to have
 * already synchronized env->active_tc.PC before invoking this helper.
 */
void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                int error_code)
{
    do_raise_exception_err(env, exception, error_code, 0);
}
37 
/*
 * Raise a guest exception, unwinding guest state from the host return
 * address (GETPC() must be evaluated directly in the helper).
 */
void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, GETPC());
}
42 
/* Enter the debugger; PC is assumed to be already synchronized (retaddr 0). */
void helper_raise_exception_debug(CPUMIPSState *env)
{
    do_raise_exception(env, EXCP_DEBUG, 0);
}
47 
/* Internal variant: raise an exception without unwinding (retaddr 0). */
static void raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, 0);
}
52 
53 /* 64 bits arithmetic for 32 bits hosts */
54 static inline uint64_t get_HILO(CPUMIPSState *env)
55 {
56     return ((uint64_t)(env->active_tc.HI[0]) << 32) |
57            (uint32_t)env->active_tc.LO[0];
58 }
59 
60 static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
61 {
62     env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
63     return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
64 }
65 
66 static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
67 {
68     target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
69     env->active_tc.HI[0] = (int32_t)(HILO >> 32);
70     return tmp;
71 }
72 
73 /* Multiplication variants of the vr54xx. */
74 target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
75                          target_ulong arg2)
76 {
77     return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
78                                  (int64_t)(int32_t)arg2));
79 }
80 
81 target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
82                           target_ulong arg2)
83 {
84     return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
85                        (uint64_t)(uint32_t)arg2);
86 }
87 
88 target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
89                          target_ulong arg2)
90 {
91     return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
92                        (int64_t)(int32_t)arg2);
93 }
94 
95 target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
96                            target_ulong arg2)
97 {
98     return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
99                        (int64_t)(int32_t)arg2);
100 }
101 
102 target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
103                           target_ulong arg2)
104 {
105     return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
106                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
107 }
108 
109 target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
110                             target_ulong arg2)
111 {
112     return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
113                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
114 }
115 
116 target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
117                          target_ulong arg2)
118 {
119     return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
120                        (int64_t)(int32_t)arg2);
121 }
122 
123 target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
124                            target_ulong arg2)
125 {
126     return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
127                        (int64_t)(int32_t)arg2);
128 }
129 
130 target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
131                           target_ulong arg2)
132 {
133     return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
134                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
135 }
136 
137 target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
138                             target_ulong arg2)
139 {
140     return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
141                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
142 }
143 
144 target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
145                           target_ulong arg2)
146 {
147     return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
148 }
149 
150 target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
151                            target_ulong arg2)
152 {
153     return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
154                        (uint64_t)(uint32_t)arg2);
155 }
156 
157 target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
158                            target_ulong arg2)
159 {
160     return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
161                        (int64_t)(int32_t)arg2);
162 }
163 
164 target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
165                             target_ulong arg2)
166 {
167     return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
168                        (uint64_t)(uint32_t)arg2);
169 }
170 
171 static inline target_ulong bitswap(target_ulong v)
172 {
173     v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
174               ((v & (target_ulong)0x5555555555555555ULL) << 1);
175     v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
176               ((v & (target_ulong)0x3333333333333333ULL) << 2);
177     v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
178               ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
179     return v;
180 }
181 
#ifdef TARGET_MIPS64
/* DBITSWAP: reverse the bits within each byte of the 64-bit value rt. */
target_ulong helper_dbitswap(target_ulong rt)
{
    return bitswap(rt);
}
#endif
188 
/*
 * BITSWAP: reverse the bits within each byte of the low 32 bits of rt;
 * the 32-bit result is sign-extended to target_ulong.
 */
target_ulong helper_bitswap(target_ulong rt)
{
    return (int32_t)bitswap(rt);
}
193 
/*
 * nanoMIPS ROTX: bit-rotate/exchange of the 32-bit value rs, controlled
 * by shift, shiftx and stripe.  Implemented as a five-stage conditional
 * shift network (by 16, 8, 4, 2, 1) over the value replicated into both
 * halves of a 64-bit word; each stage copies bit (i + distance) down to
 * bit i for the bit slices selected by the relevant control bit.
 * NOTE(review): stage structure follows the architectural pseudocode —
 * confirm against the nanoMIPS ISA ROTX description before changing.
 */
target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
                        uint32_t stripe)
{
    int i;
    /* Replicate rs into both 32-bit halves so rotation can wrap. */
    uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
    uint64_t tmp1 = tmp0;
    /* Stage 1: conditional shift by 16 (control bit 4 of shift/shiftx). */
    for (i = 0; i <= 46; i++) {
        int s;
        if (i & 0x8) {
            s = shift;
        } else {
            s = shiftx;
        }

        /* Striped mode inverts the selector for every other 4-bit slice. */
        if (stripe != 0 && !(i & 0x4)) {
            s = ~s;
        }
        if (s & 0x10) {
            if (tmp0 & (1LL << (i + 16))) {
                tmp1 |= 1LL << i;
            } else {
                tmp1 &= ~(1LL << i);
            }
        }
    }

    /* Stage 2: conditional shift by 8 (control bit 3). */
    uint64_t tmp2 = tmp1;
    for (i = 0; i <= 38; i++) {
        int s;
        if (i & 0x4) {
            s = shift;
        } else {
            s = shiftx;
        }

        if (s & 0x8) {
            if (tmp1 & (1LL << (i + 8))) {
                tmp2 |= 1LL << i;
            } else {
                tmp2 &= ~(1LL << i);
            }
        }
    }

    /* Stage 3: conditional shift by 4 (control bit 2). */
    uint64_t tmp3 = tmp2;
    for (i = 0; i <= 34; i++) {
        int s;
        if (i & 0x2) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x4) {
            if (tmp2 & (1LL << (i + 4))) {
                tmp3 |= 1LL << i;
            } else {
                tmp3 &= ~(1LL << i);
            }
        }
    }

    /* Stage 4: conditional shift by 2 (control bit 1). */
    uint64_t tmp4 = tmp3;
    for (i = 0; i <= 32; i++) {
        int s;
        if (i & 0x1) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x2) {
            if (tmp3 & (1LL << (i + 2))) {
                tmp4 |= 1LL << i;
            } else {
                tmp4 &= ~(1LL << i);
            }
        }
    }

    /* Stage 5: conditional shift by 1 (control bit 0 of shift only). */
    uint64_t tmp5 = tmp4;
    for (i = 0; i <= 31; i++) {
        int s;
        s = shift;
        if (s & 0x1) {
            if (tmp4 & (1LL << (i + 1))) {
                tmp5 |= 1LL << i;
            } else {
                tmp5 &= ~(1LL << i);
            }
        }
    }

    /* Return the low 32 bits, sign-extended. */
    return (int64_t)(int32_t)(uint32_t)tmp5;
}
287 
#ifndef CONFIG_USER_ONLY

/*
 * Emulate LL/LLD: after an alignment check, record the translated lock
 * address (CP0_LLAddr), the virtual address (lladdr) and the loaded
 * value (llval) so a subsequent SC/SCD can detect intervening stores.
 * An unaligned address raises AdEL, latching BadVAddr unless in debug mode.
 */
#define HELPER_LD_ATOMIC(name, insn, almask, do_cast)                         \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx)  \
{                                                                             \
    if (arg & almask) {                                                       \
        if (!(env->hflags & MIPS_HFLAG_DM)) {                                 \
            env->CP0_BadVAddr = arg;                                          \
        }                                                                     \
        do_raise_exception(env, EXCP_AdEL, GETPC());                          \
    }                                                                         \
    env->CP0_LLAddr = cpu_mips_translate_address(env, arg, MMU_DATA_LOAD,     \
                                                 GETPC());                    \
    env->lladdr = arg;                                                        \
    env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC());  \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t))
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong))
#endif
#undef HELPER_LD_ATOMIC
#endif
311 
/*
 * Helpers for the unaligned SWL/SWR emulation below.  GET_LMASK maps an
 * address to its byte position within the containing word, adjusted for
 * endianness; GET_OFFSET steps toward the "rest" of the word in the
 * direction appropriate for the target's byte order.
 */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
319 
/*
 * SWL: store the most-significant part of arg1 to the (possibly
 * unaligned) address arg2, one byte at a time down to the word boundary.
 * How many bytes are stored depends on the address's position in its word.
 */
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());

    if (GET_LMASK(arg2) <= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) <= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) == 0) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1,
                          mem_idx, GETPC());
    }
}
340 
/*
 * SWR: store the least-significant part of arg1 to the (possibly
 * unaligned) address arg2, one byte at a time up to the word boundary.
 */
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK(arg2) >= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) >= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) == 3) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
                          mem_idx, GETPC());
    }
}
361 
#if defined(TARGET_MIPS64)
/*
 * "half" load and stores.  We must do the memory access inline,
 * or fault handling won't work.
 */
/* Byte position of an address within its doubleword, endian-adjusted. */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
372 
/*
 * SDL: 64-bit analogue of SWL — store the most-significant bytes of
 * arg1 byte-by-byte down to the doubleword boundary containing arg2.
 */
void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());

    if (GET_LMASK64(arg2) <= 6) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 5) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 4) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 3) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 0) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1,
                          mem_idx, GETPC());
    }
}
413 
/*
 * SDR: 64-bit analogue of SWR — store the least-significant bytes of
 * arg1 byte-by-byte up to the doubleword boundary containing arg2.
 */
void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK64(arg2) >= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 3) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 4) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 5) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 6) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) == 7) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56),
                          mem_idx, GETPC());
    }
}
454 #endif /* TARGET_MIPS64 */
455 
/* GPR numbers handled by LWM/SWM/LDM/SDM: s0-s7 plus s8/fp (r30). */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
457 
/*
 * LWM: load up to (reglist & 0xf) consecutive 32-bit words into the
 * registers listed in multiple_regs, sign-extending each; bit 4 of
 * reglist additionally loads ra (r31).
 */
void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] =
                (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] =
            (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
    }
}
479 
480 void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
481                 uint32_t mem_idx)
482 {
483     target_ulong base_reglist = reglist & 0xf;
484     target_ulong do_r31 = reglist & 0x10;
485 
486     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
487         target_ulong i;
488 
489         for (i = 0; i < base_reglist; i++) {
490             cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
491                               mem_idx, GETPC());
492             addr += 4;
493         }
494     }
495 
496     if (do_r31) {
497         cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
498     }
499 }
500 
501 #if defined(TARGET_MIPS64)
/*
 * LDM: 64-bit analogue of LWM — load consecutive doublewords into the
 * registers listed in multiple_regs, plus ra (r31) if bit 4 is set.
 */
void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] =
                cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] =
            cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
    }
}
523 
/*
 * SDM: 64-bit analogue of SWM — store consecutive doublewords from the
 * registers listed in multiple_regs, plus ra (r31) if bit 4 is set.
 */
void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
                              mem_idx, GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
    }
}
544 #endif
545 
546 
/* MT ASE FORK: allocation of a dynamic TC is not implemented — no-op. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    /*
     * arg1 = rt, arg2 = rs
     * TODO: store to TC register
     */
}
554 
/*
 * MT ASE YIELD: partial implementation.  No scheduling policy exists, so
 * the helper only raises the Thread exception with the appropriate
 * VPEControl.EXCPT code where the architecture requires it, and returns
 * YQMask.  Negative arg other than -2 with YSI set and the TC in
 * dynamic-allocation mode signals "yield scheduler intervention" (code 4);
 * positive arg signals unimplemented qualifier bits (code 2).
 */
target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(env, EXCP_THREAD, GETPC());
            }
        }
    } else if (arg1 == 0) {
        if (0) {
            /* TODO: TC underflow */
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(env, EXCP_THREAD, GETPC());
        } else {
            /* TODO: Deallocate TC */
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(env, EXCP_THREAD, GETPC());
    }
    return env->CP0_YQMask;
}
585 
586 #ifndef CONFIG_USER_ONLY
587 /* TLB management */
588 static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
589 {
590     /* Discard entries from env->tlb[first] onwards.  */
591     while (env->tlb->tlb_in_use > first) {
592         r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
593     }
594 }
595 
/*
 * Extract the page frame number from an EntryLo register value.
 * On 32-bit targets the XPA PFNX field (bits 63..32 of the extended
 * register) supplies the bits above the 24-bit PFN field.
 */
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}
605 
/*
 * Fill softmmu TLB entry idx from the CP0 EntryHi/EntryLo0/EntryLo1/
 * PageMask registers.  If EntryHi.EHINV is set the entry is simply
 * marked invalid.
 */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    /* An entry is global only if both EntryLo G bits are set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    /* Mask off PFN bits covered by PageMask before scaling to bytes. */
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}
639 
640 void r4k_helper_tlbinv(CPUMIPSState *env)
641 {
642     bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
643     uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
644     uint32_t MMID = env->CP0_MemoryMapID;
645     uint32_t tlb_mmid;
646     r4k_tlb_t *tlb;
647     int idx;
648 
649     MMID = mi ? MMID : (uint32_t) ASID;
650     for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
651         tlb = &env->tlb->mmu.r4k.tlb[idx];
652         tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
653         if (!tlb->G && tlb_mmid == MMID) {
654             tlb->EHINV = 1;
655         }
656     }
657     cpu_mips_tlb_flush(env);
658 }
659 
660 void r4k_helper_tlbinvf(CPUMIPSState *env)
661 {
662     int idx;
663 
664     for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
665         env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
666     }
667     cpu_mips_tlb_flush(env);
668 }
669 
/*
 * TLBWI: write the TLB entry selected by CP0_Index from the CP0
 * EntryHi/EntryLo/PageMask registers.  QEMU's cached translations are
 * discarded unless the write merely upgrades access permissions on the
 * same virtual mapping.
 */
void r4k_helper_tlbwi(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    /* Without Config5.MI, the ASID stands in for the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;

    /* Index is taken modulo the TLB size; the P bit (bit 31) is ignored. */
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1;

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}
717 
718 void r4k_helper_tlbwr(CPUMIPSState *env)
719 {
720     int r = cpu_mips_get_random(env);
721 
722     r4k_invalidate_tlb(env, r, 1);
723     r4k_fill_tlb(env, r);
724 }
725 
/*
 * TLBP: probe the TLB for an entry matching EntryHi.  On a hit,
 * CP0_Index is set to the matching index; on a miss the P bit (bit 31)
 * is set, and any matching shadow entries are discarded so they cannot
 * be found by a later lookup.
 */
void r4k_helper_tlbp(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    int i;

    /* Without Config5.MI, the ASID stands in for the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        /* Check ASID/MMID, virtual page number & size */
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            /* Check ASID/MMID, virtual page number & size */
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
778 
/*
 * Inverse of get_tlb_pfn_from_entrylo(): place a stored PFN back into
 * EntryLo register layout.  On 32-bit targets the high PFN bits go to
 * the XPA PFNX field (bits 63..32).
 */
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}
788 
/*
 * TLBR: read the TLB entry selected by CP0_Index back into the CP0
 * EntryHi/EntryLo/PageMask registers.  An EHINV entry reads back as
 * EntryHi.EHINV with everything else zero.
 */
void r4k_helper_tlbr(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    /* Without Config5.MI, the ASID stands in for the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB.  */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}
829 
/* Dispatch TLBWI through the MMU-model-specific implementation. */
void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}
834 
/* Dispatch TLBWR through the MMU-model-specific implementation. */
void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}
839 
/* Dispatch TLBP through the MMU-model-specific implementation. */
void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}
844 
/* Dispatch TLBR through the MMU-model-specific implementation. */
void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}
849 
/* Dispatch TLBINV through the MMU-model-specific implementation. */
void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}
854 
/* Dispatch TLBINVF through the MMU-model-specific implementation. */
void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}
859 
/*
 * GINVT back-end: invalidate TLB entries on one CPU according to the
 * broadcast invalidate message.  Entry selection depends on the type
 * flags: invAll drops all non-wired entries; invVA matches by virtual
 * address; invMMid matches non-global entries by MMID; invVAMMid
 * requires a VA match plus either global or an MMID match.
 */
static void global_invalidate_tlb(CPUMIPSState *env,
                           uint32_t invMsgVPN2,
                           uint8_t invMsgR,
                           uint32_t invMsgMMid,
                           bool invAll,
                           bool invVAMMid,
                           bool invMMid,
                           bool invVA)
{

    int idx;
    r4k_tlb_t *tlb;
    bool VAMatch;
    bool MMidMatch;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
#ifdef TARGET_MIPS64
            &&
            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
#endif
            );
        MMidMatch = tlb->MMID == invMsgMMid;
        if ((invAll && (idx > env->CP0_Wired)) ||
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            (VAMatch && invVA) ||
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}
894 
/*
 * GINVT: global TLB invalidate.  Decode the invalidate type from the
 * instruction's 2-bit type field and broadcast the resulting message to
 * every CPU in the system.
 */
void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
{
    bool invAll = type == 0;
    bool invVA = type == 1;
    bool invMMid = type == 2;
    bool invVAMMid = type == 3;
    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
    uint8_t invMsgR = 0;
    uint32_t invMsgMMid = env->CP0_MemoryMapID;
    CPUState *other_cs = first_cpu;

#ifdef TARGET_MIPS64
    /* The region bits (R field) only exist on 64-bit targets. */
    invMsgR = extract64(arg, 62, 2);
#endif

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
                              invAll, invVAMMid, invMMid, invVA);
    }
}
916 
917 /* Specials */
918 target_ulong helper_di(CPUMIPSState *env)
919 {
920     target_ulong t0 = env->CP0_Status;
921 
922     env->CP0_Status = t0 & ~(1 << CP0St_IE);
923     return t0;
924 }
925 
926 target_ulong helper_ei(CPUMIPSState *env)
927 {
928     target_ulong t0 = env->CP0_Status;
929 
930     env->CP0_Status = t0 | (1 << CP0St_IE);
931     return t0;
932 }
933 
/* Trace the CPU state relevant to ERET before the return is performed. */
static void debug_pre_eret(CPUMIPSState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL)) {
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        }
        if (env->hflags & MIPS_HFLAG_DM) {
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        }
        qemu_log("\n");
    }
}
948 
949 static void debug_post_eret(CPUMIPSState *env)
950 {
951     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
952         qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
953                 env->active_tc.PC, env->CP0_EPC);
954         if (env->CP0_Status & (1 << CP0St_ERL)) {
955             qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
956         }
957         if (env->hflags & MIPS_HFLAG_DM) {
958             qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
959         }
960         switch (cpu_mmu_index(env, false)) {
961         case 3:
962             qemu_log(", ERL\n");
963             break;
964         case MIPS_HFLAG_UM:
965             qemu_log(", UM\n");
966             break;
967         case MIPS_HFLAG_SM:
968             qemu_log(", SM\n");
969             break;
970         case MIPS_HFLAG_KM:
971             qemu_log("\n");
972             break;
973         default:
974             cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
975             break;
976         }
977     }
978 }
979 
980 static inline void exception_return(CPUMIPSState *env)
981 {
982     debug_pre_eret(env);
983     if (env->CP0_Status & (1 << CP0St_ERL)) {
984         mips_env_set_pc(env, env->CP0_ErrorEPC);
985         env->CP0_Status &= ~(1 << CP0St_ERL);
986     } else {
987         mips_env_set_pc(env, env->CP0_EPC);
988         env->CP0_Status &= ~(1 << CP0St_EXL);
989     }
990     compute_hflags(env);
991     debug_post_eret(env);
992 }
993 
994 void helper_eret(CPUMIPSState *env)
995 {
996     exception_return(env);
997     env->CP0_LLAddr = 1;
998     env->lladdr = 1;
999 }
1000 
1001 void helper_eretnc(CPUMIPSState *env)
1002 {
1003     exception_return(env);
1004 }
1005 
1006 void helper_deret(CPUMIPSState *env)
1007 {
1008     debug_pre_eret(env);
1009 
1010     env->hflags &= ~MIPS_HFLAG_DM;
1011     compute_hflags(env);
1012 
1013     mips_env_set_pc(env, env->CP0_DEPC);
1014 
1015     debug_post_eret(env);
1016 }
1017 #endif /* !CONFIG_USER_ONLY */
1018 
1019 static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
1020 {
1021     if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
1022         return;
1023     }
1024     do_raise_exception(env, EXCP_RI, pc);
1025 }
1026 
1027 target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
1028 {
1029     check_hwrena(env, 0, GETPC());
1030     return env->CP0_EBase & 0x3ff;
1031 }
1032 
1033 target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
1034 {
1035     check_hwrena(env, 1, GETPC());
1036     return env->SYNCI_Step;
1037 }
1038 
1039 target_ulong helper_rdhwr_cc(CPUMIPSState *env)
1040 {
1041     check_hwrena(env, 2, GETPC());
1042 #ifdef CONFIG_USER_ONLY
1043     return env->CP0_Count;
1044 #else
1045     return (int32_t)cpu_mips_get_count(env);
1046 #endif
1047 }
1048 
1049 target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
1050 {
1051     check_hwrena(env, 3, GETPC());
1052     return env->CCRes;
1053 }
1054 
1055 target_ulong helper_rdhwr_performance(CPUMIPSState *env)
1056 {
1057     check_hwrena(env, 4, GETPC());
1058     return env->CP0_Performance0;
1059 }
1060 
1061 target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
1062 {
1063     check_hwrena(env, 5, GETPC());
1064     return (env->CP0_Config5 >> CP0C5_XNP) & 1;
1065 }
1066 
/*
 * Minimal emulation of a few PMON firmware monitor services.
 * The call number arrives doubled, hence the initial division.
 * Unrecognised services are silently ignored.
 */
void helper_pmon(CPUMIPSState *env, int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0) {
            env->active_tc.gpr[2] = -1;
        }
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        /*
         * No input source is modelled: report "no character" (-1) in $v0.
         * NOTE(review): the conditional store in case 2 is shadowed by
         * this unconditional one via the fallthrough — looks redundant,
         * kept as-is to preserve behavior.
         */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: write the low byte of $a0 to the host stdout. */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /*
             * NOTE(review): treats guest $a0 directly as a host pointer to
             * a NUL-terminated string — only sound when guest addresses map
             * straight to host memory; confirm the callers guarantee this.
             */
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
1093 
1094 void helper_wait(CPUMIPSState *env)
1095 {
1096     CPUState *cs = env_cpu(env);
1097 
1098     cs->halted = 1;
1099     cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
1100     /*
1101      * Last instruction in the block, PC was updated before
1102      * - no need to recover PC and icount.
1103      */
1104     raise_exception(env, EXCP_HLT);
1105 }
1106 
1107 #if !defined(CONFIG_USER_ONLY)
1108 
1109 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1110                                   MMUAccessType access_type,
1111                                   int mmu_idx, uintptr_t retaddr)
1112 {
1113     MIPSCPU *cpu = MIPS_CPU(cs);
1114     CPUMIPSState *env = &cpu->env;
1115     int error_code = 0;
1116     int excp;
1117 
1118     if (!(env->hflags & MIPS_HFLAG_DM)) {
1119         env->CP0_BadVAddr = addr;
1120     }
1121 
1122     if (access_type == MMU_DATA_STORE) {
1123         excp = EXCP_AdES;
1124     } else {
1125         excp = EXCP_AdEL;
1126         if (access_type == MMU_INST_FETCH) {
1127             error_code |= EXCP_INST_NOTAVAIL;
1128         }
1129     }
1130 
1131     do_raise_exception_err(env, excp, error_code, retaddr);
1132 }
1133 
1134 void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1135                                     vaddr addr, unsigned size,
1136                                     MMUAccessType access_type,
1137                                     int mmu_idx, MemTxAttrs attrs,
1138                                     MemTxResult response, uintptr_t retaddr)
1139 {
1140     MIPSCPU *cpu = MIPS_CPU(cs);
1141     CPUMIPSState *env = &cpu->env;
1142 
1143     if (access_type == MMU_INST_FETCH) {
1144         do_raise_exception(env, EXCP_IBE, retaddr);
1145     } else {
1146         do_raise_exception(env, EXCP_DBE, retaddr);
1147     }
1148 }
1149 #endif /* !CONFIG_USER_ONLY */
1150 
1151 void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
1152 {
1153 #ifndef CONFIG_USER_ONLY
1154     static const char *const type_name[] = {
1155         "Primary Instruction",
1156         "Primary Data or Unified Primary",
1157         "Tertiary",
1158         "Secondary"
1159     };
1160     uint32_t cache_type = extract32(op, 0, 2);
1161     uint32_t cache_operation = extract32(op, 2, 3);
1162     target_ulong index = addr & 0x1fffffff;
1163 
1164     switch (cache_operation) {
1165     case 0b010: /* Index Store Tag */
1166         memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
1167                                      MO_64, MEMTXATTRS_UNSPECIFIED);
1168         break;
1169     case 0b001: /* Index Load Tag */
1170         memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
1171                                     MO_64, MEMTXATTRS_UNSPECIFIED);
1172         break;
1173     case 0b000: /* Index Invalidate */
1174     case 0b100: /* Hit Invalidate */
1175     case 0b110: /* Hit Writeback */
1176         /* no-op */
1177         break;
1178     default:
1179         qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n",
1180                       cache_operation, type_name[cache_type]);
1181         break;
1182     }
1183 #endif
1184 }
1185