xref: /qemu/target/mips/tcg/op_helper.c (revision 533fc64feb96b6aafdb0d604cd1cd97877451878)
1 /*
2  *  MIPS emulation helpers for qemu.
3  *
4  *  Copyright (c) 2004-2005 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  *
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/memop.h"
27 #include "fpu_helper.h"
28 
29 /*****************************************************************************/
30 /* Exceptions processing helpers */
31 
/*
 * Raise a guest exception carrying an error code.  The unwind pc is 0,
 * so the CPU state must already be synchronized by the translator.
 */
void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                int error_code)
{
    do_raise_exception_err(env, exception, error_code, 0);
}
37 
/* Raise a guest exception, unwinding the TB via the host return address. */
void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, GETPC());
}
42 
/* Raise the internal EXCP_DEBUG exception (no TB unwinding, pc = 0). */
void helper_raise_exception_debug(CPUMIPSState *env)
{
    do_raise_exception(env, EXCP_DEBUG, 0);
}
47 
/* File-local helper: raise an exception without unwinding (pc = 0). */
static void raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, 0);
}
52 
53 /* 64 bits arithmetic for 32 bits hosts */
54 static inline uint64_t get_HILO(CPUMIPSState *env)
55 {
56     return ((uint64_t)(env->active_tc.HI[0]) << 32) |
57            (uint32_t)env->active_tc.LO[0];
58 }
59 
60 static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
61 {
62     env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
63     return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
64 }
65 
66 static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
67 {
68     target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
69     env->active_tc.HI[0] = (int32_t)(HILO >> 32);
70     return tmp;
71 }
72 
73 /* Multiplication variants of the vr54xx. */
/* MULS: {HI,LO} = -(rs * rt), signed 32x32; returns the new LO. */
target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
                                 (int64_t)(int32_t)arg2));
}
80 
/* MULSU: {HI,LO} = -(rs * rt), unsigned 32x32; returns the new LO. */
target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}
87 
/* MACC: {HI,LO} += rs * rt, signed 32x32; returns the new LO. */
target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}
94 
/* MACCHI: {HI,LO} += rs * rt, signed 32x32; returns the new HI. */
target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}
101 
/* MACCU: {HI,LO} += rs * rt, unsigned 32x32; returns the new LO. */
target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
108 
/* MACCHIU: {HI,LO} += rs * rt, unsigned 32x32; returns the new HI. */
target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
115 
/* MSAC: {HI,LO} -= rs * rt, signed 32x32; returns the new LO. */
target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
                         target_ulong arg2)
{
    return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}
122 
/* MSACHI: {HI,LO} -= rs * rt, signed 32x32; returns the new HI. */
target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}
129 
/* MSACU: {HI,LO} -= rs * rt, unsigned 32x32; returns the new LO. */
target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
136 
/* MSACHIU: {HI,LO} -= rs * rt, unsigned 32x32; returns the new HI. */
target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
                       (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
}
143 
/* MULHI: {HI,LO} = rs * rt, signed 32x32; returns the new HI. */
target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
                          target_ulong arg2)
{
    return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
}
149 
/* MULHIU: {HI,LO} = rs * rt, unsigned 32x32; returns the new HI. */
target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}
156 
/* MULSHI: {HI,LO} = -(rs * rt), signed 32x32; returns the new HI. */
target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
                           target_ulong arg2)
{
    return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
                       (int64_t)(int32_t)arg2);
}
163 
/* MULSHIU: {HI,LO} = -(rs * rt), unsigned 32x32; returns the new HI. */
target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
                            target_ulong arg2)
{
    return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
                       (uint64_t)(uint32_t)arg2);
}
170 
/*
 * Reverse the bit order within each byte of 'v' (the bytes themselves
 * keep their positions): swap adjacent bits, then bit pairs, then
 * nibbles.  Shared implementation for BITSWAP/DBITSWAP.
 */
static inline target_ulong bitswap(target_ulong v)
{
    v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
              ((v & (target_ulong)0x5555555555555555ULL) << 1);
    v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
              ((v & (target_ulong)0x3333333333333333ULL) << 2);
    v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
              ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
    return v;
}
181 
182 #ifdef TARGET_MIPS64
/* DBITSWAP: reverse the bits within each of the 8 bytes of rt. */
target_ulong helper_dbitswap(target_ulong rt)
{
    return bitswap(rt);
}
187 #endif
188 
/*
 * BITSWAP: reverse the bits within each byte of the low 32 bits of rt;
 * the 32-bit result is sign-extended to target width.
 */
target_ulong helper_bitswap(target_ulong rt)
{
    return (int32_t)bitswap(rt);
}
193 
/*
 * ROTX (nanoMIPS): rotate/extract via a 5-stage logarithmic shifter
 * network.  The low 32 bits of rs are replicated into a 64-bit value,
 * then each stage conditionally shifts right by 16/8/4/2/1 bits under
 * control of 'shift', 'shiftx' and 'stripe', following the ROTX
 * pseudo-code.  The final result is the low 32 bits, sign-extended.
 */
target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
                        uint32_t stripe)
{
    int i;
    /* Replicate rs into both halves of a 64-bit working value. */
    uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
    uint64_t tmp1 = tmp0;
    /* Stage 1: conditional shift by 16. */
    for (i = 0; i <= 46; i++) {
        int s;
        if (i & 0x8) {
            s = shift;
        } else {
            s = shiftx;
        }

        /* With striping enabled, invert the control for odd nibbles. */
        if (stripe != 0 && !(i & 0x4)) {
            s = ~s;
        }
        if (s & 0x10) {
            if (tmp0 & (1LL << (i + 16))) {
                tmp1 |= 1LL << i;
            } else {
                tmp1 &= ~(1LL << i);
            }
        }
    }

    /* Stage 2: conditional shift by 8. */
    uint64_t tmp2 = tmp1;
    for (i = 0; i <= 38; i++) {
        int s;
        if (i & 0x4) {
            s = shift;
        } else {
            s = shiftx;
        }

        if (s & 0x8) {
            if (tmp1 & (1LL << (i + 8))) {
                tmp2 |= 1LL << i;
            } else {
                tmp2 &= ~(1LL << i);
            }
        }
    }

    /* Stage 3: conditional shift by 4. */
    uint64_t tmp3 = tmp2;
    for (i = 0; i <= 34; i++) {
        int s;
        if (i & 0x2) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x4) {
            if (tmp2 & (1LL << (i + 4))) {
                tmp3 |= 1LL << i;
            } else {
                tmp3 &= ~(1LL << i);
            }
        }
    }

    /* Stage 4: conditional shift by 2. */
    uint64_t tmp4 = tmp3;
    for (i = 0; i <= 32; i++) {
        int s;
        if (i & 0x1) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x2) {
            if (tmp3 & (1LL << (i + 2))) {
                tmp4 |= 1LL << i;
            } else {
                tmp4 &= ~(1LL << i);
            }
        }
    }

    /* Stage 5: conditional shift by 1, controlled by 'shift' only. */
    uint64_t tmp5 = tmp4;
    for (i = 0; i <= 31; i++) {
        int s;
        s = shift;
        if (s & 0x1) {
            if (tmp4 & (1LL << (i + 1))) {
                tmp5 |= 1LL << i;
            } else {
                tmp5 &= ~(1LL << i);
            }
        }
    }

    /* Sign-extend the 32-bit result. */
    return (int64_t)(int32_t)(uint32_t)tmp5;
}
287 
288 #ifndef CONFIG_USER_ONLY
289 
/*
 * Translate a guest virtual address to a physical address.  On
 * translation failure (cpu_mips_translate_address returns -1, having
 * set up the exception state), exit the CPU loop, restoring guest
 * state from the host return address 'retaddr'; this never returns.
 */
static inline hwaddr do_translate_address(CPUMIPSState *env,
                                          target_ulong address,
                                          MMUAccessType access_type,
                                          uintptr_t retaddr)
{
    hwaddr paddr;
    CPUState *cs = env_cpu(env);

    paddr = cpu_mips_translate_address(env, address, access_type);

    if (paddr == -1LL) {
        /* cpu_loop_exit_restore() does not return. */
        cpu_loop_exit_restore(cs, retaddr);
    } else {
        return paddr;
    }
}
306 
/*
 * LL/LLD: load-linked.  An unaligned address raises AdEL (recording
 * BadVAddr unless in Debug Mode).  On success, the physical link
 * address (CP0_LLAddr), virtual link address (lladdr) and loaded value
 * (llval) are recorded for a subsequent SC/SCD to compare against.
 * GETPC() must be evaluated directly in the helper for correct unwind.
 */
#define HELPER_LD_ATOMIC(name, insn, almask, do_cast)                         \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx)  \
{                                                                             \
    if (arg & almask) {                                                       \
        if (!(env->hflags & MIPS_HFLAG_DM)) {                                 \
            env->CP0_BadVAddr = arg;                                          \
        }                                                                     \
        do_raise_exception(env, EXCP_AdEL, GETPC());                          \
    }                                                                         \
    env->CP0_LLAddr = do_translate_address(env, arg, MMU_DATA_LOAD, GETPC()); \
    env->lladdr = arg;                                                        \
    env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC());  \
    return env->llval;                                                        \
}
/* LL: 32-bit, word-aligned, sign-extended. */
HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t))
#ifdef TARGET_MIPS64
/* LLD: 64-bit, doubleword-aligned. */
HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong))
#endif
#undef HELPER_LD_ATOMIC
326 #endif
327 
328 #ifdef TARGET_WORDS_BIGENDIAN
329 #define GET_LMASK(v) ((v) & 3)
330 #define GET_OFFSET(addr, offset) (addr + (offset))
331 #else
332 #define GET_LMASK(v) (((v) & 3) ^ 3)
333 #define GET_OFFSET(addr, offset) (addr - (offset))
334 #endif
335 
/*
 * SWL: store the most-significant bytes of arg1 at the possibly
 * unaligned address arg2, down to the next aligned word boundary.
 * GET_LMASK yields the byte offset within the word adjusted for
 * endianness, selecting how many of the 1..4 bytes are written.
 */
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());

    if (GET_LMASK(arg2) <= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) <= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) == 0) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1,
                          mem_idx, GETPC());
    }
}
356 
/*
 * SWR: store the least-significant bytes of arg1 at the possibly
 * unaligned address arg2, up from the previous aligned word boundary.
 * Complements SWL for unaligned word stores.
 */
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK(arg2) >= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) >= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) == 3) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
                          mem_idx, GETPC());
    }
}
377 
378 #if defined(TARGET_MIPS64)
379 /*
380  * "half" load and stores.  We must do the memory access inline,
381  * or fault handling won't work.
382  */
383 #ifdef TARGET_WORDS_BIGENDIAN
384 #define GET_LMASK64(v) ((v) & 7)
385 #else
386 #define GET_LMASK64(v) (((v) & 7) ^ 7)
387 #endif
388 
/*
 * SDL: store the most-significant bytes of the doubleword arg1 at the
 * possibly unaligned address arg2, down to the next aligned doubleword
 * boundary (1..8 byte stores, selected by GET_LMASK64).
 */
void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());

    if (GET_LMASK64(arg2) <= 6) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 5) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 4) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 3) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) <= 0) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1,
                          mem_idx, GETPC());
    }
}
429 
/*
 * SDR: store the least-significant bytes of the doubleword arg1 at the
 * possibly unaligned address arg2, up from the previous aligned
 * doubleword boundary.  Complements SDL.
 */
void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK64(arg2) >= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 3) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 4) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 5) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) >= 6) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48),
                          mem_idx, GETPC());
    }

    if (GET_LMASK64(arg2) == 7) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56),
                          mem_idx, GETPC());
    }
}
470 #endif /* TARGET_MIPS64 */
471 
/* GPRs covered by the LWM/SWM/LDM/SDM register-list encoding: s0-s7, fp. */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
473 
/*
 * LWM: load the first (reglist & 0xf) registers of multiple_regs from
 * consecutive words at 'addr', each sign-extended; if bit 4 of reglist
 * is set, additionally load ra ($31) from the next word.  Out-of-range
 * list sizes load only $31 (the loop is skipped).
 */
void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] =
                (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
            addr += 4;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] =
            (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
    }
}
495 
496 void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
497                 uint32_t mem_idx)
498 {
499     target_ulong base_reglist = reglist & 0xf;
500     target_ulong do_r31 = reglist & 0x10;
501 
502     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
503         target_ulong i;
504 
505         for (i = 0; i < base_reglist; i++) {
506             cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
507                               mem_idx, GETPC());
508             addr += 4;
509         }
510     }
511 
512     if (do_r31) {
513         cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
514     }
515 }
516 
517 #if defined(TARGET_MIPS64)
/*
 * LDM: 64-bit counterpart of LWM — load the listed registers from
 * consecutive doublewords at 'addr'; bit 4 of reglist additionally
 * loads ra ($31).
 */
void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] =
                cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] =
            cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
    }
}
539 
/*
 * SDM: 64-bit counterpart of SWM — store the listed registers to
 * consecutive doublewords at 'addr'; bit 4 of reglist additionally
 * stores ra ($31).
 */
void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
                              mem_idx, GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
    }
}
560 #endif
561 
562 
/* FORK (MIPS MT): deliberately a stub — TC register write unimplemented. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    /*
     * arg1 = rt, arg2 = rs
     * TODO: store to TC register
     */
}
570 
/*
 * YIELD (MIPS MT).  arg < 0 (except -2): if YSI is enabled and the
 * thread is dynamically allocatable, raise a Thread exception with
 * EXCPT = 4 (YIELD scheduler).  arg == 0: TC deallocation/underflow is
 * unimplemented.  arg > 0: yield-qualifier scheduling is not
 * implemented, so raise a Thread exception with EXCPT = 2.  Returns
 * the YQMask register.
 */
target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(env, EXCP_THREAD, GETPC());
            }
        }
    } else if (arg1 == 0) {
        if (0) {
            /* TODO: TC underflow */
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(env, EXCP_THREAD, GETPC());
        } else {
            /* TODO: Deallocate TC */
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(env, EXCP_THREAD, GETPC());
    }
    return env->CP0_YQMask;
}
601 
602 #ifndef CONFIG_USER_ONLY
603 /* TLB management */
604 static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
605 {
606     /* Discard entries from env->tlb[first] onwards.  */
607     while (env->tlb->tlb_in_use > first) {
608         r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
609     }
610 }
611 
/*
 * Extract the physical frame number from an EntryLo value.  On 32-bit
 * targets the PFN is split between EntryLo bits 6..29 and the PFNX
 * extension in the upper 32 bits.
 */
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}
621 
/*
 * Populate the software TLB entry 'idx' from the CP0 EntryHi/EntryLo/
 * PageMask/MemoryMapID registers.  If EntryHi.EHINV is set the entry
 * is simply marked invalid.
 */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both EntryLo G bits are set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    /* PageMask bits are cleared from the PFN before shifting into place. */
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}
655 
/*
 * TLBINV: invalidate every non-global TLB entry whose ASID (or MMID
 * when Config5.MI is set) matches the current one, then flush QEMU's
 * own TLB.
 */
void r4k_helper_tlbinv(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    /* Without MemoryMapID support, compare ASIDs instead. */
    MMID = mi ? MMID : (uint32_t) ASID;
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}
675 
676 void r4k_helper_tlbinvf(CPUMIPSState *env)
677 {
678     int idx;
679 
680     for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
681         env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
682     }
683     cpu_mips_tlb_flush(env);
684 }
685 
/*
 * TLBWI: write the TLB entry selected by CP0_Index from the CP0
 * EntryHi/EntryLo registers.  QEMU's cached translations are flushed
 * unless the write only *upgrades* permissions on an otherwise
 * identical entry.
 */
void r4k_helper_tlbwi(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    /* Without MemoryMapID support, compare ASIDs instead. */
    MMID = mi ? MMID : (uint32_t) ASID;

    /* Mask out the probe-failure bit and wrap Index into range. */
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1;

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}
733 
734 void r4k_helper_tlbwr(CPUMIPSState *env)
735 {
736     int r = cpu_mips_get_random(env);
737 
738     r4k_invalidate_tlb(env, r, 1);
739     r4k_fill_tlb(env, r);
740 }
741 
/*
 * TLBP: probe the TLB for an entry matching EntryHi.  On a match,
 * CP0_Index is set to the matching index; otherwise bit 31 of
 * CP0_Index is set, and any matching *shadow* entry (beyond nb_tlb)
 * is discarded so it cannot linger in QEMU's cache.
 */
void r4k_helper_tlbp(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    int i;

    /* Without MemoryMapID support, compare ASIDs instead. */
    MMID = mi ? MMID : (uint32_t) ASID;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        /* Check ASID/MMID, virtual page number & size */
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            /* Check ASID/MMID, virtual page number & size */
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        /* Probe failure: set the P bit. */
        env->CP0_Index |= 0x80000000;
    }
}
794 
/*
 * Inverse of get_tlb_pfn_from_entrylo(): place a physical frame number
 * back into EntryLo format (PFN at bit 6, plus PFNX in the upper 32
 * bits on 32-bit targets).
 */
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}
804 
/*
 * TLBR: read the TLB entry selected by CP0_Index back into the CP0
 * EntryHi/EntryLo/PageMask/MemoryMapID registers.  Flushes QEMU's TLB
 * if the read changes the current ASID/MMID, and always drops shadow
 * entries.  An EHINV entry reads back as EntryHi.EHINV with zeroed
 * EntryLo/PageMask.
 */
void r4k_helper_tlbr(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    /* Without MemoryMapID support, compare ASIDs instead. */
    MMID = mi ? MMID : (uint32_t) ASID;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB.  */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}
845 
/*
 * TLB instruction entry points: dispatch through the per-MMU-model
 * function table in env->tlb.
 */
void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}

void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}

void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}
875 
/*
 * Apply a GINVT invalidate message to this CPU's TLB: mark entries
 * invalid according to the requested mode (all beyond Wired, by
 * VA+MMID, by VA only, or by MMID only), then flush QEMU's TLB.
 */
static void global_invalidate_tlb(CPUMIPSState *env,
                           uint32_t invMsgVPN2,
                           uint8_t invMsgR,
                           uint32_t invMsgMMid,
                           bool invAll,
                           bool invVAMMid,
                           bool invMMid,
                           bool invVA)
{

    int idx;
    r4k_tlb_t *tlb;
    bool VAMatch;
    bool MMidMatch;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
#ifdef TARGET_MIPS64
            &&
            /*
             * NOTE(review): this compares the R field of this CPU's
             * *current* EntryHi rather than the probed TLB entry —
             * looks suspicious; confirm against the GINVT definition.
             */
            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
#endif
            );
        MMidMatch = tlb->MMID == invMsgMMid;
        if ((invAll && (idx > env->CP0_Wired)) ||
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            (VAMatch && invVA) ||
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}
910 
/*
 * GINVT: broadcast a TLB invalidate to every CPU.  'type' selects the
 * mode (0 = all, 1 = by VA, 2 = by MMID, 3 = by VA+MMID); 'arg'
 * supplies VPN2 (and, on MIPS64, the R field).
 */
void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
{
    bool invAll = type == 0;
    bool invVA = type == 1;
    bool invMMid = type == 2;
    bool invVAMMid = type == 3;
    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
    uint8_t invMsgR = 0;
    uint32_t invMsgMMid = env->CP0_MemoryMapID;
    CPUState *other_cs = first_cpu;

#ifdef TARGET_MIPS64
    invMsgR = extract64(arg, 62, 2);
#endif

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
                              invAll, invVAMMid, invMMid, invVA);
    }
}
932 
933 /* Specials */
934 target_ulong helper_di(CPUMIPSState *env)
935 {
936     target_ulong t0 = env->CP0_Status;
937 
938     env->CP0_Status = t0 & ~(1 << CP0St_IE);
939     return t0;
940 }
941 
942 target_ulong helper_ei(CPUMIPSState *env)
943 {
944     target_ulong t0 = env->CP0_Status;
945 
946     env->CP0_Status = t0 | (1 << CP0St_IE);
947     return t0;
948 }
949 
950 static void debug_pre_eret(CPUMIPSState *env)
951 {
952     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
953         qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
954                 env->active_tc.PC, env->CP0_EPC);
955         if (env->CP0_Status & (1 << CP0St_ERL)) {
956             qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
957         }
958         if (env->hflags & MIPS_HFLAG_DM) {
959             qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
960         }
961         qemu_log("\n");
962     }
963 }
964 
965 static void debug_post_eret(CPUMIPSState *env)
966 {
967     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
968         qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
969                 env->active_tc.PC, env->CP0_EPC);
970         if (env->CP0_Status & (1 << CP0St_ERL)) {
971             qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
972         }
973         if (env->hflags & MIPS_HFLAG_DM) {
974             qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
975         }
976         switch (cpu_mmu_index(env, false)) {
977         case 3:
978             qemu_log(", ERL\n");
979             break;
980         case MIPS_HFLAG_UM:
981             qemu_log(", UM\n");
982             break;
983         case MIPS_HFLAG_SM:
984             qemu_log(", SM\n");
985             break;
986         case MIPS_HFLAG_KM:
987             qemu_log("\n");
988             break;
989         default:
990             cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
991             break;
992         }
993     }
994 }
995 
996 static inline void exception_return(CPUMIPSState *env)
997 {
998     debug_pre_eret(env);
999     if (env->CP0_Status & (1 << CP0St_ERL)) {
1000         mips_env_set_pc(env, env->CP0_ErrorEPC);
1001         env->CP0_Status &= ~(1 << CP0St_ERL);
1002     } else {
1003         mips_env_set_pc(env, env->CP0_EPC);
1004         env->CP0_Status &= ~(1 << CP0St_EXL);
1005     }
1006     compute_hflags(env);
1007     debug_post_eret(env);
1008 }
1009 
/* ERET: return from exception and clear any pending LL/SC reservation. */
void helper_eret(CPUMIPSState *env)
{
    exception_return(env);
    /* 1 is an address no aligned LL can match, so a later SC fails. */
    env->CP0_LLAddr = 1;
    env->lladdr = 1;
}
1016 
/* ERETNC: like ERET but leaves the LL/SC reservation state untouched. */
void helper_eretnc(CPUMIPSState *env)
{
    exception_return(env);
}
1021 
/*
 * DERET: return from debug mode.  Debug mode is cleared and hflags
 * recomputed before the PC is restored from DEPC.
 */
void helper_deret(CPUMIPSState *env)
{
    debug_pre_eret(env);

    env->hflags &= ~MIPS_HFLAG_DM;
    compute_hflags(env);

    mips_env_set_pc(env, env->CP0_DEPC);

    debug_post_eret(env);
}
1033 #endif /* !CONFIG_USER_ONLY */
1034 
1035 static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
1036 {
1037     if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
1038         return;
1039     }
1040     do_raise_exception(env, EXCP_RI, pc);
1041 }
1042 
/* RDHWR 0, CPUNum: low 10 bits of CP0 EBase. */
target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
{
    check_hwrena(env, 0, GETPC());
    return env->CP0_EBase & 0x3ff;
}
1048 
/* RDHWR 1, SYNCI_Step: address step for the SYNCI instruction. */
target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
    check_hwrena(env, 1, GETPC());
    return env->SYNCI_Step;
}
1054 
/* RDHWR 2, CC: the CP0 Count register (cycle counter). */
target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
    check_hwrena(env, 2, GETPC());
#ifdef CONFIG_USER_ONLY
    return env->CP0_Count;
#else
    /* System mode: Count is derived from the timer. */
    return (int32_t)cpu_mips_get_count(env);
#endif
}
1064 
/* RDHWR 3, CCRes: Count register resolution (cycles per Count tick). */
target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
    check_hwrena(env, 3, GETPC());
    return env->CCRes;
}
1070 
/* RDHWR 4, PerfCtr: performance counter 0. */
target_ulong helper_rdhwr_performance(CPUMIPSState *env)
{
    check_hwrena(env, 4, GETPC());
    return env->CP0_Performance0;
}
1076 
/* RDHWR 5, XNP: Config5.XNP bit (LL/SC-pair availability). */
target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
{
    check_hwrena(env, 5, GETPC());
    return (env->CP0_Config5 >> CP0C5_XNP) & 1;
}
1082 
/*
 * Minimal emulation of a few PMON monitor calls.  Arguments are taken
 * from the guest's gpr[4] (a0) and results returned in gpr[2] (v0).
 */
void helper_pmon(CPUMIPSState *env, int function)
{
    /* NOTE(review): caller apparently passes the code doubled -- confirm. */
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0) {
            env->active_tc.gpr[2] = -1;
        }
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        /* Input is not implemented: always report "no character" (-1). */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: print the low byte of a0 on the host's stdout. */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /*
             * Print a NUL-terminated string at the guest address in a0.
             * NOTE(review): dereferences the guest VA directly, which
             * only works where guest addresses are host-addressable
             * (user mode) -- confirm this path is user-mode only.
             */
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
1109 
/*
 * WAIT: halt the CPU until an interrupt arrives.  Marks the CPU
 * halted, clears any stale wake request, then leaves the TCG
 * execution loop via EXCP_HLT.
 */
void helper_wait(CPUMIPSState *env)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
    /*
     * Last instruction in the block, PC was updated before
     * - no need to recover PC and icount.
     */
    raise_exception(env, EXCP_HLT);
}
1122 
1123 #if !defined(CONFIG_USER_ONLY)
1124 
1125 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1126                                   MMUAccessType access_type,
1127                                   int mmu_idx, uintptr_t retaddr)
1128 {
1129     MIPSCPU *cpu = MIPS_CPU(cs);
1130     CPUMIPSState *env = &cpu->env;
1131     int error_code = 0;
1132     int excp;
1133 
1134     if (!(env->hflags & MIPS_HFLAG_DM)) {
1135         env->CP0_BadVAddr = addr;
1136     }
1137 
1138     if (access_type == MMU_DATA_STORE) {
1139         excp = EXCP_AdES;
1140     } else {
1141         excp = EXCP_AdEL;
1142         if (access_type == MMU_INST_FETCH) {
1143             error_code |= EXCP_INST_NOTAVAIL;
1144         }
1145     }
1146 
1147     do_raise_exception_err(env, excp, error_code, retaddr);
1148 }
1149 
1150 void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1151                                     vaddr addr, unsigned size,
1152                                     MMUAccessType access_type,
1153                                     int mmu_idx, MemTxAttrs attrs,
1154                                     MemTxResult response, uintptr_t retaddr)
1155 {
1156     MIPSCPU *cpu = MIPS_CPU(cs);
1157     CPUMIPSState *env = &cpu->env;
1158 
1159     if (access_type == MMU_INST_FETCH) {
1160         do_raise_exception(env, EXCP_IBE, retaddr);
1161     } else {
1162         do_raise_exception(env, EXCP_DBE, retaddr);
1163     }
1164 }
1165 #endif /* !CONFIG_USER_ONLY */
1166 
/*
 * CACHE instruction.  "op" packs the cache type in bits [1:0] and the
 * operation code in bits [4:2]; "addr" supplies the index/address.
 * Only the tag load/store operations do real work here, forwarding to
 * the env->itc_tag memory region; invalidate and writeback ops are
 * no-ops since QEMU does not model caches.
 */
void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
{
#ifndef CONFIG_USER_ONLY
    /* Indexed by the cache-type field; used only for the unimp log. */
    static const char *const type_name[] = {
        "Primary Instruction",
        "Primary Data or Unified Primary",
        "Tertiary",
        "Secondary"
    };
    uint32_t cache_type = extract32(op, 0, 2);
    uint32_t cache_operation = extract32(op, 2, 3);
    /* NOTE(review): mask keeps the low 29 bits -- presumably the offset
       within the 512MiB unmapped segment; confirm against itc_tag. */
    target_ulong index = addr & 0x1fffffff;

    switch (cache_operation) {
    case 0b010: /* Index Store Tag */
        memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
                                     MO_64, MEMTXATTRS_UNSPECIFIED);
        break;
    case 0b001: /* Index Load Tag */
        memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
                                    MO_64, MEMTXATTRS_UNSPECIFIED);
        break;
    case 0b000: /* Index Invalidate */
    case 0b100: /* Hit Invalidate */
    case 0b110: /* Hit Writeback */
        /* no-op */
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n",
                      cache_operation, type_name[cache_type]);
        break;
    }
#endif
}
1201