xref: /qemu/target/mips/tcg/op_helper.c (revision 5f3013654e879bb4b22876617fdb235aa22568d3)
1 /*
2  *  MIPS emulation helpers for qemu.
3  *
4  *  Copyright (c) 2004-2005 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  *
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/memop.h"
27 
28 
29 /*****************************************************************************/
30 /* Exceptions processing helpers */
31 
void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                int error_code)
{
    /*
     * Raise a MIPS exception that carries an error code.  Passes pc=0,
     * i.e. no TB unwinding -- presumably the translator has already
     * synchronized the CPU state before calling this (contrast
     * helper_raise_exception(), which passes GETPC()).
     */
    do_raise_exception_err(env, exception, error_code, 0);
}
37 
void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
{
    /* Raise an exception, unwinding guest state from the host return PC. */
    do_raise_exception(env, exception, GETPC());
}
42 
void helper_raise_exception_debug(CPUMIPSState *env)
{
    /* Enter the debug exception path (EXCP_DEBUG); pc=0, no TB unwinding. */
    do_raise_exception(env, EXCP_DEBUG, 0);
}
47 
static void raise_exception(CPUMIPSState *env, uint32_t exception)
{
    /* Local convenience wrapper: raise 'exception' with pc=0 (no unwind). */
    do_raise_exception(env, exception, 0);
}
52 
53 /* 64 bits arithmetic for 32 bits hosts */
54 static inline uint64_t get_HILO(CPUMIPSState *env)
55 {
56     return ((uint64_t)(env->active_tc.HI[0]) << 32) |
57            (uint32_t)env->active_tc.LO[0];
58 }
59 
60 static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
61 {
62     env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
63     return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
64 }
65 
66 static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
67 {
68     target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
69     env->active_tc.HI[0] = (int32_t)(HILO >> 32);
70     return tmp;
71 }
72 
73 /* Multiplication variants of the vr54xx. */
74 target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
75                          target_ulong arg2)
76 {
77     return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
78                                  (int64_t)(int32_t)arg2));
79 }
80 
81 target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
82                           target_ulong arg2)
83 {
84     return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
85                        (uint64_t)(uint32_t)arg2);
86 }
87 
88 target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
89                          target_ulong arg2)
90 {
91     return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
92                        (int64_t)(int32_t)arg2);
93 }
94 
95 target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
96                            target_ulong arg2)
97 {
98     return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
99                        (int64_t)(int32_t)arg2);
100 }
101 
102 target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
103                           target_ulong arg2)
104 {
105     return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
106                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
107 }
108 
109 target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
110                             target_ulong arg2)
111 {
112     return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
113                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
114 }
115 
116 target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
117                          target_ulong arg2)
118 {
119     return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
120                        (int64_t)(int32_t)arg2);
121 }
122 
123 target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
124                            target_ulong arg2)
125 {
126     return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
127                        (int64_t)(int32_t)arg2);
128 }
129 
130 target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
131                           target_ulong arg2)
132 {
133     return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
134                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
135 }
136 
137 target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
138                             target_ulong arg2)
139 {
140     return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
141                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
142 }
143 
144 target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
145                           target_ulong arg2)
146 {
147     return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
148 }
149 
150 target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
151                            target_ulong arg2)
152 {
153     return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
154                        (uint64_t)(uint32_t)arg2);
155 }
156 
157 target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
158                            target_ulong arg2)
159 {
160     return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
161                        (int64_t)(int32_t)arg2);
162 }
163 
164 target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
165                             target_ulong arg2)
166 {
167     return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
168                        (uint64_t)(uint32_t)arg2);
169 }
170 
171 static inline target_ulong bitswap(target_ulong v)
172 {
173     v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
174               ((v & (target_ulong)0x5555555555555555ULL) << 1);
175     v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
176               ((v & (target_ulong)0x3333333333333333ULL) << 2);
177     v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
178               ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
179     return v;
180 }
181 
#ifdef TARGET_MIPS64
/* DBITSWAP: reverse the bits within each byte of the full 64-bit value. */
target_ulong helper_dbitswap(target_ulong rt)
{
    return bitswap(rt);
}
#endif
188 
/*
 * BITSWAP: reverse the bits within each byte of the low 32 bits and
 * sign-extend the 32-bit result.
 */
target_ulong helper_bitswap(target_ulong rt)
{
    return (int32_t)bitswap(rt);
}
193 
/*
 * ROTX: rotate/extract with configurable bit shuffling, implemented as a
 * five-stage butterfly network over a 64-bit value made of two copies of
 * the 32-bit source.  Stage k conditionally copies the bit 2^(5-k)
 * positions above into each position, gated by one bit of a per-bit-group
 * shift amount chosen between 'shift' and 'shiftx'; 'stripe' inverts that
 * selection for alternating groups in the first stage.
 * NOTE(review): this appears to follow the nanoMIPS ROTX dataflow --
 * confirm against the instruction specification.
 */
target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
                        uint32_t stripe)
{
    int i;
    /* Replicate the 32-bit input into both halves of a 64-bit word. */
    uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
    uint64_t tmp1 = tmp0;
    /* Stage 1: conditional copy from 16 bits above (bit 4 of s). */
    for (i = 0; i <= 46; i++) {
        int s;
        if (i & 0x8) {
            s = shift;
        } else {
            s = shiftx;
        }

        if (stripe != 0 && !(i & 0x4)) {
            s = ~s;
        }
        if (s & 0x10) {
            if (tmp0 & (1LL << (i + 16))) {
                tmp1 |= 1LL << i;
            } else {
                tmp1 &= ~(1LL << i);
            }
        }
    }

    /* Stage 2: conditional copy from 8 bits above (bit 3 of s). */
    uint64_t tmp2 = tmp1;
    for (i = 0; i <= 38; i++) {
        int s;
        if (i & 0x4) {
            s = shift;
        } else {
            s = shiftx;
        }

        if (s & 0x8) {
            if (tmp1 & (1LL << (i + 8))) {
                tmp2 |= 1LL << i;
            } else {
                tmp2 &= ~(1LL << i);
            }
        }
    }

    /* Stage 3: conditional copy from 4 bits above (bit 2 of s). */
    uint64_t tmp3 = tmp2;
    for (i = 0; i <= 34; i++) {
        int s;
        if (i & 0x2) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x4) {
            if (tmp2 & (1LL << (i + 4))) {
                tmp3 |= 1LL << i;
            } else {
                tmp3 &= ~(1LL << i);
            }
        }
    }

    /* Stage 4: conditional copy from 2 bits above (bit 1 of s). */
    uint64_t tmp4 = tmp3;
    for (i = 0; i <= 32; i++) {
        int s;
        if (i & 0x1) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x2) {
            if (tmp3 & (1LL << (i + 2))) {
                tmp4 |= 1LL << i;
            } else {
                tmp4 &= ~(1LL << i);
            }
        }
    }

    /* Stage 5: conditional copy from 1 bit above; always gated by shift. */
    uint64_t tmp5 = tmp4;
    for (i = 0; i <= 31; i++) {
        int s;
        s = shift;
        if (s & 0x1) {
            if (tmp4 & (1LL << (i + 1))) {
                tmp5 |= 1LL << i;
            } else {
                tmp5 &= ~(1LL << i);
            }
        }
    }

    /* Result is the low 32 bits, sign-extended. */
    return (int64_t)(int32_t)(uint32_t)tmp5;
}
287 
288 #ifndef CONFIG_USER_ONLY
289 
290 static inline hwaddr do_translate_address(CPUMIPSState *env,
291                                                       target_ulong address,
292                                                       int rw, uintptr_t retaddr)
293 {
294     hwaddr paddr;
295     CPUState *cs = env_cpu(env);
296 
297     paddr = cpu_mips_translate_address(env, address, rw);
298 
299     if (paddr == -1LL) {
300         cpu_loop_exit_restore(cs, retaddr);
301     } else {
302         return paddr;
303     }
304 }
305 
/*
 * Load-linked (LL/LLD).  Raises AdEL on a misaligned address (recording
 * BadVAddr unless in debug mode), records the translated physical address
 * in CP0_LLAddr and the virtual address/loaded value in lladdr/llval for
 * the matching SC/SCD to check.
 */
#define HELPER_LD_ATOMIC(name, insn, almask, do_cast)                         \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx)  \
{                                                                             \
    if (arg & almask) {                                                       \
        if (!(env->hflags & MIPS_HFLAG_DM)) {                                 \
            env->CP0_BadVAddr = arg;                                          \
        }                                                                     \
        do_raise_exception(env, EXCP_AdEL, GETPC());                          \
    }                                                                         \
    env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC());             \
    env->lladdr = arg;                                                        \
    env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC());  \
    return env->llval;                                                        \
}
/* 32-bit LL: word alignment, result sign-extended. */
HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t))
#ifdef TARGET_MIPS64
/* 64-bit LLD: doubleword alignment. */
HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong))
#endif
#undef HELPER_LD_ATOMIC
325 #endif
326 
/*
 * Byte-lane helpers for the unaligned SWL/SWR stores below.
 * GET_LMASK() maps the low address bits to an endian-independent lane
 * index (0..3); GET_OFFSET() steps toward increasing guest addresses on
 * big-endian targets and decreasing addresses on little-endian ones, so
 * the same store sequences work for both byte orders.
 */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
334 
335 void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
336                 int mem_idx)
337 {
338     cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());
339 
340     if (GET_LMASK(arg2) <= 2) {
341         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16),
342                           mem_idx, GETPC());
343     }
344 
345     if (GET_LMASK(arg2) <= 1) {
346         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8),
347                           mem_idx, GETPC());
348     }
349 
350     if (GET_LMASK(arg2) == 0) {
351         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1,
352                           mem_idx, GETPC());
353     }
354 }
355 
356 void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
357                 int mem_idx)
358 {
359     cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
360 
361     if (GET_LMASK(arg2) >= 1) {
362         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
363                           mem_idx, GETPC());
364     }
365 
366     if (GET_LMASK(arg2) >= 2) {
367         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
368                           mem_idx, GETPC());
369     }
370 
371     if (GET_LMASK(arg2) == 3) {
372         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
373                           mem_idx, GETPC());
374     }
375 }
376 
377 #if defined(TARGET_MIPS64)
378 /*
379  * "half" load and stores.  We must do the memory access inline,
380  * or fault handling won't work.
381  */
/*
 * Doubleword lane index (0..7) for SDL/SDR, endian-adjusted in the same
 * way as GET_LMASK above.
 */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK64(v) ((v) & 7)
#else
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
387 
388 void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
389                 int mem_idx)
390 {
391     cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC());
392 
393     if (GET_LMASK64(arg2) <= 6) {
394         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48),
395                           mem_idx, GETPC());
396     }
397 
398     if (GET_LMASK64(arg2) <= 5) {
399         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40),
400                           mem_idx, GETPC());
401     }
402 
403     if (GET_LMASK64(arg2) <= 4) {
404         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32),
405                           mem_idx, GETPC());
406     }
407 
408     if (GET_LMASK64(arg2) <= 3) {
409         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24),
410                           mem_idx, GETPC());
411     }
412 
413     if (GET_LMASK64(arg2) <= 2) {
414         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16),
415                           mem_idx, GETPC());
416     }
417 
418     if (GET_LMASK64(arg2) <= 1) {
419         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8),
420                           mem_idx, GETPC());
421     }
422 
423     if (GET_LMASK64(arg2) <= 0) {
424         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1,
425                           mem_idx, GETPC());
426     }
427 }
428 
429 void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
430                 int mem_idx)
431 {
432     cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());
433 
434     if (GET_LMASK64(arg2) >= 1) {
435         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
436                           mem_idx, GETPC());
437     }
438 
439     if (GET_LMASK64(arg2) >= 2) {
440         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
441                           mem_idx, GETPC());
442     }
443 
444     if (GET_LMASK64(arg2) >= 3) {
445         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24),
446                           mem_idx, GETPC());
447     }
448 
449     if (GET_LMASK64(arg2) >= 4) {
450         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32),
451                           mem_idx, GETPC());
452     }
453 
454     if (GET_LMASK64(arg2) >= 5) {
455         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40),
456                           mem_idx, GETPC());
457     }
458 
459     if (GET_LMASK64(arg2) >= 6) {
460         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48),
461                           mem_idx, GETPC());
462     }
463 
464     if (GET_LMASK64(arg2) == 7) {
465         cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56),
466                           mem_idx, GETPC());
467     }
468 }
469 #endif /* TARGET_MIPS64 */
470 
471 static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
472 
473 void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
474                 uint32_t mem_idx)
475 {
476     target_ulong base_reglist = reglist & 0xf;
477     target_ulong do_r31 = reglist & 0x10;
478 
479     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
480         target_ulong i;
481 
482         for (i = 0; i < base_reglist; i++) {
483             env->active_tc.gpr[multiple_regs[i]] =
484                 (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
485             addr += 4;
486         }
487     }
488 
489     if (do_r31) {
490         env->active_tc.gpr[31] =
491             (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC());
492     }
493 }
494 
495 void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
496                 uint32_t mem_idx)
497 {
498     target_ulong base_reglist = reglist & 0xf;
499     target_ulong do_r31 = reglist & 0x10;
500 
501     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
502         target_ulong i;
503 
504         for (i = 0; i < base_reglist; i++) {
505             cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
506                               mem_idx, GETPC());
507             addr += 4;
508         }
509     }
510 
511     if (do_r31) {
512         cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
513     }
514 }
515 
516 #if defined(TARGET_MIPS64)
517 void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
518                 uint32_t mem_idx)
519 {
520     target_ulong base_reglist = reglist & 0xf;
521     target_ulong do_r31 = reglist & 0x10;
522 
523     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
524         target_ulong i;
525 
526         for (i = 0; i < base_reglist; i++) {
527             env->active_tc.gpr[multiple_regs[i]] =
528                 cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
529             addr += 8;
530         }
531     }
532 
533     if (do_r31) {
534         env->active_tc.gpr[31] =
535             cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
536     }
537 }
538 
539 void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
540                 uint32_t mem_idx)
541 {
542     target_ulong base_reglist = reglist & 0xf;
543     target_ulong do_r31 = reglist & 0x10;
544 
545     if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
546         target_ulong i;
547 
548         for (i = 0; i < base_reglist; i++) {
549             cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
550                               mem_idx, GETPC());
551             addr += 8;
552         }
553     }
554 
555     if (do_r31) {
556         cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
557     }
558 }
559 #endif
560 
561 
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    /*
     * MT ASE FORK: should allocate a free TC and seed it from rs/rt.
     * arg1 = rt, arg2 = rs.
     * TODO: store to TC register -- currently a no-op stub.
     */
}
569 
570 target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
571 {
572     target_long arg1 = arg;
573 
574     if (arg1 < 0) {
575         /* No scheduling policy implemented. */
576         if (arg1 != -2) {
577             if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
578                 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
579                 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
580                 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
581                 do_raise_exception(env, EXCP_THREAD, GETPC());
582             }
583         }
584     } else if (arg1 == 0) {
585         if (0) {
586             /* TODO: TC underflow */
587             env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
588             do_raise_exception(env, EXCP_THREAD, GETPC());
589         } else {
590             /* TODO: Deallocate TC */
591         }
592     } else if (arg1 > 0) {
593         /* Yield qualifier inputs not implemented. */
594         env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
595         env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
596         do_raise_exception(env, EXCP_THREAD, GETPC());
597     }
598     return env->CP0_YQMask;
599 }
600 
601 #ifndef CONFIG_USER_ONLY
602 /* TLB management */
603 static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
604 {
605     /* Discard entries from env->tlb[first] onwards.  */
606     while (env->tlb->tlb_in_use > first) {
607         r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
608     }
609 }
610 
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
    /*
     * Extract the physical frame number from an EntryLo value.  On
     * 32-bit targets the 24-bit PFN field is combined with the PFNX
     * field from the upper register half (extended physical addressing).
     */
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}
620 
/*
 * Fill TLB slot 'idx' from the CP0 EntryHi/EntryLo0/EntryLo1/PageMask
 * (and MemoryMapID) registers.
 */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    /* Page-pair mask expressed in units of (even/odd) page frames. */
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        /* EntryHi.EHINV set: write the slot as explicitly invalid. */
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    /* The entry is global only if both EntryLo G bits are set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    /* Even page: valid, dirty, cache attrs, execute/read inhibit, PFN. */
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    /* Odd page: same fields from EntryLo1. */
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}
654 
void r4k_helper_tlbinv(CPUMIPSState *env)
{
    /*
     * TLBINV: invalidate every non-global TLB entry whose ASID (or MMID
     * when Config5.MI is set) matches the current EntryHi.ASID /
     * MemoryMapID, then flush QEMU's cached translations.
     */
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    /* With Config5.MI clear, the ASID stands in for the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}
674 
675 void r4k_helper_tlbinvf(CPUMIPSState *env)
676 {
677     int idx;
678 
679     for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
680         env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
681     }
682     cpu_mips_tlb_flush(env);
683 }
684 
void r4k_helper_tlbwi(CPUMIPSState *env)
{
    /*
     * TLBWI: write the TLB slot selected by CP0_Index from the CP0
     * EntryHi/EntryLo0/EntryLo1/PageMask registers.  QEMU's shadow
     * translations are discarded unless the write merely upgrades the
     * permissions of the entry that is already present.
     */
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    /* With Config5.MI clear, the ASID stands in for the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;

    /* Bit 31 of Index is the probe-failure flag; wrap into range. */
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    /* Decode the new entry's attribute bits from the CP0 registers. */
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1;

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}
732 
733 void r4k_helper_tlbwr(CPUMIPSState *env)
734 {
735     int r = cpu_mips_get_random(env);
736 
737     r4k_invalidate_tlb(env, r, 1);
738     r4k_fill_tlb(env, r);
739 }
740 
void r4k_helper_tlbp(CPUMIPSState *env)
{
    /*
     * TLBP: probe the TLB for an entry matching EntryHi.  On a hit,
     * CP0_Index is set to the matching index; on a miss, bit 31 of
     * CP0_Index is set and any matching shadow entries are discarded.
     */
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    int i;

    /* With Config5.MI clear, the ASID stands in for the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        /* Check ASID/MMID, virtual page number & size */
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            /* Check ASID/MMID, virtual page number & size */
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        /* Report the probe failure in bit 31 of Index. */
        env->CP0_Index |= 0x80000000;
    }
}
793 
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
    /*
     * Inverse of get_tlb_pfn_from_entrylo(): place the stored PFN back
     * into EntryLo format (PFN at bit 6, PFNX in the upper half on
     * 32-bit targets).
     */
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}
803 
void r4k_helper_tlbr(CPUMIPSState *env)
{
    /*
     * TLBR: read the TLB entry selected by CP0_Index back into the CP0
     * EntryHi/EntryLo0/EntryLo1/PageMask (and MemoryMapID) registers.
     */
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    /* With Config5.MI clear, the ASID stands in for the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;
    /* Bit 31 of Index is the probe-failure flag; wrap into range. */
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB.  */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    /* Discard shadow entries beyond the architected TLB. */
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        /* Invalid entry: report only the EHINV bit, clear the rest. */
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        /* Re-encode each page's G/V/D/RI/XI/C bits and PFN into EntryLo. */
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}
844 
/*
 * TLB instruction entry points: dispatch through the hooks installed for
 * the CPU's MMU model (r4k above, or others set up at CPU init).
 */
void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}

void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}

void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}
874 
/*
 * Apply a GINVT invalidate message to one CPU's TLB.  Exactly one of
 * invAll/invVA/invMMid/invVAMMid is set by the caller; matching entries
 * are marked EHINV and QEMU's cached translations are flushed.
 */
static void global_invalidate_tlb(CPUMIPSState *env,
                           uint32_t invMsgVPN2,
                           uint8_t invMsgR,
                           uint32_t invMsgMMid,
                           bool invAll,
                           bool invVAMMid,
                           bool invMMid,
                           bool invVA)
{

    int idx;
    r4k_tlb_t *tlb;
    bool VAMatch;
    bool MMidMatch;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
#ifdef TARGET_MIPS64
            &&
            /*
             * NOTE(review): this compares the *current* EntryHi region
             * bits of the target CPU, not the TLB entry's own region
             * bits, against the message R field -- confirm against the
             * GINVT specification.
             */
            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
#endif
            );
        MMidMatch = tlb->MMID == invMsgMMid;
        /* invAll spares wired entries (index <= CP0_Wired). */
        if ((invAll && (idx > env->CP0_Wired)) ||
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            (VAMatch && invVA) ||
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}
909 
void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
{
    /*
     * GINVT: broadcast a TLB invalidate to every CPU.  'type' selects the
     * flavour: 0 = all (non-wired), 1 = by VA, 2 = by MMID, 3 = by VA+MMID.
     */
    bool invAll = type == 0;
    bool invVA = type == 1;
    bool invMMid = type == 2;
    bool invVAMMid = type == 3;
    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
    uint8_t invMsgR = 0;
    uint32_t invMsgMMid = env->CP0_MemoryMapID;
    CPUState *other_cs = first_cpu;

#ifdef TARGET_MIPS64
    /* Region bits (63:62) of the target virtual address. */
    invMsgR = extract64(arg, 62, 2);
#endif

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
                              invAll, invVAMMid, invMMid, invVA);
    }
}
931 
932 /* Specials */
933 target_ulong helper_di(CPUMIPSState *env)
934 {
935     target_ulong t0 = env->CP0_Status;
936 
937     env->CP0_Status = t0 & ~(1 << CP0St_IE);
938     return t0;
939 }
940 
941 target_ulong helper_ei(CPUMIPSState *env)
942 {
943     target_ulong t0 = env->CP0_Status;
944 
945     env->CP0_Status = t0 | (1 << CP0St_IE);
946     return t0;
947 }
948 
949 static void debug_pre_eret(CPUMIPSState *env)
950 {
951     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
952         qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
953                 env->active_tc.PC, env->CP0_EPC);
954         if (env->CP0_Status & (1 << CP0St_ERL)) {
955             qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
956         }
957         if (env->hflags & MIPS_HFLAG_DM) {
958             qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
959         }
960         qemu_log("\n");
961     }
962 }
963 
/*
 * Log the CPU state after an ERET/DERET (new PC plus the resulting
 * MMU mode) when exec logging is enabled.
 */
static void debug_post_eret(CPUMIPSState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL)) {
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        }
        if (env->hflags & MIPS_HFLAG_DM) {
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        }
        /*
         * NOTE(review): 3 is presumably the mmu index used while
         * Status.ERL is set (no named constant exists for it here) --
         * confirm against cpu_mmu_index()'s implementation.
         */
        switch (cpu_mmu_index(env, false)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM:
            qemu_log(", UM\n");
            break;
        case MIPS_HFLAG_SM:
            qemu_log(", SM\n");
            break;
        case MIPS_HFLAG_KM:
            qemu_log("\n");
            break;
        default:
            cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
            break;
        }
    }
}
994 
995 static void set_pc(CPUMIPSState *env, target_ulong error_pc)
996 {
997     env->active_tc.PC = error_pc & ~(target_ulong)1;
998     if (error_pc & 1) {
999         env->hflags |= MIPS_HFLAG_M16;
1000     } else {
1001         env->hflags &= ~(MIPS_HFLAG_M16);
1002     }
1003 }
1004 
1005 static inline void exception_return(CPUMIPSState *env)
1006 {
1007     debug_pre_eret(env);
1008     if (env->CP0_Status & (1 << CP0St_ERL)) {
1009         set_pc(env, env->CP0_ErrorEPC);
1010         env->CP0_Status &= ~(1 << CP0St_ERL);
1011     } else {
1012         set_pc(env, env->CP0_EPC);
1013         env->CP0_Status &= ~(1 << CP0St_EXL);
1014     }
1015     compute_hflags(env);
1016     debug_post_eret(env);
1017 }
1018 
/*
 * ERET: return from exception and clear LLbit by setting the
 * link-load address to a value (1) no real access can match, so a
 * pending SC fails.  (ERETNC below skips this clearing step.)
 */
void helper_eret(CPUMIPSState *env)
{
    exception_return(env);
    env->CP0_LLAddr = 1;
    env->lladdr = 1;
}
1025 
/* ERETNC: like ERET, but does not clear LLbit ("no clear"). */
void helper_eretnc(CPUMIPSState *env)
{
    exception_return(env);
}
1030 
/*
 * DERET: return from debug mode.  Debug mode is left (and hflags
 * recomputed) before the PC is restored from DEPC, so set_pc() applies
 * the non-debug ISA-mode handling.
 */
void helper_deret(CPUMIPSState *env)
{
    debug_pre_eret(env);

    env->hflags &= ~MIPS_HFLAG_DM;
    compute_hflags(env);

    set_pc(env, env->CP0_DEPC);

    debug_post_eret(env);
}
1042 #endif /* !CONFIG_USER_ONLY */
1043 
1044 static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
1045 {
1046     if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
1047         return;
1048     }
1049     do_raise_exception(env, EXCP_RI, pc);
1050 }
1051 
1052 target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
1053 {
1054     check_hwrena(env, 0, GETPC());
1055     return env->CP0_EBase & 0x3ff;
1056 }
1057 
/* RDHWR 1 (SYNCI_Step): address step between SYNCI cache operations. */
target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
    check_hwrena(env, 1, GETPC());
    return env->SYNCI_Step;
}
1063 
/* RDHWR 2 (CC): cycle counter, i.e. the CP0 Count register. */
target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
    check_hwrena(env, 2, GETPC());
#ifdef CONFIG_USER_ONLY
    /* No timer model in user-only mode; return the raw register. */
    return env->CP0_Count;
#else
    /* Sign-extend the 32-bit counter on 64-bit targets. */
    return (int32_t)cpu_mips_get_count(env);
#endif
}
1073 
/* RDHWR 3 (CCRes): resolution of the CC counter (cycles per tick). */
target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
    check_hwrena(env, 3, GETPC());
    return env->CCRes;
}
1079 
/* RDHWR 4 (PerfCnt): performance counter register 0. */
target_ulong helper_rdhwr_performance(CPUMIPSState *env)
{
    check_hwrena(env, 4, GETPC());
    return env->CP0_Performance0;
}
1085 
1086 target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
1087 {
1088     check_hwrena(env, 5, GETPC());
1089     return (env->CP0_Config5 >> CP0C5_XNP) & 1;
1090 }
1091 
/*
 * Minimal emulation of a few PMON boot-monitor calls.
 * NOTE(review): 'function' is halved before dispatch -- presumably the
 * caller encodes the function number shifted left by one; confirm
 * against the PMON calling convention.
 */
void helper_pmon(CPUMIPSState *env, int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0) {
            env->active_tc.gpr[2] = -1;
        }
        /* Fall through */
    case 11: /* TODO: char inbyte (void); */
        /* No input available: return -1 in $v0. */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: print the character passed in $a0. */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /*
             * Print a NUL-terminated string whose *guest* address is in
             * $a0.  NOTE(review): the direct host dereference is only
             * valid where guest addresses map 1:1 onto host addresses
             * (user-mode emulation) -- confirm for softmmu builds.
             */
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
1118 
/*
 * WAIT: halt the virtual CPU until an interrupt arrives, exiting the
 * TCG execution loop via EXCP_HLT.
 */
void helper_wait(CPUMIPSState *env)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;
    /* Drop any stale wake request so the halt is not immediately undone. */
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
    /*
     * Last instruction in the block, PC was updated before
     * - no need to recover PC and icount.
     */
    raise_exception(env, EXCP_HLT);
}
1131 
1132 #if !defined(CONFIG_USER_ONLY)
1133 
1134 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1135                                   MMUAccessType access_type,
1136                                   int mmu_idx, uintptr_t retaddr)
1137 {
1138     MIPSCPU *cpu = MIPS_CPU(cs);
1139     CPUMIPSState *env = &cpu->env;
1140     int error_code = 0;
1141     int excp;
1142 
1143     if (!(env->hflags & MIPS_HFLAG_DM)) {
1144         env->CP0_BadVAddr = addr;
1145     }
1146 
1147     if (access_type == MMU_DATA_STORE) {
1148         excp = EXCP_AdES;
1149     } else {
1150         excp = EXCP_AdEL;
1151         if (access_type == MMU_INST_FETCH) {
1152             error_code |= EXCP_INST_NOTAVAIL;
1153         }
1154     }
1155 
1156     do_raise_exception_err(env, excp, error_code, retaddr);
1157 }
1158 
1159 void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
1160                                     vaddr addr, unsigned size,
1161                                     MMUAccessType access_type,
1162                                     int mmu_idx, MemTxAttrs attrs,
1163                                     MemTxResult response, uintptr_t retaddr)
1164 {
1165     MIPSCPU *cpu = MIPS_CPU(cs);
1166     CPUMIPSState *env = &cpu->env;
1167 
1168     if (access_type == MMU_INST_FETCH) {
1169         do_raise_exception(env, EXCP_IBE, retaddr);
1170     } else {
1171         do_raise_exception(env, EXCP_DBE, retaddr);
1172     }
1173 }
1174 #endif /* !CONFIG_USER_ONLY */
1175 
1176 
1177 /* MSA */
1178 /* Data format min and max values */
/* DF_BITS(df): width in bits of one element of data format 'df'. */
#define DF_BITS(df) (1 << ((df) + 3))

/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))

/*
 * MEMOP_IDX declares 'oi', the memop/mmu-index cookie passed to the
 * softmmu load/store helpers.  It expands to nothing in user-only
 * builds, which use the cpu_*_data accessors instead.
 */
#if !defined(CONFIG_USER_ONLY)
#define MEMOP_IDX(DF)                                           \
        TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN,  \
                                        cpu_mmu_index(env, false));
#else
#define MEMOP_IDX(DF)
#endif
1191 
1192 void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
1193                      target_ulong addr)
1194 {
1195     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1196     MEMOP_IDX(DF_BYTE)
1197 #if !defined(CONFIG_USER_ONLY)
1198 #if !defined(HOST_WORDS_BIGENDIAN)
1199     pwd->b[0]  = helper_ret_ldub_mmu(env, addr + (0  << DF_BYTE), oi, GETPC());
1200     pwd->b[1]  = helper_ret_ldub_mmu(env, addr + (1  << DF_BYTE), oi, GETPC());
1201     pwd->b[2]  = helper_ret_ldub_mmu(env, addr + (2  << DF_BYTE), oi, GETPC());
1202     pwd->b[3]  = helper_ret_ldub_mmu(env, addr + (3  << DF_BYTE), oi, GETPC());
1203     pwd->b[4]  = helper_ret_ldub_mmu(env, addr + (4  << DF_BYTE), oi, GETPC());
1204     pwd->b[5]  = helper_ret_ldub_mmu(env, addr + (5  << DF_BYTE), oi, GETPC());
1205     pwd->b[6]  = helper_ret_ldub_mmu(env, addr + (6  << DF_BYTE), oi, GETPC());
1206     pwd->b[7]  = helper_ret_ldub_mmu(env, addr + (7  << DF_BYTE), oi, GETPC());
1207     pwd->b[8]  = helper_ret_ldub_mmu(env, addr + (8  << DF_BYTE), oi, GETPC());
1208     pwd->b[9]  = helper_ret_ldub_mmu(env, addr + (9  << DF_BYTE), oi, GETPC());
1209     pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
1210     pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
1211     pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
1212     pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
1213     pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
1214     pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
1215 #else
1216     pwd->b[0]  = helper_ret_ldub_mmu(env, addr + (7  << DF_BYTE), oi, GETPC());
1217     pwd->b[1]  = helper_ret_ldub_mmu(env, addr + (6  << DF_BYTE), oi, GETPC());
1218     pwd->b[2]  = helper_ret_ldub_mmu(env, addr + (5  << DF_BYTE), oi, GETPC());
1219     pwd->b[3]  = helper_ret_ldub_mmu(env, addr + (4  << DF_BYTE), oi, GETPC());
1220     pwd->b[4]  = helper_ret_ldub_mmu(env, addr + (3  << DF_BYTE), oi, GETPC());
1221     pwd->b[5]  = helper_ret_ldub_mmu(env, addr + (2  << DF_BYTE), oi, GETPC());
1222     pwd->b[6]  = helper_ret_ldub_mmu(env, addr + (1  << DF_BYTE), oi, GETPC());
1223     pwd->b[7]  = helper_ret_ldub_mmu(env, addr + (0  << DF_BYTE), oi, GETPC());
1224     pwd->b[8]  = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
1225     pwd->b[9]  = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
1226     pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
1227     pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
1228     pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
1229     pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
1230     pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9  << DF_BYTE), oi, GETPC());
1231     pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8  << DF_BYTE), oi, GETPC());
1232 #endif
1233 #else
1234 #if !defined(HOST_WORDS_BIGENDIAN)
1235     pwd->b[0]  = cpu_ldub_data(env, addr + (0  << DF_BYTE));
1236     pwd->b[1]  = cpu_ldub_data(env, addr + (1  << DF_BYTE));
1237     pwd->b[2]  = cpu_ldub_data(env, addr + (2  << DF_BYTE));
1238     pwd->b[3]  = cpu_ldub_data(env, addr + (3  << DF_BYTE));
1239     pwd->b[4]  = cpu_ldub_data(env, addr + (4  << DF_BYTE));
1240     pwd->b[5]  = cpu_ldub_data(env, addr + (5  << DF_BYTE));
1241     pwd->b[6]  = cpu_ldub_data(env, addr + (6  << DF_BYTE));
1242     pwd->b[7]  = cpu_ldub_data(env, addr + (7  << DF_BYTE));
1243     pwd->b[8]  = cpu_ldub_data(env, addr + (8  << DF_BYTE));
1244     pwd->b[9]  = cpu_ldub_data(env, addr + (9  << DF_BYTE));
1245     pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
1246     pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
1247     pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
1248     pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
1249     pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
1250     pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
1251 #else
1252     pwd->b[0]  = cpu_ldub_data(env, addr + (7  << DF_BYTE));
1253     pwd->b[1]  = cpu_ldub_data(env, addr + (6  << DF_BYTE));
1254     pwd->b[2]  = cpu_ldub_data(env, addr + (5  << DF_BYTE));
1255     pwd->b[3]  = cpu_ldub_data(env, addr + (4  << DF_BYTE));
1256     pwd->b[4]  = cpu_ldub_data(env, addr + (3  << DF_BYTE));
1257     pwd->b[5]  = cpu_ldub_data(env, addr + (2  << DF_BYTE));
1258     pwd->b[6]  = cpu_ldub_data(env, addr + (1  << DF_BYTE));
1259     pwd->b[7]  = cpu_ldub_data(env, addr + (0  << DF_BYTE));
1260     pwd->b[8]  = cpu_ldub_data(env, addr + (15 << DF_BYTE));
1261     pwd->b[9]  = cpu_ldub_data(env, addr + (14 << DF_BYTE));
1262     pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
1263     pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
1264     pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
1265     pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
1266     pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
1267     pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
1268 #endif
1269 #endif
1270 }
1271 
1272 void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
1273                      target_ulong addr)
1274 {
1275     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1276     MEMOP_IDX(DF_HALF)
1277 #if !defined(CONFIG_USER_ONLY)
1278 #if !defined(HOST_WORDS_BIGENDIAN)
1279     pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
1280     pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
1281     pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
1282     pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
1283     pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
1284     pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
1285     pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
1286     pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
1287 #else
1288     pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
1289     pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
1290     pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
1291     pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
1292     pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
1293     pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
1294     pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
1295     pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
1296 #endif
1297 #else
1298 #if !defined(HOST_WORDS_BIGENDIAN)
1299     pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF));
1300     pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF));
1301     pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF));
1302     pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF));
1303     pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF));
1304     pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF));
1305     pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF));
1306     pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF));
1307 #else
1308     pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF));
1309     pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF));
1310     pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF));
1311     pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF));
1312     pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF));
1313     pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF));
1314     pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF));
1315     pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF));
1316 #endif
1317 #endif
1318 }
1319 
1320 void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
1321                      target_ulong addr)
1322 {
1323     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1324     MEMOP_IDX(DF_WORD)
1325 #if !defined(CONFIG_USER_ONLY)
1326 #if !defined(HOST_WORDS_BIGENDIAN)
1327     pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
1328     pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
1329     pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
1330     pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
1331 #else
1332     pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
1333     pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
1334     pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
1335     pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
1336 #endif
1337 #else
1338 #if !defined(HOST_WORDS_BIGENDIAN)
1339     pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD));
1340     pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD));
1341     pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD));
1342     pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD));
1343 #else
1344     pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD));
1345     pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD));
1346     pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD));
1347     pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD));
1348 #endif
1349 #endif
1350 }
1351 
1352 void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
1353                      target_ulong addr)
1354 {
1355     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1356     MEMOP_IDX(DF_DOUBLE)
1357 #if !defined(CONFIG_USER_ONLY)
1358     pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC());
1359     pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC());
1360 #else
1361     pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE));
1362     pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE));
1363 #endif
1364 }
1365 
/* True if an MSA-register-wide access at 'x' crosses a page boundary. */
#define MSA_PAGESPAN(x) \
        ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN / 8 - 1) >= TARGET_PAGE_SIZE)

/*
 * Before a vector store that spans two pages, probe both pages for
 * writability so the element-by-element store cannot fault after
 * having already written part of its data.
 */
static inline void ensure_writable_pages(CPUMIPSState *env,
                                         target_ulong addr,
                                         int mmu_idx,
                                         uintptr_t retaddr)
{
    /* FIXME: Probe the actual accesses (pass and use a size) */
    if (unlikely(MSA_PAGESPAN(addr))) {
        /* first page */
        probe_write(env, addr, 0, mmu_idx, retaddr);
        /* second page */
        addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
        probe_write(env, addr, 0, mmu_idx, retaddr);
    }
}
1383 
1384 void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
1385                      target_ulong addr)
1386 {
1387     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1388     int mmu_idx = cpu_mmu_index(env, false);
1389 
1390     MEMOP_IDX(DF_BYTE)
1391     ensure_writable_pages(env, addr, mmu_idx, GETPC());
1392 #if !defined(CONFIG_USER_ONLY)
1393 #if !defined(HOST_WORDS_BIGENDIAN)
1394     helper_ret_stb_mmu(env, addr + (0  << DF_BYTE), pwd->b[0],  oi, GETPC());
1395     helper_ret_stb_mmu(env, addr + (1  << DF_BYTE), pwd->b[1],  oi, GETPC());
1396     helper_ret_stb_mmu(env, addr + (2  << DF_BYTE), pwd->b[2],  oi, GETPC());
1397     helper_ret_stb_mmu(env, addr + (3  << DF_BYTE), pwd->b[3],  oi, GETPC());
1398     helper_ret_stb_mmu(env, addr + (4  << DF_BYTE), pwd->b[4],  oi, GETPC());
1399     helper_ret_stb_mmu(env, addr + (5  << DF_BYTE), pwd->b[5],  oi, GETPC());
1400     helper_ret_stb_mmu(env, addr + (6  << DF_BYTE), pwd->b[6],  oi, GETPC());
1401     helper_ret_stb_mmu(env, addr + (7  << DF_BYTE), pwd->b[7],  oi, GETPC());
1402     helper_ret_stb_mmu(env, addr + (8  << DF_BYTE), pwd->b[8],  oi, GETPC());
1403     helper_ret_stb_mmu(env, addr + (9  << DF_BYTE), pwd->b[9],  oi, GETPC());
1404     helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC());
1405     helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC());
1406     helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC());
1407     helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC());
1408     helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC());
1409     helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC());
1410 #else
1411     helper_ret_stb_mmu(env, addr + (7  << DF_BYTE), pwd->b[0],  oi, GETPC());
1412     helper_ret_stb_mmu(env, addr + (6  << DF_BYTE), pwd->b[1],  oi, GETPC());
1413     helper_ret_stb_mmu(env, addr + (5  << DF_BYTE), pwd->b[2],  oi, GETPC());
1414     helper_ret_stb_mmu(env, addr + (4  << DF_BYTE), pwd->b[3],  oi, GETPC());
1415     helper_ret_stb_mmu(env, addr + (3  << DF_BYTE), pwd->b[4],  oi, GETPC());
1416     helper_ret_stb_mmu(env, addr + (2  << DF_BYTE), pwd->b[5],  oi, GETPC());
1417     helper_ret_stb_mmu(env, addr + (1  << DF_BYTE), pwd->b[6],  oi, GETPC());
1418     helper_ret_stb_mmu(env, addr + (0  << DF_BYTE), pwd->b[7],  oi, GETPC());
1419     helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8],  oi, GETPC());
1420     helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9],  oi, GETPC());
1421     helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC());
1422     helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC());
1423     helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC());
1424     helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC());
1425     helper_ret_stb_mmu(env, addr + (9  << DF_BYTE), pwd->b[14], oi, GETPC());
1426     helper_ret_stb_mmu(env, addr + (8  << DF_BYTE), pwd->b[15], oi, GETPC());
1427 #endif
1428 #else
1429 #if !defined(HOST_WORDS_BIGENDIAN)
1430     cpu_stb_data(env, addr + (0  << DF_BYTE), pwd->b[0]);
1431     cpu_stb_data(env, addr + (1  << DF_BYTE), pwd->b[1]);
1432     cpu_stb_data(env, addr + (2  << DF_BYTE), pwd->b[2]);
1433     cpu_stb_data(env, addr + (3  << DF_BYTE), pwd->b[3]);
1434     cpu_stb_data(env, addr + (4  << DF_BYTE), pwd->b[4]);
1435     cpu_stb_data(env, addr + (5  << DF_BYTE), pwd->b[5]);
1436     cpu_stb_data(env, addr + (6  << DF_BYTE), pwd->b[6]);
1437     cpu_stb_data(env, addr + (7  << DF_BYTE), pwd->b[7]);
1438     cpu_stb_data(env, addr + (8  << DF_BYTE), pwd->b[8]);
1439     cpu_stb_data(env, addr + (9  << DF_BYTE), pwd->b[9]);
1440     cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]);
1441     cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]);
1442     cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]);
1443     cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]);
1444     cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]);
1445     cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]);
1446 #else
1447     cpu_stb_data(env, addr + (7  << DF_BYTE), pwd->b[0]);
1448     cpu_stb_data(env, addr + (6  << DF_BYTE), pwd->b[1]);
1449     cpu_stb_data(env, addr + (5  << DF_BYTE), pwd->b[2]);
1450     cpu_stb_data(env, addr + (4  << DF_BYTE), pwd->b[3]);
1451     cpu_stb_data(env, addr + (3  << DF_BYTE), pwd->b[4]);
1452     cpu_stb_data(env, addr + (2  << DF_BYTE), pwd->b[5]);
1453     cpu_stb_data(env, addr + (1  << DF_BYTE), pwd->b[6]);
1454     cpu_stb_data(env, addr + (0  << DF_BYTE), pwd->b[7]);
1455     cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]);
1456     cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]);
1457     cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]);
1458     cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]);
1459     cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]);
1460     cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]);
1461     cpu_stb_data(env, addr + (9  << DF_BYTE), pwd->b[14]);
1462     cpu_stb_data(env, addr + (8  << DF_BYTE), pwd->b[15]);
1463 #endif
1464 #endif
1465 }
1466 
1467 void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
1468                      target_ulong addr)
1469 {
1470     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1471     int mmu_idx = cpu_mmu_index(env, false);
1472 
1473     MEMOP_IDX(DF_HALF)
1474     ensure_writable_pages(env, addr, mmu_idx, GETPC());
1475 #if !defined(CONFIG_USER_ONLY)
1476 #if !defined(HOST_WORDS_BIGENDIAN)
1477     helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC());
1478     helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC());
1479     helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC());
1480     helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC());
1481     helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC());
1482     helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC());
1483     helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC());
1484     helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC());
1485 #else
1486     helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC());
1487     helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC());
1488     helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC());
1489     helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC());
1490     helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC());
1491     helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC());
1492     helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC());
1493     helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC());
1494 #endif
1495 #else
1496 #if !defined(HOST_WORDS_BIGENDIAN)
1497     cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]);
1498     cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]);
1499     cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]);
1500     cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]);
1501     cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]);
1502     cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]);
1503     cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]);
1504     cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]);
1505 #else
1506     cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]);
1507     cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]);
1508     cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]);
1509     cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]);
1510     cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]);
1511     cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]);
1512     cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]);
1513     cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]);
1514 #endif
1515 #endif
1516 }
1517 
1518 void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
1519                      target_ulong addr)
1520 {
1521     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1522     int mmu_idx = cpu_mmu_index(env, false);
1523 
1524     MEMOP_IDX(DF_WORD)
1525     ensure_writable_pages(env, addr, mmu_idx, GETPC());
1526 #if !defined(CONFIG_USER_ONLY)
1527 #if !defined(HOST_WORDS_BIGENDIAN)
1528     helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC());
1529     helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC());
1530     helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC());
1531     helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC());
1532 #else
1533     helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC());
1534     helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC());
1535     helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC());
1536     helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC());
1537 #endif
1538 #else
1539 #if !defined(HOST_WORDS_BIGENDIAN)
1540     cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]);
1541     cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]);
1542     cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]);
1543     cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]);
1544 #else
1545     cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]);
1546     cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]);
1547     cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]);
1548     cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]);
1549 #endif
1550 #endif
1551 }
1552 
1553 void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
1554                      target_ulong addr)
1555 {
1556     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
1557     int mmu_idx = cpu_mmu_index(env, false);
1558 
1559     MEMOP_IDX(DF_DOUBLE)
1560     ensure_writable_pages(env, addr, mmu_idx, GETPC());
1561 #if !defined(CONFIG_USER_ONLY)
1562     helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC());
1563     helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC());
1564 #else
1565     cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]);
1566     cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]);
1567 #endif
1568 }
1569 
/*
 * CACHE instruction.  Only the ITC tag operations are modelled: Index
 * Store Tag / Index Load Tag move CP0 TagLo to/from the ITC tag memory
 * region.  Invalidate/writeback operations are no-ops; anything else is
 * logged as unimplemented.  User-only builds ignore the instruction.
 */
void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
{
#ifndef CONFIG_USER_ONLY
    static const char *const type_name[] = {
        "Primary Instruction",
        "Primary Data or Unified Primary",
        "Tertiary",
        "Secondary"
    };
    /* op[1:0] selects the cache, op[4:2] the operation. */
    uint32_t cache_type = extract32(op, 0, 2);
    uint32_t cache_operation = extract32(op, 2, 3);
    /* NOTE(review): 0x1fffffff presumably strips the kseg bits to get a
     * physical index into the ITC tag region -- confirm. */
    target_ulong index = addr & 0x1fffffff;

    switch (cache_operation) {
    case 0b010: /* Index Store Tag */
        memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
                                     MO_64, MEMTXATTRS_UNSPECIFIED);
        break;
    case 0b001: /* Index Load Tag */
        memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
                                    MO_64, MEMTXATTRS_UNSPECIFIED);
        break;
    case 0b000: /* Index Invalidate */
    case 0b100: /* Hit Invalidate */
    case 0b110: /* Hit Writeback */
        /* no-op */
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n",
                      cache_operation, type_name[cache_type]);
        break;
    }
#endif
}
1604