/* xref: /qemu/target/mips/tcg/op_helper.c (revision d60146a9389db771fa4061d9376ba3e208ff2cdb) */
1 /*
2  *  MIPS emulation helpers for qemu.
3  *
4  *  Copyright (c) 2004-2005 Jocelyn Mayer
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  *
19  */
20 
21 #include "qemu/osdep.h"
22 #include "cpu.h"
23 #include "internal.h"
24 #include "exec/helper-proto.h"
25 #include "exec/exec-all.h"
26 #include "exec/memop.h"
27 #include "fpu_helper.h"
28 
29 /*****************************************************************************/
30 /* Exceptions processing helpers */
31 
/*
 * Raise a guest exception carrying an error code.
 * The host return address passed down is 0: callers are expected to have
 * already synchronized the CPU state (PC), so no unwinding is performed.
 */
void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                int error_code)
{
    do_raise_exception_err(env, exception, error_code, 0);
}
37 
/*
 * Raise a guest exception, unwinding guest state from the host return
 * address (GETPC() must be called directly in the top-level helper).
 */
void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, GETPC());
}
42 
/* Enter the debugger: raise EXCP_DEBUG with no state unwinding (pc = 0). */
void helper_raise_exception_debug(CPUMIPSState *env)
{
    do_raise_exception(env, EXCP_DEBUG, 0);
}
47 
/* File-local shorthand: raise an exception without unwinding (pc = 0). */
static void raise_exception(CPUMIPSState *env, uint32_t exception)
{
    do_raise_exception(env, exception, 0);
}
52 
53 /* 64 bits arithmetic for 32 bits hosts */
54 static inline uint64_t get_HILO(CPUMIPSState *env)
55 {
56     return ((uint64_t)(env->active_tc.HI[0]) << 32) |
57            (uint32_t)env->active_tc.LO[0];
58 }
59 
60 static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
61 {
62     env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
63     return env->active_tc.HI[0] = (int32_t)(HILO >> 32);
64 }
65 
66 static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
67 {
68     target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
69     env->active_tc.HI[0] = (int32_t)(HILO >> 32);
70     return tmp;
71 }
72 
73 /* Multiplication variants of the vr54xx. */
74 target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
75                          target_ulong arg2)
76 {
77     return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
78                                  (int64_t)(int32_t)arg2));
79 }
80 
81 target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
82                           target_ulong arg2)
83 {
84     return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
85                        (uint64_t)(uint32_t)arg2);
86 }
87 
88 target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
89                          target_ulong arg2)
90 {
91     return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
92                        (int64_t)(int32_t)arg2);
93 }
94 
95 target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
96                            target_ulong arg2)
97 {
98     return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
99                        (int64_t)(int32_t)arg2);
100 }
101 
102 target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
103                           target_ulong arg2)
104 {
105     return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
106                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
107 }
108 
109 target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
110                             target_ulong arg2)
111 {
112     return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
113                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
114 }
115 
116 target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
117                          target_ulong arg2)
118 {
119     return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
120                        (int64_t)(int32_t)arg2);
121 }
122 
123 target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
124                            target_ulong arg2)
125 {
126     return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
127                        (int64_t)(int32_t)arg2);
128 }
129 
130 target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
131                           target_ulong arg2)
132 {
133     return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
134                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
135 }
136 
137 target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
138                             target_ulong arg2)
139 {
140     return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
141                        (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
142 }
143 
144 target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
145                           target_ulong arg2)
146 {
147     return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
148 }
149 
150 target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
151                            target_ulong arg2)
152 {
153     return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
154                        (uint64_t)(uint32_t)arg2);
155 }
156 
157 target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
158                            target_ulong arg2)
159 {
160     return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
161                        (int64_t)(int32_t)arg2);
162 }
163 
164 target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
165                             target_ulong arg2)
166 {
167     return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
168                        (uint64_t)(uint32_t)arg2);
169 }
170 
171 static inline target_ulong bitswap(target_ulong v)
172 {
173     v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
174               ((v & (target_ulong)0x5555555555555555ULL) << 1);
175     v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
176               ((v & (target_ulong)0x3333333333333333ULL) << 2);
177     v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
178               ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
179     return v;
180 }
181 
#ifdef TARGET_MIPS64
/* DBITSWAP: reverse the bits within each byte of the full 64-bit value. */
target_ulong helper_dbitswap(target_ulong rt)
{
    return bitswap(rt);
}
#endif
188 
/*
 * BITSWAP: reverse the bits within each byte of the low 32 bits;
 * the int32_t cast sign-extends the result on 64-bit targets.
 */
target_ulong helper_bitswap(target_ulong rt)
{
    return (int32_t)bitswap(rt);
}
193 
/*
 * ROTX-style rotate/extract with optional bit striping, computed with a
 * five-stage network on a 64-bit word built from two copies of the 32-bit
 * input.  Each stage conditionally copies bits from a fixed distance
 * (16, 8, 4, 2, 1); one bit of 'shift' or 'shiftx' (selected per bit
 * index) enables each stage.  The result is the sign-extended low
 * 32 bits of the final value.
 */
target_ulong helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx,
                        uint32_t stripe)
{
    int i;
    /* Duplicate the 32-bit source into both halves of a 64-bit word. */
    uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff);
    uint64_t tmp1 = tmp0;
    /* Stage 1: conditional copy from distance 16. */
    for (i = 0; i <= 46; i++) {
        int s;
        if (i & 0x8) {
            s = shift;
        } else {
            s = shiftx;
        }

        /* Striping inverts the stage control for every other 4-bit group. */
        if (stripe != 0 && !(i & 0x4)) {
            s = ~s;
        }
        if (s & 0x10) {
            if (tmp0 & (1LL << (i + 16))) {
                tmp1 |= 1LL << i;
            } else {
                tmp1 &= ~(1LL << i);
            }
        }
    }

    /* Stage 2: conditional copy from distance 8. */
    uint64_t tmp2 = tmp1;
    for (i = 0; i <= 38; i++) {
        int s;
        if (i & 0x4) {
            s = shift;
        } else {
            s = shiftx;
        }

        if (s & 0x8) {
            if (tmp1 & (1LL << (i + 8))) {
                tmp2 |= 1LL << i;
            } else {
                tmp2 &= ~(1LL << i);
            }
        }
    }

    /* Stage 3: conditional copy from distance 4. */
    uint64_t tmp3 = tmp2;
    for (i = 0; i <= 34; i++) {
        int s;
        if (i & 0x2) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x4) {
            if (tmp2 & (1LL << (i + 4))) {
                tmp3 |= 1LL << i;
            } else {
                tmp3 &= ~(1LL << i);
            }
        }
    }

    /* Stage 4: conditional copy from distance 2. */
    uint64_t tmp4 = tmp3;
    for (i = 0; i <= 32; i++) {
        int s;
        if (i & 0x1) {
            s = shift;
        } else {
            s = shiftx;
        }
        if (s & 0x2) {
            if (tmp3 & (1LL << (i + 2))) {
                tmp4 |= 1LL << i;
            } else {
                tmp4 &= ~(1LL << i);
            }
        }
    }

    /* Stage 5: conditional copy from distance 1, controlled by shift only. */
    uint64_t tmp5 = tmp4;
    for (i = 0; i <= 31; i++) {
        int s;
        s = shift;
        if (s & 0x1) {
            if (tmp4 & (1LL << (i + 1))) {
                tmp5 |= 1LL << i;
            } else {
                tmp5 &= ~(1LL << i);
            }
        }
    }

    /* Sign-extend the low 32 bits of the result. */
    return (int64_t)(int32_t)(uint32_t)tmp5;
}
287 
/* FORK (MT ASE): currently a stub; the TC register write is unimplemented. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    /*
     * arg1 = rt, arg2 = rs
     * TODO: store to TC register
     */
}
295 
/*
 * YIELD (MT ASE).  The signed argument selects the action:
 *   < 0 (except -2): if yield-scheduler intercept (YSI) is enabled and the
 *        TC is in dynamic allocation mode (DT), raise a Thread exception
 *        with EXCPT code 4;
 *   == 0: TC deallocation/underflow — unimplemented (the if (0) branch is
 *        a placeholder for the underflow path);
 *   > 0: yield qualifier inputs unimplemented; raise a Thread exception
 *        with EXCPT code 2.
 * Returns CP0_YQMask.
 */
target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                do_raise_exception(env, EXCP_THREAD, GETPC());
            }
        }
    } else if (arg1 == 0) {
        if (0) {
            /* TODO: TC underflow */
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            do_raise_exception(env, EXCP_THREAD, GETPC());
        } else {
            /* TODO: Deallocate TC */
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        do_raise_exception(env, EXCP_THREAD, GETPC());
    }
    return env->CP0_YQMask;
}
326 
327 #ifndef CONFIG_USER_ONLY
328 /* TLB management */
329 static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
330 {
331     /* Discard entries from env->tlb[first] onwards.  */
332     while (env->tlb->tlb_in_use > first) {
333         r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
334     }
335 }
336 
/*
 * Extract the physical frame number from an EntryLo value.
 * On 32-bit targets the PFN is split across the PFN field (bits 6..29)
 * and the PFNX extension in the upper word.
 */
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) | /* PFN */
           (extract64(entrylo, 32, 32) << 24); /* PFNX */
#endif
}
346 
/*
 * Populate the soft-TLB entry at 'idx' from the CP0 EntryHi/EntryLo0/
 * EntryLo1/PageMask/MemoryMapID registers.  If EntryHi.EHINV is set the
 * entry is simply marked invalid.
 */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    /* VPN2 field; on 64-bit targets also masked by the segment mask. */
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    /* The entry is global only if both EntryLo G bits are set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    /* Even page: valid, dirty, cache attribute, execute/read inhibit, PFN. */
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    /* Odd page: same decode from EntryLo1. */
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}
380 
/*
 * TLBINV: mark invalid (EHINV) every non-global TLB entry whose
 * ASID/MMID matches the current one, then flush QEMU's own TLB.
 * When Config5.MI is clear, the 16-bit ASID stands in for the MMID.
 */
void r4k_helper_tlbinv(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    MMID = mi ? MMID : (uint32_t) ASID;
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}
400 
401 void r4k_helper_tlbinvf(CPUMIPSState *env)
402 {
403     int idx;
404 
405     for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
406         env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
407     }
408     cpu_mips_tlb_flush(env);
409 }
410 
/*
 * TLBWI: write the TLB entry selected by CP0_Index from the CP0 entry
 * registers.  Cached (shadow) entries are discarded first unless the
 * write only upgrades access permissions on the same mapping.
 */
void r4k_helper_tlbwi(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    /* Without Config5.MI, the ASID acts as the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;

    /* Mask off the P bit and clamp the index to the TLB size. */
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    /* Decode the would-be new entry's attributes from the CP0 registers. */
    EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1;
    RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;
    XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1;
    RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1;

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /*
     * Discard cached TLB entries, unless tlbwi is just upgrading access
     * permissions on the current entry.
     */
    if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G ||
        (!tlb->EHINV && EHINV) ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1) ||
        (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}
458 
459 void r4k_helper_tlbwr(CPUMIPSState *env)
460 {
461     int r = cpu_mips_get_random(env);
462 
463     r4k_invalidate_tlb(env, r, 1);
464     r4k_fill_tlb(env, r);
465 }
466 
/*
 * TLBP: probe the TLB for an entry matching EntryHi.  On a match,
 * CP0_Index is set to the matching index; otherwise the P bit
 * (bit 31) is set.  On a miss, shadow entries (beyond nb_tlb) that
 * match are discarded so QEMU's state stays consistent.
 */
void r4k_helper_tlbp(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    int i;

    /* Without Config5.MI, the ASID acts as the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        /* Check ASID/MMID, virtual page number & size */
        if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match.  */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
            /*
             * Check ASID/MMID, virtual page number & size.
             * Unlike the loop above, this one does not test EHINV.
             */
            if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) {
                r4k_mips_tlb_flush_extra(env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
519 
/*
 * Inverse of get_tlb_pfn_from_entrylo: place a stored PFN back into the
 * EntryLo layout (PFN at bit 6, plus the PFNX field on 32-bit targets).
 */
static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn)
{
#if defined(TARGET_MIPS64)
    return tlb_pfn << 6;
#else
    return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */
           (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */
#endif
}
529 
/*
 * TLBR: read the TLB entry selected by CP0_Index back into the CP0
 * EntryHi/EntryLo/PageMask (and MemoryMapID) registers.  An EHINV entry
 * reads back as EntryHi.EHINV with everything else zeroed.
 */
void r4k_helper_tlbr(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    /* Without Config5.MI, the ASID acts as the MMID. */
    MMID = mi ? MMID : (uint32_t) ASID;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
    /* If this will change the current ASID/MMID, flush qemu's TLB.  */
    if (MMID != tlb_mmid) {
        cpu_mips_tlb_flush(env);
    }

    /* Reading the entry registers invalidates any shadow entries. */
    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    if (tlb->EHINV) {
        env->CP0_EntryHi = 1 << CP0EnHi_EHINV;
        env->CP0_PageMask = 0;
        env->CP0_EntryLo0 = 0;
        env->CP0_EntryLo1 = 0;
    } else {
        env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID;
        env->CP0_MemoryMapID = tlb->MMID;
        env->CP0_PageMask = tlb->PageMask;
        /* Re-encode each page half into the EntryLo register layout. */
        env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        ((uint64_t)tlb->RI0 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12);
        env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        ((uint64_t)tlb->RI1 << CP0EnLo_RI) |
                        ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) |
                        get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12);
    }
}
570 
/* TLBWI: dispatch to the MMU-model-specific implementation. */
void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}
575 
/* TLBWR: dispatch to the MMU-model-specific implementation. */
void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}
580 
/* TLBP: dispatch to the MMU-model-specific implementation. */
void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}
585 
/* TLBR: dispatch to the MMU-model-specific implementation. */
void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}
590 
/* TLBINV: dispatch to the MMU-model-specific implementation. */
void helper_tlbinv(CPUMIPSState *env)
{
    env->tlb->helper_tlbinv(env);
}
595 
/* TLBINVF: dispatch to the MMU-model-specific implementation. */
void helper_tlbinvf(CPUMIPSState *env)
{
    env->tlb->helper_tlbinvf(env);
}
600 
/*
 * Apply a GINVT invalidation request to this CPU's TLB.  An entry is
 * marked EHINV when any of the requested predicates matches:
 *   invAll    - all non-wired entries;
 *   invVAMMid - VA match and (global or MMID match);
 *   invVA     - VA match only;
 *   invMMid   - MMID match on non-global entries.
 */
static void global_invalidate_tlb(CPUMIPSState *env,
                           uint32_t invMsgVPN2,
                           uint8_t invMsgR,
                           uint32_t invMsgMMid,
                           bool invAll,
                           bool invVAMMid,
                           bool invMMid,
                           bool invVA)
{

    int idx;
    r4k_tlb_t *tlb;
    bool VAMatch;
    bool MMidMatch;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        VAMatch =
            (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask))
#ifdef TARGET_MIPS64
            &&
            /*
             * NOTE(review): the R field is taken from this CPU's current
             * CP0_EntryHi rather than from the TLB entry being examined —
             * confirm this is the intended comparison.
             */
            (extract64(env->CP0_EntryHi, 62, 2) == invMsgR)
#endif
            );
        MMidMatch = tlb->MMID == invMsgMMid;
        if ((invAll && (idx > env->CP0_Wired)) ||
            (VAMatch && invVAMMid && (tlb->G || MMidMatch)) ||
            (VAMatch && invVA) ||
            (MMidMatch && !(tlb->G) && invMMid)) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}
635 
/*
 * GINVT: broadcast a TLB invalidation to every CPU.  'type' selects the
 * operation (0 = all, 1 = by VA, 2 = by MMID, 3 = by VA+MMID); the VPN2
 * (and, on 64-bit targets, the R field) come from 'arg', the MMID from
 * this CPU's CP0_MemoryMapID.
 */
void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type)
{
    bool invAll = type == 0;
    bool invVA = type == 1;
    bool invMMid = type == 2;
    bool invVAMMid = type == 3;
    uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1);
    uint8_t invMsgR = 0;
    uint32_t invMsgMMid = env->CP0_MemoryMapID;
    CPUState *other_cs = first_cpu;

#ifdef TARGET_MIPS64
    invMsgR = extract64(arg, 62, 2);
#endif

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid,
                              invAll, invVAMMid, invMMid, invVA);
    }
}
657 
658 #endif /* !CONFIG_USER_ONLY */
659 
660 static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc)
661 {
662     if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) {
663         return;
664     }
665     do_raise_exception(env, EXCP_RI, pc);
666 }
667 
/* RDHWR 0 (CPUNum): low 10 bits of CP0_EBase. */
target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
{
    check_hwrena(env, 0, GETPC());
    return env->CP0_EBase & 0x3ff;
}
673 
/* RDHWR 1 (SYNCI_Step): address step for SYNCI cache maintenance. */
target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
    check_hwrena(env, 1, GETPC());
    return env->SYNCI_Step;
}
679 
/*
 * RDHWR 2 (CC): cycle counter.  System emulation reads the live timer
 * (sign-extended to the target width); user emulation returns the raw
 * CP0_Count field.
 */
target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
    check_hwrena(env, 2, GETPC());
#ifdef CONFIG_USER_ONLY
    return env->CP0_Count;
#else
    return (int32_t)cpu_mips_get_count(env);
#endif
}
689 
/* RDHWR 3 (CCRes): cycle counter resolution. */
target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
    check_hwrena(env, 3, GETPC());
    return env->CCRes;
}
695 
/* RDHWR 4 (PerfCtr): performance counter register 0. */
target_ulong helper_rdhwr_performance(CPUMIPSState *env)
{
    check_hwrena(env, 4, GETPC());
    return env->CP0_Performance0;
}
701 
/* RDHWR 5 (XNP): Config5.XNP — LL/SC pair availability indicator. */
target_ulong helper_rdhwr_xnp(CPUMIPSState *env)
{
    check_hwrena(env, 5, GETPC());
    return (env->CP0_Config5 >> CP0C5_XNP) & 1;
}
707 
/*
 * Minimal PMON firmware call emulation, dispatched on function/2.
 * Input functions report "no character" (-1); output functions print
 * to the host's stdout.
 */
void helper_pmon(CPUMIPSState *env, int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0) {
            env->active_tc.gpr[2] = -1;
        }
        /* Fall through */
        /* (the fallthrough makes the assignment above redundant) */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        /* outbyte: print the low byte of $a0. */
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /*
             * NOTE(review): treats the guest register value as a host
             * pointer — presumably only meaningful where guest addresses
             * map directly to host addresses; verify for softmmu.
             */
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
734 
/*
 * WAIT: halt the CPU until an interrupt arrives.  The pending WAKE
 * request is cleared before halting, and EXCP_HLT exits the CPU loop.
 */
void helper_wait(CPUMIPSState *env)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
    /*
     * Last instruction in the block, PC was updated before
     * - no need to recover PC and icount.
     */
    raise_exception(env, EXCP_HLT);
}
747 
748 #if !defined(CONFIG_USER_ONLY)
749 
750 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
751                                   MMUAccessType access_type,
752                                   int mmu_idx, uintptr_t retaddr)
753 {
754     MIPSCPU *cpu = MIPS_CPU(cs);
755     CPUMIPSState *env = &cpu->env;
756     int error_code = 0;
757     int excp;
758 
759     if (!(env->hflags & MIPS_HFLAG_DM)) {
760         env->CP0_BadVAddr = addr;
761     }
762 
763     if (access_type == MMU_DATA_STORE) {
764         excp = EXCP_AdES;
765     } else {
766         excp = EXCP_AdEL;
767         if (access_type == MMU_INST_FETCH) {
768             error_code |= EXCP_INST_NOTAVAIL;
769         }
770     }
771 
772     do_raise_exception_err(env, excp, error_code, retaddr);
773 }
774 
775 void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
776                                     vaddr addr, unsigned size,
777                                     MMUAccessType access_type,
778                                     int mmu_idx, MemTxAttrs attrs,
779                                     MemTxResult response, uintptr_t retaddr)
780 {
781     MIPSCPU *cpu = MIPS_CPU(cs);
782     CPUMIPSState *env = &cpu->env;
783 
784     if (access_type == MMU_INST_FETCH) {
785         do_raise_exception(env, EXCP_IBE, retaddr);
786     } else {
787         do_raise_exception(env, EXCP_DBE, retaddr);
788     }
789 }
790 #endif /* !CONFIG_USER_ONLY */
791 
/*
 * CACHE instruction.  Only the ITC tag load/store operations are
 * modelled, dispatched to the itc_tag memory region; the invalidate
 * and writeback operations are no-ops, and anything else is logged
 * as unimplemented.  User-mode emulation treats CACHE as a no-op.
 */
void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
{
#ifndef CONFIG_USER_ONLY
    static const char *const type_name[] = {
        "Primary Instruction",
        "Primary Data or Unified Primary",
        "Tertiary",
        "Secondary"
    };
    /* op encodes the target cache (bits 0-1) and the operation (bits 2-4). */
    uint32_t cache_type = extract32(op, 0, 2);
    uint32_t cache_operation = extract32(op, 2, 3);
    target_ulong index = addr & 0x1fffffff;

    switch (cache_operation) {
    case 0b010: /* Index Store Tag */
        memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
                                     MO_64, MEMTXATTRS_UNSPECIFIED);
        break;
    case 0b001: /* Index Load Tag */
        memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
                                    MO_64, MEMTXATTRS_UNSPECIFIED);
        break;
    case 0b000: /* Index Invalidate */
    case 0b100: /* Hit Invalidate */
    case 0b110: /* Hit Writeback */
        /* no-op */
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "cache operation:%u (type: %s cache)\n",
                      cache_operation, type_name[cache_type]);
        break;
    }
#endif
}
826