xref: /qemu/target/mips/tcg/system/cp0_helper.c (revision fca2817fdcb00e65020c2dcfcb0b23b2a20ea3c4)
1 /*
2  *  Helpers for emulation of CP0-related MIPS instructions.
3  *
4  *  Copyright (C) 2004-2005  Jocelyn Mayer
5  *  Copyright (C) 2020  Wave Computing, Inc.
6  *  Copyright (C) 2020  Aleksandar Markovic <amarkovic@wavecomp.com>
7  *
8  * This library is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * This library is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20  *
21  */
22 
23 #include "qemu/osdep.h"
24 #include "qemu/log.h"
25 #include "qemu/main-loop.h"
26 #include "cpu.h"
27 #include "internal.h"
28 #include "qemu/host-utils.h"
29 #include "exec/helper-proto.h"
30 #include "exec/cputlb.h"
31 
32 
33 /* SMP helpers.  */
/* Return true if VPE @c is in wait-for-interrupt state. */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    /*
     * If the VPE is halted but otherwise active, it means it's waiting for
     * an interrupt.
     */
    return cpu->halted && mips_vpe_active(env);
}
45 
46 static bool mips_vp_is_wfi(MIPSCPU *c)
47 {
48     CPUState *cpu = CPU(c);
49     CPUMIPSState *env = &c->env;
50 
51     return cpu->halted && mips_vp_active(env);
52 }
53 
/* Kick VPE @c by raising a WAKE interrupt (taken with the BQL held). */
static inline void mips_vpe_wake(MIPSCPU *c)
{
    /*
     * Don't set ->halted = 0 directly, let it be done via cpu_has_work
     * because there might be other conditions that state that c should
     * be sleeping.
     */
    bql_lock();
    cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE);
    bql_unlock();
}
65 
/* Halt @cpu after its VPE has been shut off, discarding pending wakes. */
static inline void mips_vpe_sleep(MIPSCPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /*
     * The VPE was shut off, really go to bed.
     * Reset any old _WAKE requests.
     */
    cs->halted = 1;
    cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
}
77 
/*
 * Wake the VPE owning TC @tc if the VPE is active and not merely waiting
 * for an interrupt.  @tc is currently unused (see FIXME below).
 */
static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule.  */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
        mips_vpe_wake(cpu);
    }
}
87 
/*
 * Put the VPE owning TC @tc to sleep if the VPE is no longer active.
 * @tc is currently unused (see FIXME below).
 */
static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule.  */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(cpu);
    }
}
97 
98 /**
99  * mips_cpu_map_tc:
100  * @env: CPU from which mapping is performed.
101  * @tc: Should point to an int with the value of the global TC index.
102  *
103  * This function will transform @tc into a local index within the
104  * returned #CPUMIPSState.
105  */
106 
107 /*
108  * FIXME: This code assumes that all VPEs have the same number of TCs,
109  *        which depends on runtime setup. Can probably be fixed by
110  *        walking the list of CPUMIPSStates.
111  */
static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
{
    MIPSCPU *cpu;
    CPUState *cs;
    CPUState *other_cs;
    int vpe_idx;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs.  */
        *tc = env->current_tc;
        return env;
    }

    cs = env_cpu(env);
    /*
     * Split the global TC index into a VPE index and a VPE-local TC
     * index, assuming every VPE has cs->nr_threads TCs (see FIXME above).
     */
    vpe_idx = tc_idx / cs->nr_threads;
    *tc = tc_idx % cs->nr_threads;
    other_cs = qemu_get_cpu(vpe_idx);
    if (other_cs == NULL) {
        /* No such VPE: fall back to the current one. */
        return env;
    }
    cpu = MIPS_CPU(other_cs);
    return &cpu->env;
}
136 
137 /*
138  * The per VPE CP0_Status register shares some fields with the per TC
139  * CP0_TCStatus registers. These fields are wired to the same registers,
140  * so changes to either of them should be reflected on both registers.
141  *
 * Also, EntryHi shares the bottom 8 bit ASID with TCStatus.
 *
 * These helpers synchronize the registers for a given cpu.
145  */
146 
147 /*
148  * Called for updates to CP0_Status.  Defined in "cpu.h" for gdbstub.c.
149  * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
150  *                                   int tc);
151  */
152 
153 /* Called for updates to CP0_TCStatus.  */
/*
 * Propagate the TCU/TMX/TKSU fields of a newly written TCStatus value @v
 * into the shared CP0_Status, and its ASID field into CP0_EntryHi, on
 * @cpu.  The @tc argument is currently unused.
 */
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
                             target_ulong v)
{
    uint32_t status;
    uint32_t tcu, tmx, tasid, tksu;
    /* Status fields mirrored from TCStatus. */
    uint32_t mask = ((1U << CP0St_CU3)
                       | (1 << CP0St_CU2)
                       | (1 << CP0St_CU1)
                       | (1 << CP0St_CU0)
                       | (1 << CP0St_MX)
                       | (3 << CP0St_KSU));

    tcu = (v >> CP0TCSt_TCU0) & 0xf;
    tmx = (v >> CP0TCSt_TMX) & 0x1;
    tasid = v & cpu->CP0_EntryHi_ASID_mask;
    tksu = (v >> CP0TCSt_TKSU) & 0x3;

    /* Reassemble the mirrored fields at their Status bit positions. */
    status = tcu << CP0St_CU0;
    status |= tmx << CP0St_MX;
    status |= tksu << CP0St_KSU;

    cpu->CP0_Status &= ~mask;
    cpu->CP0_Status |= status;

    /* Sync the TASID with EntryHi.  */
    cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
    cpu->CP0_EntryHi |= tasid;

    compute_hflags(cpu);
}
184 
185 /* Called for updates to CP0_EntryHi.  */
186 static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
187 {
188     int32_t *tcst;
189     uint32_t asid, v = cpu->CP0_EntryHi;
190 
191     asid = v & cpu->CP0_EntryHi_ASID_mask;
192 
193     if (tc == cpu->current_tc) {
194         tcst = &cpu->active_tc.CP0_TCStatus;
195     } else {
196         tcst = &cpu->tcs[tc].CP0_TCStatus;
197     }
198 
199     *tcst &= ~cpu->CP0_EntryHi_ASID_mask;
200     *tcst |= asid;
201 }
202 
203 /* XXX: do not use a global */
204 uint32_t cpu_mips_get_random(CPUMIPSState *env)
205 {
206     static uint32_t seed = 1;
207     static uint32_t prev_idx;
208     uint32_t idx;
209     uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;
210 
211     if (nb_rand_tlb == 1) {
212         return env->tlb->nb_tlb - 1;
213     }
214 
215     /* Don't return same value twice, so get another value */
216     do {
217         /*
218          * Use a simple algorithm of Linear Congruential Generator
219          * from ISO/IEC 9899 standard.
220          */
221         seed = 1103515245 * seed + 12345;
222         idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
223     } while (idx == prev_idx);
224     prev_idx = idx;
225     return idx;
226 }
227 
228 /* CP0 helpers */
/* Read the MVP-shared MVPControl register. */
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPControl;
}
233 
/* Read the MVP-shared MVPConf0 register. */
target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf0;
}
238 
/* Read the MVP-shared MVPConf1 register. */
target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf1;
}
243 
/* Read CP0_Random: a pseudo-random, non-wired TLB index (sign-extended). */
target_ulong helper_mfc0_random(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_random(env);
}
248 
/* Read TCStatus of the current TC. */
target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCStatus;
}
253 
254 target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
255 {
256     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
257     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
258 
259     if (other_tc == other->current_tc) {
260         return other->active_tc.CP0_TCStatus;
261     } else {
262         return other->tcs[other_tc].CP0_TCStatus;
263     }
264 }
265 
/* Read TCBind of the current TC. */
target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCBind;
}
270 
271 target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
272 {
273     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
274     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
275 
276     if (other_tc == other->current_tc) {
277         return other->active_tc.CP0_TCBind;
278     } else {
279         return other->tcs[other_tc].CP0_TCBind;
280     }
281 }
282 
/* Read TCRestart (the restart PC) of the current TC. */
target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}
287 
288 target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
289 {
290     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
291     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
292 
293     if (other_tc == other->current_tc) {
294         return other->active_tc.PC;
295     } else {
296         return other->tcs[other_tc].PC;
297     }
298 }
299 
/* Read TCHalt of the current TC. */
target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}
304 
305 target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
306 {
307     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
308     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
309 
310     if (other_tc == other->current_tc) {
311         return other->active_tc.CP0_TCHalt;
312     } else {
313         return other->tcs[other_tc].CP0_TCHalt;
314     }
315 }
316 
/* Read TCContext of the current TC. */
target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}
321 
322 target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
323 {
324     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
325     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
326 
327     if (other_tc == other->current_tc) {
328         return other->active_tc.CP0_TCContext;
329     } else {
330         return other->tcs[other_tc].CP0_TCContext;
331     }
332 }
333 
/* Read TCSchedule of the current TC. */
target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}
338 
339 target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
340 {
341     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
342     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
343 
344     if (other_tc == other->current_tc) {
345         return other->active_tc.CP0_TCSchedule;
346     } else {
347         return other->tcs[other_tc].CP0_TCSchedule;
348     }
349 }
350 
/* Read TCScheFBack of the current TC. */
target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}
355 
356 target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
357 {
358     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
359     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
360 
361     if (other_tc == other->current_tc) {
362         return other->active_tc.CP0_TCScheFBack;
363     } else {
364         return other->tcs[other_tc].CP0_TCScheFBack;
365     }
366 }
367 
/* Read CP0_Count via the timer backend (sign-extended to target_ulong). */
target_ulong helper_mfc0_count(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_count(env);
}
372 
/* Read EntryHi of the VPE owning the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EntryHi;
}
380 
/* Read Cause of the VPE owning the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_cause(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_Cause;
}
388 
/* Read Status of the VPE owning the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_status(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_Status;
}
396 
/* Read LLAddr, shifted right per the core's CP0_LLAddr_shift. */
target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
    return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
}
401 
/* Read the low 32 bits of the MAAR entry selected by MAARI. */
target_ulong helper_mfc0_maar(CPUMIPSState *env)
{
    return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
}
406 
/* Read the high 32 bits of the MAAR entry selected by MAARI. */
target_ulong helper_mfhc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI] >> 32;
}
411 
/* Read WatchLo for watchpoint register @sel. */
target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}
416 
/* Read the low 32 bits of WatchHi for watchpoint register @sel. */
target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t) env->CP0_WatchHi[sel];
}
421 
/* Read the high 32 bits of WatchHi for watchpoint register @sel. */
target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel] >> 32;
}
426 
427 target_ulong helper_mfc0_debug(CPUMIPSState *env)
428 {
429     target_ulong t0 = env->CP0_Debug;
430     if (env->hflags & MIPS_HFLAG_DM) {
431         t0 |= 1 << CP0DB_DM;
432     }
433 
434     return t0;
435 }
436 
/*
 * Read Debug for the TC selected by VPEControl.TargTC, with the per-TC
 * SSt and Halt bits taken from that TC's Debug_tcstatus.
 */
target_ulong helper_mftc0_debug(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    } else {
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
    }

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
            (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
453 
454 #if defined(TARGET_MIPS64)
/* 64-bit read of TCRestart (the restart PC) of the current TC. */
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}
459 
/* 64-bit read of TCHalt of the current TC. */
target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}
464 
/* 64-bit read of TCContext of the current TC. */
target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}
469 
/* 64-bit read of TCSchedule of the current TC. */
target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}
474 
/* 64-bit read of TCScheFBack of the current TC. */
target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}
479 
/* 64-bit read of LLAddr, shifted per the core's CP0_LLAddr_shift. */
target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
    return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
}
484 
/* 64-bit read of the MAAR entry selected by MAARI. */
target_ulong helper_dmfc0_maar(CPUMIPSState *env)
{
    return env->CP0_MAAR[env->CP0_MAARI];
}
489 
/* 64-bit read of WatchLo for watchpoint register @sel. */
target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
494 
/* 64-bit read of WatchHi for watchpoint register @sel. */
target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}
499 
500 #endif /* TARGET_MIPS64 */
501 
502 void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
503 {
504     uint32_t index_p = env->CP0_Index & 0x80000000;
505     uint32_t tlb_index = arg1 & 0x7fffffff;
506     if (tlb_index < env->tlb->nb_tlb) {
507         if (env->insn_flags & ISA_MIPS_R6) {
508             index_p |= arg1 & 0x80000000;
509         }
510         env->CP0_Index = index_p | tlb_index;
511     }
512 }
513 
/*
 * Write MVPControl.  Only fields permitted by the current MVP/VPC state
 * are writable; the rest are preserved.
 */
void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    }
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0MVPCo_STLB);
    }
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable shared TLB, enable/disable VPEs. */

    env->mvp->CP0_MVPControl = newval;
}
532 
/* Write VPEControl, restricted to the YSI/GSI/TE/TargTC fields. */
void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /*
     * Yield scheduler intercept not implemented.
     * Gating storage scheduler intercept not implemented.
     */

    /* TODO: Enable/disable TCs. */

    env->CP0_VPEControl = newval;
}
551 
/* Write VPEControl of the VPE owning the TC selected by VPEControl.TargTC. */
void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask;
    uint32_t newval;

    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable TCs.  */

    other->CP0_VPEControl = newval;
}
567 
/* Read VPEControl of the VPE owning the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    /* FIXME: Mask away return zero on read bits.  */
    return other->CP0_VPEControl;
}
575 
/* Read VPEConf0 of the VPE owning the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_VPEConf0;
}
583 
/*
 * Write VPEConf0.  MVP/VPA (and XTC while VPA is set) are writable only
 * when the VPE already has master-VPE privilege.
 */
void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
            mask |= (0xff << CP0VPEC0_XTC);
        }
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL. */

    env->CP0_VPEConf0 = newval;
}
601 
/* Write VPEConf0 of the targetted VPE; only MVP and VPA are writable. */
void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask = 0;
    uint32_t newval;

    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL.  */
    other->CP0_VPEConf0 = newval;
}
615 
616 void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
617 {
618     uint32_t mask = 0;
619     uint32_t newval;
620 
621     if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
622         mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
623                 (0xff << CP0VPEC1_NCP1);
624     newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
625 
626     /* UDI not implemented. */
627     /* CP2 not implemented. */
628 
629     /* TODO: Handle FPU (CP1) binding. */
630 
631     env->CP0_VPEConf1 = newval;
632 }
633 
/* Write YQMask: forced to zero since yield qualifiers are unimplemented
 * (arg1 is deliberately ignored). */
void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}
639 
/* Write VPEOpt, keeping only the low 16 bits. */
void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
644 
645 #define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
646 
/* Write EntryLo0: PFN/flags masked by PABITS, RI/XI bits relocated. */
void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}
654 
655 #if defined(TARGET_MIPS64)
656 #define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
657 
/* 64-bit write of EntryLo0: RI/XI gated by PageGrain.XIE, rest by PABITS. */
void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
663 #endif
664 
/*
 * Write TCStatus of the current TC through the per-core writable-bit
 * mask, then propagate the shared fields into Status/EntryHi.
 */
void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}
675 
676 void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
677 {
678     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
679     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
680 
681     if (other_tc == other->current_tc) {
682         other->active_tc.CP0_TCStatus = arg1;
683     } else {
684         other->tcs[other_tc].CP0_TCStatus = arg1;
685     }
686     sync_c0_tcstatus(other, other_tc, arg1);
687 }
688 
/*
 * Write TCBind of the current TC.  Only TBE is writable normally;
 * CurVPE additionally while MVPControl.VPC is set.
 */
void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}
700 
/* Write TCBind of the targetted TC, under the same mask rules as mtc0. */
void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}
719 
/* Write TCRestart: set the current TC's PC and drop its LL/SC state. */
void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0;
    env->lladdr = 0;
    /* MIPS16 not implemented. */
}
728 
729 void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
730 {
731     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
732     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
733 
734     if (other_tc == other->current_tc) {
735         other->active_tc.PC = arg1;
736         other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
737         other->CP0_LLAddr = 0;
738         other->lladdr = 0;
739         /* MIPS16 not implemented. */
740     } else {
741         other->tcs[other_tc].PC = arg1;
742         other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
743         other->CP0_LLAddr = 0;
744         other->lladdr = 0;
745         /* MIPS16 not implemented. */
746     }
747 }
748 
/* Write TCHalt (bit 0 only) of the current TC and sleep/wake accordingly. */
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = env_archcpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    /* TODO: Halt TC / Restart (if allocated+active) TC. */
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}
762 
/*
 * Write TCHalt of the targetted TC (stored unmasked, unlike the mtc0
 * variant which keeps only bit 0) and sleep/wake its VPE accordingly.
 */
void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    MIPSCPU *other_cpu = env_archcpu(other);

    /* TODO: Halt TC / Restart (if allocated+active) TC. */

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCHalt = arg1;
    } else {
        other->tcs[other_tc].CP0_TCHalt = arg1;
    }

    if (arg1 & 1) {
        mips_tc_sleep(other_cpu, other_tc);
    } else {
        mips_tc_wake(other_cpu, other_tc);
    }
}
783 
/* Write TCContext of the current TC. */
void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}
788 
789 void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
790 {
791     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
792     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
793 
794     if (other_tc == other->current_tc) {
795         other->active_tc.CP0_TCContext = arg1;
796     } else {
797         other->tcs[other_tc].CP0_TCContext = arg1;
798     }
799 }
800 
/* Write TCSchedule of the current TC. */
void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}
805 
806 void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
807 {
808     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
809     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
810 
811     if (other_tc == other->current_tc) {
812         other->active_tc.CP0_TCSchedule = arg1;
813     } else {
814         other->tcs[other_tc].CP0_TCSchedule = arg1;
815     }
816 }
817 
/* Write TCScheFBack of the current TC. */
void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}
822 
823 void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
824 {
825     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
826     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
827 
828     if (other_tc == other->current_tc) {
829         other->active_tc.CP0_TCScheFBack = arg1;
830     } else {
831         other->tcs[other_tc].CP0_TCScheFBack = arg1;
832     }
833 }
834 
/* Write EntryLo1: PFN/flags masked by PABITS, RI/XI bits relocated. */
void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}
842 
843 #if defined(TARGET_MIPS64)
/* 64-bit write of EntryLo1: RI/XI gated by PageGrain.XIE, rest by PABITS. */
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
849 #endif
850 
/* Write Context: only the PTEBase part (above bit 22) is writable. */
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}
855 
856 void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
857 {
858     int32_t old;
859     old = env->CP0_MemoryMapID;
860     env->CP0_MemoryMapID = (int32_t) arg1;
861     /* If the MemoryMapID changes, flush qemu's TLB.  */
862     if (old != env->CP0_MemoryMapID) {
863         cpu_mips_tlb_flush(env);
864     }
865 }
866 
867 void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask)
868 {
869     uint32_t mask;
870     int maskbits;
871 
872     /* Don't care MASKX as we don't support 1KB page */
873     mask = extract32((uint32_t)arg1, CP0PM_MASK, 16);
874     maskbits = cto32(mask);
875 
876     /* Ensure no more set bit after first zero */
877     if ((mask >> maskbits) != 0) {
878         goto invalid;
879     }
880     env->CP0_PageMask = mask << CP0PM_MASK;
881 
882     return;
883 
884 invalid:
885     /* When invalid, set to default target page size. */
886     env->CP0_PageMask = 0;
887 }
888 
/* Write PageMask via the shared validation helper. */
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    update_pagemask(env, arg1, &env->CP0_PageMask);
}
893 
/* Write PageGrain through its writable-bit mask, then refresh derived state. */
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
    compute_hflags(env);
    /* ELPA may have changed, so recompute the physical address mask. */
    restore_pamask(env);
}
903 
/* Write SegCtl0; segmentation changes require a full QEMU TLB flush. */
void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
    tlb_flush(cs);
}
911 
/* Write SegCtl1; segmentation changes require a full QEMU TLB flush. */
void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
    tlb_flush(cs);
}
919 
/* Write SegCtl2; segmentation changes require a full QEMU TLB flush. */
void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
    tlb_flush(cs);
}
927 
/*
 * Write PWField (page-table walker field positions).  On R6, index fields
 * below 12 are rejected (kept at their old value), and reserved PTEI/PTEW
 * values fall back to the previous value.
 */
void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    uint64_t mask = 0x3F3FFFFFFFULL;
    uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
    uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;

    if ((env->insn_flags & ISA_MIPS_R6)) {
        /* Drop each 6-bit index field whose new value is below 12. */
        if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_BDI);
        }
        if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_GDI);
        }
        if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_UDI);
        }
        if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_MDI);
        }
        if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_PTI);
        }
    }
    env->CP0_PWField = arg1 & mask;

    /* Reject reserved PTEI values, restoring the previous one. */
    if ((new_ptei >= 32) ||
            ((env->insn_flags & ISA_MIPS_R6) &&
                    (new_ptei == 0 || new_ptei == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
                (old_ptei << CP0PF_PTEI);
    }
#else
    uint32_t mask = 0x3FFFFFFF;
    uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;

    if ((env->insn_flags & ISA_MIPS_R6)) {
        /* Drop each 6-bit width field whose new value is below 12. */
        if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_GDW);
        }
        if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_UDW);
        }
        if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_MDW);
        }
        if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_PTW);
        }
    }
    env->CP0_PWField = arg1 & mask;

    /* Reject reserved PTEW values, restoring the previous one. */
    if ((new_ptew >= 32) ||
            ((env->insn_flags & ISA_MIPS_R6) &&
                    (new_ptew == 0 || new_ptew == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
                (old_ptew << CP0PF_PTEW);
    }
#endif
}
989 
/* Write PWSize through the architecture-width writable-bit mask. */
void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
#else
    env->CP0_PWSize = arg1 & 0x3FFFFFFF;
#endif
}
998 
/*
 * Write Wired.  R6 ignores out-of-range values; pre-R6 wraps them with
 * a modulo of the TLB size.  Either way Wired stays below nb_tlb.
 */
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ISA_MIPS_R6) {
        if (arg1 < env->tlb->nb_tlb) {
            env->CP0_Wired = arg1;
        }
    } else {
        env->CP0_Wired = arg1 % env->tlb->nb_tlb;
    }
}
1009 
/* Write PWCtl through the architecture-width writable-bit mask. */
void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    /* PWEn = 0. Hardware page table walking is not implemented. */
    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
#else
    env->CP0_PWCtl = (arg1 & 0x800000FF);
#endif
}
1019 
/* Write SRSConf0: writable bits are set-only (OR accumulate). */
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}
1024 
/* Write SRSConf1: writable bits are set-only (OR accumulate). */
void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}
1029 
/* Write SRSConf2: writable bits are set-only (OR accumulate). */
void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}
1034 
1035 void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
1036 {
1037     env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
1038 }
1039 
1040 void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
1041 {
1042     env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
1043 }
1044 
1045 void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
1046 {
1047     uint32_t mask = 0x0000000F;
1048 
1049     if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
1050         (env->insn_flags & ISA_MIPS_R6)) {
1051         mask |= (1 << 4);
1052     }
1053     if (env->insn_flags & ISA_MIPS_R6) {
1054         mask |= (1 << 5);
1055     }
1056     if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
1057         mask |= (1 << 29);
1058 
1059         if (arg1 & (1 << 29)) {
1060             env->hflags |= MIPS_HFLAG_HWRENA_ULR;
1061         } else {
1062             env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
1063         }
1064     }
1065 
1066     env->CP0_HWREna = arg1 & mask;
1067 }
1068 
/*
 * Write CP0.Count.  Delegated to cpu_mips_store_count() so the
 * count/compare timer machinery stays consistent with the new value.
 */
void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
1073 
/*
 * Write CP0.EntryHi.  Only VPN2 and ASID (plus EHINV when Config4.IE
 * indicates TLB invalidate support) are writable; read-only bits of the
 * old value are preserved.  An ASID change flushes QEMU's softmmu TLB.
 */
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        /* EHINV (TLB entry invalidate) bit is implemented and writable. */
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS_R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (ase_mt_available(env)) {
        /* MT ASE: keep the current thread context's copy in sync. */
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.  */
    if ((old & env->CP0_EntryHi_ASID_mask) !=
        (val & env->CP0_EntryHi_ASID_mask)) {
        tlb_flush(env_cpu(env));
    }
}
1108 
1109 void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
1110 {
1111     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1112     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1113 
1114     other->CP0_EntryHi = arg1;
1115     sync_c0_entryhi(other, other_tc);
1116 }
1117 
/*
 * Write CP0.Compare.  Delegated to cpu_mips_store_compare() so the
 * timer interrupt state is updated together with the register.
 */
void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
1122 
/*
 * Write CP0.Status through cpu_mips_store_status(), then, when exec
 * logging is enabled, trace the old/new values (and the corresponding
 * pending-interrupt bits) plus the resulting CPU privilege mode.
 */
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t val, old;

    old = env->CP0_Status;
    cpu_mips_store_status(env, arg1);
    /* Re-read: the store applies the writable-bit mask for this core. */
    val = env->CP0_Status;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                old, old & env->CP0_Cause & CP0Ca_IP_mask,
                val, val & env->CP0_Cause & CP0Ca_IP_mask,
                env->CP0_Cause);
        switch (mips_env_mmu_index(env)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM:
            qemu_log(", UM\n");
            break;
        case MIPS_HFLAG_SM:
            qemu_log(", SM\n");
            break;
        case MIPS_HFLAG_KM:
            qemu_log("\n");
            break;
        default:
            cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
            break;
        }
    }
}
1155 
1156 void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
1157 {
1158     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1159     uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
1160     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1161 
1162     other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
1163     sync_c0_status(env, other, other_tc);
1164 }
1165 
1166 void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
1167 {
1168     env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
1169 }
1170 
1171 void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
1172 {
1173     uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
1174     env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
1175 }
1176 
/*
 * Write CP0.Cause.  Delegated to cpu_mips_store_cause() so interrupt
 * state derived from Cause is updated consistently.
 */
void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_cause(env, arg1);
}
1181 
1182 void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
1183 {
1184     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1185     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1186 
1187     cpu_mips_store_cause(other, arg1);
1188 }
1189 
1190 target_ulong helper_mftc0_epc(CPUMIPSState *env)
1191 {
1192     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1193     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1194 
1195     return other->CP0_EPC;
1196 }
1197 
1198 target_ulong helper_mftc0_ebase(CPUMIPSState *env)
1199 {
1200     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1201     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1202 
1203     return other->CP0_EBase;
1204 }
1205 
1206 void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
1207 {
1208     target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
1209     if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
1210         mask |= ~0x3FFFFFFF;
1211     }
1212     env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
1213 }
1214 
1215 void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
1216 {
1217     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1218     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1219     target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
1220     if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
1221         mask |= ~0x3FFFFFFF;
1222     }
1223     other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
1224 }
1225 
1226 target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
1227 {
1228     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1229     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1230 
1231     switch (idx) {
1232     case 0: return other->CP0_Config0;
1233     case 1: return other->CP0_Config1;
1234     case 2: return other->CP0_Config2;
1235     case 3: return other->CP0_Config3;
1236     /* 4 and 5 are reserved.  */
1237     case 6: return other->CP0_Config6;
1238     case 7: return other->CP0_Config7;
1239     default:
1240         break;
1241     }
1242     return 0;
1243 }
1244 
1245 void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
1246 {
1247     env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
1248 }
1249 
/*
 * Write CP0.Config2.  The written value (arg1) is intentionally
 * ignored: the writable fields control tertiary/secondary caches,
 * which are not implemented, so only the existing read-only bits
 * are kept.
 */
void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
1255 
1256 void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
1257 {
1258     if (env->insn_flags & ASE_MICROMIPS) {
1259         env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) |
1260                            (arg1 & (1 << CP0C3_ISA_ON_EXC));
1261     }
1262 }
1263 
1264 void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
1265 {
1266     env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) |
1267                        (arg1 & env->CP0_Config4_rw_bitmask);
1268 }
1269 
1270 void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
1271 {
1272     env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) |
1273                        (arg1 & env->CP0_Config5_rw_bitmask);
1274     env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ?
1275             0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 0x3ff : 0xff;
1276     compute_hflags(env);
1277 }
1278 
1279 void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
1280 {
1281     target_long mask = env->CP0_LLAddr_rw_bitmask;
1282     arg1 = arg1 << env->CP0_LLAddr_shift;
1283     env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
1284 }
1285 
/*
 * Writable MAAR bits: the top bit (63), the physical-address field
 * limited to this core's PAMask, and the low two control bits.
 */
#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)
1288 
1289 void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
1290 {
1291     env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
1292 }
1293 
1294 void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
1295 {
1296     env->CP0_MAAR[env->CP0_MAARI] =
1297         (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
1298         (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
1299 }
1300 
1301 void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
1302 {
1303     int index = arg1 & 0x3f;
1304     if (index == 0x3f) {
1305         /*
1306          * Software may write all ones to INDEX to determine the
1307          *  maximum value supported.
1308          */
1309         env->CP0_MAARI = MIPS_MAAR_MAX - 1;
1310     } else if (index < MIPS_MAAR_MAX) {
1311         env->CP0_MAARI = index;
1312     }
1313     /*
1314      * Other than the all ones, if the value written is not supported,
1315      * then INDEX is unchanged from its previous value.
1316      */
1317 }
1318 
1319 void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1320 {
1321     /*
1322      * Watch exceptions for instructions, data loads, data stores
1323      * not implemented.
1324      */
1325     env->CP0_WatchLo[sel] = (arg1 & ~0x7);
1326 }
1327 
/*
 * Write CP0.WatchHi[sel].  The M bit is preserved read-only, the G/ASID
 * and mask fields come from the written value, and when Config5.MI is
 * set the wide MMID field is writable as well.  The final statement
 * clears any of the low three status bits where the guest wrote a 1
 * (write-one-to-clear semantics).
 */
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
    uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */
    if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
        mask |= 0xFFFFFFFF00000000ULL; /* MMID */
    }
    env->CP0_WatchHi[sel] = m_bit | (arg1 & mask);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
1338 
1339 void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1340 {
1341     env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
1342                             (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
1343 }
1344 
1345 void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
1346 {
1347     target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
1348     env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
1349 }
1350 
/* Write CP0.Framemask verbatim; no masking of reserved bits is applied. */
void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
1355 
1356 void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
1357 {
1358     env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
1359     if (arg1 & (1 << CP0DB_DM)) {
1360         env->hflags |= MIPS_HFLAG_DM;
1361     } else {
1362         env->hflags &= ~MIPS_HFLAG_DM;
1363     }
1364 }
1365 
/*
 * Write Debug on the thread context named by VPEControl.TargTC.
 * The per-TC bits (SSt, Halt) land in that TC's Debug_tcstatus copy,
 * while the remaining bits go into the shared Debug register.
 */
void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_Debug_tcstatus = val;
    } else {
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    }
    /* Shared part: keep old SSt/Halt, take everything else from arg1. */
    other->CP0_Debug = (other->CP0_Debug &
                     ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                     (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1382 
1383 void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
1384 {
1385     env->CP0_Performance0 = arg1 & 0x000007ff;
1386 }
1387 
1388 void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
1389 {
1390     int32_t wst = arg1 & (1 << CP0EC_WST);
1391     int32_t spr = arg1 & (1 << CP0EC_SPR);
1392     int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;
1393 
1394     env->CP0_ErrCtl = wst | spr | itc;
1395 
1396     if (itc && !wst && !spr) {
1397         env->hflags |= MIPS_HFLAG_ITC_CACHE;
1398     } else {
1399         env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
1400     }
1401 }
1402 
1403 void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
1404 {
1405     if (env->hflags & MIPS_HFLAG_ITC_CACHE) {
1406         /*
1407          * If CACHE instruction is configured for ITC tags then make all
1408          * CP0.TagLo bits writable. The actual write to ITC Configuration
1409          * Tag will take care of the read-only bits.
1410          */
1411         env->CP0_TagLo = arg1;
1412     } else {
1413         env->CP0_TagLo = arg1 & 0xFFFFFCF6;
1414     }
1415 }
1416 
/* Write CP0.DataLo verbatim; no bits are masked off. */
void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}
1421 
/* Write CP0.TagHi verbatim; no bits are masked off. */
void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}
1426 
/* Write CP0.DataHi verbatim; no bits are masked off. */
void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
1431 
1432 /* MIPS MT functions */
1433 target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
1434 {
1435     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1436     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1437 
1438     if (other_tc == other->current_tc) {
1439         return other->active_tc.gpr[sel];
1440     } else {
1441         return other->tcs[other_tc].gpr[sel];
1442     }
1443 }
1444 
1445 target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
1446 {
1447     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1448     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1449 
1450     if (other_tc == other->current_tc) {
1451         return other->active_tc.LO[sel];
1452     } else {
1453         return other->tcs[other_tc].LO[sel];
1454     }
1455 }
1456 
1457 target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
1458 {
1459     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1460     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1461 
1462     if (other_tc == other->current_tc) {
1463         return other->active_tc.HI[sel];
1464     } else {
1465         return other->tcs[other_tc].HI[sel];
1466     }
1467 }
1468 
1469 target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
1470 {
1471     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1472     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1473 
1474     if (other_tc == other->current_tc) {
1475         return other->active_tc.ACX[sel];
1476     } else {
1477         return other->tcs[other_tc].ACX[sel];
1478     }
1479 }
1480 
1481 target_ulong helper_mftdsp(CPUMIPSState *env)
1482 {
1483     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1484     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1485 
1486     if (other_tc == other->current_tc) {
1487         return other->active_tc.DSPControl;
1488     } else {
1489         return other->tcs[other_tc].DSPControl;
1490     }
1491 }
1492 
1493 void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1494 {
1495     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1496     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1497 
1498     if (other_tc == other->current_tc) {
1499         other->active_tc.gpr[sel] = arg1;
1500     } else {
1501         other->tcs[other_tc].gpr[sel] = arg1;
1502     }
1503 }
1504 
1505 void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1506 {
1507     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1508     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1509 
1510     if (other_tc == other->current_tc) {
1511         other->active_tc.LO[sel] = arg1;
1512     } else {
1513         other->tcs[other_tc].LO[sel] = arg1;
1514     }
1515 }
1516 
1517 void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1518 {
1519     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1520     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1521 
1522     if (other_tc == other->current_tc) {
1523         other->active_tc.HI[sel] = arg1;
1524     } else {
1525         other->tcs[other_tc].HI[sel] = arg1;
1526     }
1527 }
1528 
1529 void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
1530 {
1531     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1532     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1533 
1534     if (other_tc == other->current_tc) {
1535         other->active_tc.ACX[sel] = arg1;
1536     } else {
1537         other->tcs[other_tc].ACX[sel] = arg1;
1538     }
1539 }
1540 
1541 void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
1542 {
1543     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1544     CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1545 
1546     if (other_tc == other->current_tc) {
1547         other->active_tc.DSPControl = arg1;
1548     } else {
1549         other->tcs[other_tc].DSPControl = arg1;
1550     }
1551 }
1552 
1553 /* MIPS MT functions */
/*
 * DMT (disable multi-threading) stub: not implemented, so no state is
 * changed and 0 is returned as the previous value.
 */
target_ulong helper_dmt(void)
{
    /* TODO */
    return 0;
}
1559 
/*
 * EMT (enable multi-threading) stub: not implemented, so no state is
 * changed and 0 is returned as the previous value.
 */
target_ulong helper_emt(void)
{
    /* TODO */
    return 0;
}
1565 
1566 target_ulong helper_dvpe(CPUMIPSState *env)
1567 {
1568     CPUState *other_cs = first_cpu;
1569     target_ulong prev = env->mvp->CP0_MVPControl;
1570 
1571     CPU_FOREACH(other_cs) {
1572         MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1573         /* Turn off all VPEs except the one executing the dvpe.  */
1574         if (&other_cpu->env != env) {
1575             other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
1576             mips_vpe_sleep(other_cpu);
1577         }
1578     }
1579     return prev;
1580 }
1581 
1582 target_ulong helper_evpe(CPUMIPSState *env)
1583 {
1584     CPUState *other_cs = first_cpu;
1585     target_ulong prev = env->mvp->CP0_MVPControl;
1586 
1587     CPU_FOREACH(other_cs) {
1588         MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1589 
1590         if (&other_cpu->env != env
1591             /* If the VPE is WFI, don't disturb its sleep.  */
1592             && !mips_vpe_is_wfi(other_cpu)) {
1593             /* Enable the VPE.  */
1594             other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
1595             mips_vpe_wake(other_cpu); /* And wake it up.  */
1596         }
1597     }
1598     return prev;
1599 }
1600 
1601 /* R6 Multi-threading */
1602 target_ulong helper_dvp(CPUMIPSState *env)
1603 {
1604     CPUState *other_cs = first_cpu;
1605     target_ulong prev = env->CP0_VPControl;
1606 
1607     if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
1608         CPU_FOREACH(other_cs) {
1609             MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1610             /* Turn off all VPs except the one executing the dvp. */
1611             if (&other_cpu->env != env) {
1612                 mips_vpe_sleep(other_cpu);
1613             }
1614         }
1615         env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
1616     }
1617     return prev;
1618 }
1619 
1620 target_ulong helper_evp(CPUMIPSState *env)
1621 {
1622     CPUState *other_cs = first_cpu;
1623     target_ulong prev = env->CP0_VPControl;
1624 
1625     if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
1626         CPU_FOREACH(other_cs) {
1627             MIPSCPU *other_cpu = MIPS_CPU(other_cs);
1628             if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
1629                 /*
1630                  * If the VP is WFI, don't disturb its sleep.
1631                  * Otherwise, wake it up.
1632                  */
1633                 mips_vpe_wake(other_cpu);
1634             }
1635         }
1636         env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
1637     }
1638     return prev;
1639 }
1640