1 /*
2 * Helpers for emulation of CP0-related MIPS instructions.
3 *
4 * Copyright (C) 2004-2005 Jocelyn Mayer
5 * Copyright (C) 2020 Wave Computing, Inc.
6 * Copyright (C) 2020 Aleksandar Markovic <amarkovic@wavecomp.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 *
21 */
22
23 #include "qemu/osdep.h"
24 #include "qemu/log.h"
25 #include "qemu/main-loop.h"
26 #include "cpu.h"
27 #include "internal.h"
28 #include "qemu/host-utils.h"
29 #include "exec/helper-proto.h"
30 #include "exec/cputlb.h"
31 #include "exec/target_page.h"
32
33
34 /* SMP helpers. */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUState *cs = CPU(c);

    /*
     * A VPE that is halted while still otherwise active is waiting
     * for an interrupt (WFI).
     */
    return cs->halted && mips_vpe_active(&c->env);
}
46
static bool mips_vp_is_wfi(MIPSCPU *c)
{
    CPUState *cs = CPU(c);

    /* A halted but otherwise active VP is waiting for an interrupt. */
    return cs->halted && mips_vp_active(&c->env);
}
54
static inline void mips_vpe_wake(MIPSCPU *c)
{
    CPUState *cs = CPU(c);

    /*
     * Do not clear ->halted here: cpu_has_work() decides whether the
     * CPU really wakes, since other conditions may still require it
     * to sleep.  cpu_interrupt() must run under the BQL.
     */
    bql_lock();
    cpu_interrupt(cs, CPU_INTERRUPT_WAKE);
    bql_unlock();
}
66
static inline void mips_vpe_sleep(MIPSCPU *cpu)
{
    /*
     * The VPE was shut off: really put it to bed, and discard any
     * stale _WAKE request that may still be pending.
     */
    CPU(cpu)->halted = 1;
    cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_WAKE);
}
78
static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
{
    /* FIXME: TC reschedule. */
    if (!mips_vpe_active(&cpu->env) || mips_vpe_is_wfi(cpu)) {
        return;
    }
    mips_vpe_wake(cpu);
}
88
static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
{
    /* FIXME: TC reschedule. */
    if (!mips_vpe_active(&cpu->env)) {
        mips_vpe_sleep(cpu);
    }
}
98
99 /**
100 * mips_cpu_map_tc:
101 * @env: CPU from which mapping is performed.
102 * @tc: Should point to an int with the value of the global TC index.
103 *
104 * This function will transform @tc into a local index within the
105 * returned #CPUMIPSState.
106 */
107
108 /*
109 * FIXME: This code assumes that all VPEs have the same number of TCs,
110 * which depends on runtime setup. Can probably be fixed by
111 * walking the list of CPUMIPSStates.
112 */
static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
{
    MIPSCPU *cpu;
    CPUState *cs;
    CPUState *other_cs;
    int vpe_idx;
    int tc_idx = *tc;

    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
        /* Not allowed to address other CPUs. */
        *tc = env->current_tc;
        return env;
    }

    cs = env_cpu(env);
    /*
     * Split the global TC index into (VPE index, local TC index),
     * assuming every VPE has cs->nr_threads TCs (see FIXME above).
     */
    vpe_idx = tc_idx / cs->nr_threads;
    *tc = tc_idx % cs->nr_threads;
    other_cs = qemu_get_cpu(vpe_idx);
    if (other_cs == NULL) {
        /* No such VPE: fall back to our own state. */
        return env;
    }
    cpu = MIPS_CPU(other_cs);
    return &cpu->env;
}
137
138 /*
139 * The per VPE CP0_Status register shares some fields with the per TC
140 * CP0_TCStatus registers. These fields are wired to the same registers,
141 * so changes to either of them should be reflected on both registers.
142 *
 * Also, EntryHi shares the bottom 8 bit ASID with TCStatus.
 *
 * These helpers synchronize the registers for a given cpu.
146 */
147
148 /*
149 * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c.
150 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu,
151 * int tc);
152 */
153
154 /* Called for updates to CP0_TCStatus. */
static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
                             target_ulong v)
{
    uint32_t status;
    uint32_t tcu, tmx, tasid, tksu;
    /* CP0_Status fields that are wired to TCStatus fields. */
    uint32_t mask = ((1U << CP0St_CU3)
                    | (1 << CP0St_CU2)
                    | (1 << CP0St_CU1)
                    | (1 << CP0St_CU0)
                    | (1 << CP0St_MX)
                    | (3 << CP0St_KSU));

    /* Extract the shared fields from the new TCStatus value @v. */
    tcu = (v >> CP0TCSt_TCU0) & 0xf;
    tmx = (v >> CP0TCSt_TMX) & 0x1;
    tasid = v & cpu->CP0_EntryHi_ASID_mask;
    tksu = (v >> CP0TCSt_TKSU) & 0x3;

    /* Reassemble them at their CP0_Status bit positions. */
    status = tcu << CP0St_CU0;
    status |= tmx << CP0St_MX;
    status |= tksu << CP0St_KSU;

    cpu->CP0_Status &= ~mask;
    cpu->CP0_Status |= status;

    /* Sync the TASID with EntryHi. */
    cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask;
    cpu->CP0_EntryHi |= tasid;

    compute_hflags(cpu);
}
185
186 /* Called for updates to CP0_EntryHi. */
static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
{
    int32_t *tcst;
    uint32_t asid;

    /* Mirror EntryHi's ASID field into the TCStatus of TC @tc. */
    asid = cpu->CP0_EntryHi & cpu->CP0_EntryHi_ASID_mask;
    tcst = (tc == cpu->current_tc) ? &cpu->active_tc.CP0_TCStatus
                                   : &cpu->tcs[tc].CP0_TCStatus;

    *tcst = (*tcst & ~cpu->CP0_EntryHi_ASID_mask) | asid;
}
203
204 /* XXX: do not use a global */
uint32_t cpu_mips_get_random(CPUMIPSState *env)
{
    /* NOTE(review): static state is shared by all cores (see XXX above). */
    static uint32_t seed = 1;
    static uint32_t prev_idx;
    uint32_t idx;
    /* Number of non-wired TLB entries eligible for random replacement. */
    uint32_t nb_rand_tlb = env->tlb->nb_tlb - env->CP0_Wired;

    if (nb_rand_tlb == 1) {
        /* Only one candidate entry: no choice to make. */
        return env->tlb->nb_tlb - 1;
    }

    /* Don't return same value twice, so get another value */
    do {
        /*
         * Use a simple algorithm of Linear Congruential Generator
         * from ISO/IEC 9899 standard.
         */
        seed = 1103515245 * seed + 12345;
        idx = (seed >> 16) % nb_rand_tlb + env->CP0_Wired;
    } while (idx == prev_idx);
    prev_idx = idx;
    return idx;
}
228
229 /* CP0 helpers */
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
    /* Read the shared (per-processor) CP0 MVPControl register. */
    return env->mvp->CP0_MVPControl;
}
234
target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
    /* Read the shared (per-processor) CP0 MVPConf0 register. */
    return env->mvp->CP0_MVPConf0;
}
239
target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
    /* Read the shared (per-processor) CP0 MVPConf1 register. */
    return env->mvp->CP0_MVPConf1;
}
244
target_ulong helper_mfc0_random(CPUMIPSState *env)
{
    /* Sign-extend the generated TLB index for the target register. */
    return (int32_t)cpu_mips_get_random(env);
}
249
target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
    /* Read TCStatus of the currently running TC. */
    return env->active_tc.CP0_TCStatus;
}
254
helper_mftc0_tcstatus(CPUMIPSState * env)255 target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
256 {
257 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
258 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
259
260 if (other_tc == other->current_tc) {
261 return other->active_tc.CP0_TCStatus;
262 } else {
263 return other->tcs[other_tc].CP0_TCStatus;
264 }
265 }
266
target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
    /* Read TCBind of the currently running TC. */
    return env->active_tc.CP0_TCBind;
}
271
helper_mftc0_tcbind(CPUMIPSState * env)272 target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
273 {
274 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
275 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
276
277 if (other_tc == other->current_tc) {
278 return other->active_tc.CP0_TCBind;
279 } else {
280 return other->tcs[other_tc].CP0_TCBind;
281 }
282 }
283
target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
    /* TCRestart reads back the current TC's restart PC. */
    return env->active_tc.PC;
}
288
helper_mftc0_tcrestart(CPUMIPSState * env)289 target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
290 {
291 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
292 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
293
294 if (other_tc == other->current_tc) {
295 return other->active_tc.PC;
296 } else {
297 return other->tcs[other_tc].PC;
298 }
299 }
300
target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
    /* Read TCHalt of the currently running TC. */
    return env->active_tc.CP0_TCHalt;
}
305
helper_mftc0_tchalt(CPUMIPSState * env)306 target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
307 {
308 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
309 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
310
311 if (other_tc == other->current_tc) {
312 return other->active_tc.CP0_TCHalt;
313 } else {
314 return other->tcs[other_tc].CP0_TCHalt;
315 }
316 }
317
target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
    /* Read TCContext of the currently running TC. */
    return env->active_tc.CP0_TCContext;
}
322
helper_mftc0_tccontext(CPUMIPSState * env)323 target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
324 {
325 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
326 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
327
328 if (other_tc == other->current_tc) {
329 return other->active_tc.CP0_TCContext;
330 } else {
331 return other->tcs[other_tc].CP0_TCContext;
332 }
333 }
334
target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
    /* Read TCSchedule of the currently running TC. */
    return env->active_tc.CP0_TCSchedule;
}
339
helper_mftc0_tcschedule(CPUMIPSState * env)340 target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
341 {
342 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
343 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
344
345 if (other_tc == other->current_tc) {
346 return other->active_tc.CP0_TCSchedule;
347 } else {
348 return other->tcs[other_tc].CP0_TCSchedule;
349 }
350 }
351
target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
    /* Read TCScheFBack of the currently running TC. */
    return env->active_tc.CP0_TCScheFBack;
}
356
helper_mftc0_tcschefback(CPUMIPSState * env)357 target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
358 {
359 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
360 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
361
362 if (other_tc == other->current_tc) {
363 return other->active_tc.CP0_TCScheFBack;
364 } else {
365 return other->tcs[other_tc].CP0_TCScheFBack;
366 }
367 }
368
target_ulong helper_mfc0_count(CPUMIPSState *env)
{
    /* Sign-extend the 32-bit Count value for the target register. */
    return (int32_t)cpu_mips_get_count(env);
}
373
target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* EntryHi is per-VPE state: read it from the target TC's VPE. */
    return other->CP0_EntryHi;
}
381
target_ulong helper_mftc0_cause(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* Cause is per-VPE state: read it from the target TC's VPE. */
    return other->CP0_Cause;
}
389
target_ulong helper_mftc0_status(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* Status is per-VPE state: read it from the target TC's VPE. */
    return other->CP0_Status;
}
397
target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
    /* LLAddr is architecturally visible shifted right by a CPU-specific amount. */
    return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift);
}
402
target_ulong helper_mfc0_maar(CPUMIPSState *env)
{
    /* Low 32 bits (sign-extended) of the MAAR selected by MAARI. */
    return (int32_t) env->CP0_MAAR[env->CP0_MAARI];
}
407
target_ulong helper_mfhc0_maar(CPUMIPSState *env)
{
    /* High 32 bits of the MAAR selected by MAARI (mfhc0). */
    return env->CP0_MAAR[env->CP0_MAARI] >> 32;
}
412
target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    /* 32-bit (sign-extended) view of WatchLo[sel]. */
    return (int32_t)env->CP0_WatchLo[sel];
}
417
target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    /* 32-bit (sign-extended) view of WatchHi[sel]. */
    return (int32_t) env->CP0_WatchHi[sel];
}
422
target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    /* High 32 bits of WatchHi[sel] (mfhc0). */
    return env->CP0_WatchHi[sel] >> 32;
}
427
helper_mfc0_debug(CPUMIPSState * env)428 target_ulong helper_mfc0_debug(CPUMIPSState *env)
429 {
430 target_ulong t0 = env->CP0_Debug;
431 if (env->hflags & MIPS_HFLAG_DM) {
432 t0 |= 1 << CP0DB_DM;
433 }
434
435 return t0;
436 }
437
target_ulong helper_mftc0_debug(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    } else {
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;
    }

    /* XXX: Might be wrong, check with EJTAG spec. */
    /* SSt and Halt come from the per-TC debug state; the rest is per-VPE. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
           (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
454
455 #if defined(TARGET_MIPS64)
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
    /* 64-bit (dmfc0) read of the current TC's restart PC. */
    return env->active_tc.PC;
}
460
target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
    /* 64-bit (dmfc0) read of the current TC's TCHalt. */
    return env->active_tc.CP0_TCHalt;
}
465
target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
    /* 64-bit (dmfc0) read of the current TC's TCContext. */
    return env->active_tc.CP0_TCContext;
}
470
target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
    /* 64-bit (dmfc0) read of the current TC's TCSchedule. */
    return env->active_tc.CP0_TCSchedule;
}
475
target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
    /* 64-bit (dmfc0) read of the current TC's TCScheFBack. */
    return env->active_tc.CP0_TCScheFBack;
}
480
target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
    /* 64-bit read of LLAddr; no sign-extension, unlike the mfc0 variant. */
    return env->CP0_LLAddr >> env->CP0_LLAddr_shift;
}
485
target_ulong helper_dmfc0_maar(CPUMIPSState *env)
{
    /* Full 64-bit MAAR selected by MAARI. */
    return env->CP0_MAAR[env->CP0_MAARI];
}
490
target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    /* Full 64-bit WatchLo[sel]. */
    return env->CP0_WatchLo[sel];
}
495
target_ulong helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    /* Full 64-bit WatchHi[sel]. */
    return env->CP0_WatchHi[sel];
}
500
501 #endif /* TARGET_MIPS64 */
502
void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
{
    /* Preserve the existing top (P) bit; pre-R6 software cannot write it. */
    uint32_t index_p = env->CP0_Index & 0x80000000;
    uint32_t tlb_index = arg1 & 0x7fffffff;
    if (tlb_index < env->tlb->nb_tlb) {
        if (env->insn_flags & ISA_MIPS_R6) {
            /* R6 allows the written value to set the top bit. */
            index_p |= arg1 & 0x80000000;
        }
        env->CP0_Index = index_p | tlb_index;
    }
    /* Out-of-range indexes are silently ignored. */
}
514
void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    /* CPA/VPC/EVP are writable only from the master VPE. */
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
                (1 << CP0MVPCo_EVP);
    }
    /* STLB may only change while VPC (configuration state) is set. */
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0MVPCo_STLB);
    }
    newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable shared TLB, enable/disable VPEs. */

    env->mvp->CP0_MVPControl = newval;
}
533
void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask;
    uint32_t newval;

    /* Writable fields: YSI, GSI, TE and the TargTC index. */
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);

    /*
     * Yield scheduler intercept not implemented.
     * Gating storage scheduler intercept not implemented.
     */

    /* TODO: Enable/disable TCs. */

    env->CP0_VPEControl = newval;
}
552
void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask;
    uint32_t newval;

    /* Same writable fields as helper_mtc0_vpecontrol, on the target VPE. */
    mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
           (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);

    /* TODO: Enable/disable TCs. */

    other->CP0_VPEControl = newval;
}
568
target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    /* FIXME: Mask away return zero on read bits. */
    return other->CP0_VPEControl;
}
576
target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* Read VPEConf0 of the VPE hosting the target TC. */
    return other->CP0_VPEConf0;
}
584
void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    /* Only the master VPE may change MVP/VPA, and XTC only while VPA is set. */
    if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
        if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) {
            mask |= (0xff << CP0VPEC0_XTC);
        }
        mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    }
    newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL. */

    env->CP0_VPEConf0 = newval;
}
602
void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    uint32_t mask = 0;
    uint32_t newval;

    /* NOTE(review): unlike the mtc0 variant, XTC is never writable here. */
    mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);

    /* TODO: TC exclusive handling due to ERL/EXL. */
    other->CP0_VPEConf0 = newval;
}
616
void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0;
    uint32_t newval;

    /*
     * NCX/NCP2/NCP1 are writable only while VPC (configuration state)
     * is set.  Braces added: the multi-line |= statement was the body
     * of an unbraced if, which QEMU's coding style forbids and which
     * is error-prone to later edits.
     */
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
                (0xff << CP0VPEC1_NCP1);
    }
    newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);

    /* UDI not implemented. */
    /* CP2 not implemented. */

    /* TODO: Handle FPU (CP1) binding. */

    env->CP0_VPEConf1 = newval;
}
634
void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    /* Consequently YQMask stays zero regardless of the written value. */
    env->CP0_YQMask = 0x00000000;
}
640
void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    /* Only the low 16 bits of VPEOpt are writable. */
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
645
646 #define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)
647
void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    /* RI/XI bits are kept only if PageGrain enables them (XIE/RIE). */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}
655
656 #if defined(TARGET_MIPS64)
657 #define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)
658
void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
{
    /* 64-bit write: RI/XI live at bits 63:62, gated by PageGrain. */
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
664 #endif
665
void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    /* Only bits in the CPU's rw bitmask are writable. */
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    /* Mirror the shared fields into CP0_Status / EntryHi. */
    sync_c0_tcstatus(env, env->current_tc, newval);
}
676
void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* NOTE(review): no rw bitmask applied here, unlike helper_mtc0_tcstatus. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCStatus = arg1;
    } else {
        other->tcs[other_tc].CP0_TCStatus = arg1;
    }
    /* Mirror the shared fields into the target VPE's Status / EntryHi. */
    sync_c0_tcstatus(other, other_tc, arg1);
}
689
void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    /* CurVPE may only be rebound while VPC (configuration state) is set. */
    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}
701
void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* CurVPE may only be rebound while VPC (configuration state) is set. */
    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}
720
void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    /* Set the restart PC; clear the TDS bit and any pending LL state. */
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0;
    env->lladdr = 0;
    /* MIPS16 not implemented. */
}
729
void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    TCState *tcst = (other_tc == other->current_tc)
                    ? &other->active_tc : &other->tcs[other_tc];

    /* Set the target TC's restart PC; clear TDS and any pending LL state. */
    tcst->PC = arg1;
    tcst->CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    other->CP0_LLAddr = 0;
    other->lladdr = 0;
    /* MIPS16 not implemented. */
}
749
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = env_archcpu(env);

    /* Only bit 0 (the Halt flag) is writable. */
    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    /* TODO: Halt TC / Restart (if allocated+active) TC. */
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}
763
void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    MIPSCPU *other_cpu = env_archcpu(other);

    /* TODO: Halt TC / Restart (if allocated+active) TC. */

    /* NOTE(review): stores arg1 unmasked, unlike helper_mtc0_tchalt. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCHalt = arg1;
    } else {
        other->tcs[other_tc].CP0_TCHalt = arg1;
    }

    /* Put the target TC to sleep or wake it according to the Halt bit. */
    if (arg1 & 1) {
        mips_tc_sleep(other_cpu, other_tc);
    } else {
        mips_tc_wake(other_cpu, other_tc);
    }
}
784
void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    /* TCContext of the current TC is fully software-writable. */
    env->active_tc.CP0_TCContext = arg1;
}
789
void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* Write TCContext of the TC selected by VPEControl.TargTC. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCContext = arg1;
    } else {
        other->tcs[other_tc].CP0_TCContext = arg1;
    }
}
801
void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    /* TCSchedule of the current TC is fully software-writable. */
    env->active_tc.CP0_TCSchedule = arg1;
}
806
void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* Write TCSchedule of the TC selected by VPEControl.TargTC. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCSchedule = arg1;
    } else {
        other->tcs[other_tc].CP0_TCSchedule = arg1;
    }
}
818
void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    /* TCScheFBack of the current TC is fully software-writable. */
    env->active_tc.CP0_TCScheFBack = arg1;
}
823
void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* Write TCScheFBack of the TC selected by VPEControl.TargTC. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCScheFBack = arg1;
    } else {
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
    }
}
835
void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    /* RI/XI bits are kept only if PageGrain enables them (XIE/RIE). */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}
843
844 #if defined(TARGET_MIPS64)
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    /* 64-bit write: RI/XI live at bits 63:62, gated by PageGrain. */
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
850 #endif
851
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    /* Only the bits above the low 23 (the PTEBase area) are writable. */
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}
856
void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
{
    int32_t new_mmid = (int32_t) arg1;

    /* A MemoryMapID change invalidates QEMU's cached translations. */
    if (env->CP0_MemoryMapID != new_mmid) {
        env->CP0_MemoryMapID = new_mmid;
        cpu_mips_tlb_flush(env);
    }
}
867
compute_pagemask(uint32_t val)868 uint32_t compute_pagemask(uint32_t val)
869 {
870 /* Don't care MASKX as we don't support 1KB page */
871 uint32_t mask = extract32(val, CP0PM_MASK, 16);
872 int maskbits = cto32(mask);
873
874 /* Ensure no more set bit after first zero, and maskbits even. */
875 if ((mask >> maskbits) == 0 && maskbits % 2 == 0) {
876 return mask << CP0PM_MASK;
877 } else {
878 /* When invalid, set to default target page size. */
879 return 0;
880 }
881 }
882
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    /* Store a validated/canonicalised page mask (invalid writes -> 0). */
    env->CP0_PageMask = compute_pagemask(arg1);
}
887
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* 1k pages not implemented */
    /* Merge the writable bits per the CPU's rw bitmask. */
    env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) |
                         (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask);
    /* ELPA etc. affect hflags and the physical address mask. */
    compute_hflags(env);
    restore_pamask(env);
}
897
void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    /* A SegCtl change can alter address translation: flush QEMU's TLB. */
    env->CP0_SegCtl0 = arg1 & CP0SC0_MASK;
    tlb_flush(cs);
}
905
void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    /* A SegCtl change can alter address translation: flush QEMU's TLB. */
    env->CP0_SegCtl1 = arg1 & CP0SC1_MASK;
    tlb_flush(cs);
}
913
void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1)
{
    CPUState *cs = env_cpu(env);

    /* A SegCtl change can alter address translation: flush QEMU's TLB. */
    env->CP0_SegCtl2 = arg1 & CP0SC2_MASK;
    tlb_flush(cs);
}
921
void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    uint64_t mask = 0x3F3FFFFFFFULL;
    uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL;
    uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL;

    /* On R6, index fields written with a value below 12 are ignored. */
    if ((env->insn_flags & ISA_MIPS_R6)) {
        if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_BDI);
        }
        if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_GDI);
        }
        if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_UDI);
        }
        if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_MDI);
        }
        if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) {
            mask &= ~(0x3FULL << CP0PF_PTI);
        }
    }
    env->CP0_PWField = arg1 & mask;

    /* Out-of-range (or, on R6, 0/1) PTEI values keep the old PTEI. */
    if ((new_ptei >= 32) ||
        ((env->insn_flags & ISA_MIPS_R6) &&
         (new_ptei == 0 || new_ptei == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) |
                           (old_ptei << CP0PF_PTEI);
    }
#else
    uint32_t mask = 0x3FFFFFFF;
    uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F;

    /* On R6, width fields written with a value below 12 are ignored. */
    if ((env->insn_flags & ISA_MIPS_R6)) {
        if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_GDW);
        }
        if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_UDW);
        }
        if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_MDW);
        }
        if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) {
            mask &= ~(0x3F << CP0PF_PTW);
        }
    }
    env->CP0_PWField = arg1 & mask;

    /* Out-of-range (or, on R6, 0/1) PTEW values keep the old PTEW. */
    if ((new_ptew >= 32) ||
        ((env->insn_flags & ISA_MIPS_R6) &&
         (new_ptew == 0 || new_ptew == 1))) {
        env->CP0_PWField = (env->CP0_PWField & ~0x3F) |
                           (old_ptew << CP0PF_PTEW);
    }
#endif
}
983
void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1)
{
    /* Mask the write to the architecturally defined PWSize fields. */
#if defined(TARGET_MIPS64)
    env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL;
#else
    env->CP0_PWSize = arg1 & 0x3FFFFFFF;
#endif
}
992
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    if (env->insn_flags & ISA_MIPS_R6) {
        /* R6: out-of-range writes are ignored. */
        if (arg1 < env->tlb->nb_tlb) {
            env->CP0_Wired = arg1;
        }
    } else {
        /* pre-R6: the written value wraps modulo the TLB size. */
        env->CP0_Wired = arg1 % env->tlb->nb_tlb;
    }
}
1003
/*
 * Write CP0 PWCtl (hardware page-walk control). The writable mask
 * differs between 32- and 64-bit targets; on 64-bit, bits 7:6 of the
 * old value are preserved.
 */
void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    /* PWEn = 0. Hardware page table walking is not implemented. */
    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
#else
    env->CP0_PWCtl = (arg1 & 0x800000FF);
#endif
}
1013
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    /* SRSConf0: writable bits can only be set, never cleared (|=). */
    env->CP0_SRSConf0 |= env->CP0_SRSConf0_rw_bitmask & arg1;
}
1018
void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    /* SRSConf1: writable bits can only be set, never cleared (|=). */
    env->CP0_SRSConf1 |= env->CP0_SRSConf1_rw_bitmask & arg1;
}
1023
void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    /* SRSConf2: writable bits can only be set, never cleared (|=). */
    env->CP0_SRSConf2 |= env->CP0_SRSConf2_rw_bitmask & arg1;
}
1028
void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    /* SRSConf3: writable bits can only be set, never cleared (|=). */
    env->CP0_SRSConf3 |= env->CP0_SRSConf3_rw_bitmask & arg1;
}
1033
void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    /* SRSConf4: writable bits can only be set, never cleared (|=). */
    env->CP0_SRSConf4 |= env->CP0_SRSConf4_rw_bitmask & arg1;
}
1038
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    /*
     * Write CP0 HWREna. Bits 3:0 are always writable; bit 4 needs a
     * performance counter (Config1.PC) on R6, bit 5 needs R6, and
     * bit 29 (ULR) needs Config3.ULRI.
     */
    bool r6 = (env->insn_flags & ISA_MIPS_R6) != 0;
    uint32_t mask = 0x0000000F;

    if (r6 && (env->CP0_Config1 & (1 << CP0C1_PC))) {
        mask |= (1 << 4);
    }
    if (r6) {
        mask |= (1 << 5);
    }
    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
        mask |= (1 << 29);
        /* Cache the ULR enable in hflags for the translator. */
        if (arg1 & (1 << 29)) {
            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
        } else {
            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
        }
    }

    env->CP0_HWREna = arg1 & mask;
}
1062
/* Write CP0 Count; delegated to the timer code in cpu_mips_store_count(). */
void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
1067
/*
 * Write CP0 EntryHi. Builds a writable-bit mask (VPN2 above the page
 * mask plus the current ASID field, optionally EHINV), merges the new
 * value, keeps the MT per-TC copy in sync, and flushes QEMU's TLB when
 * the ASID changes.
 */
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        /* TLB invalidate is supported, so EHINV is writable too. */
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS_R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (ase_mt_available(env)) {
        /* Propagate the new EntryHi to the current TC's copy (MT ASE). */
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & env->CP0_EntryHi_ASID_mask) !=
        (val & env->CP0_EntryHi_ASID_mask)) {
        tlb_flush(env_cpu(env));
    }
}
1102
/*
 * MTTC0 to EntryHi: write the EntryHi of the TC selected by
 * VPEControl.TargTC. NOTE(review): unlike helper_mtc0_entryhi this
 * applies no writable-bit mask and no TLB flush — confirm intended.
 */
void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_EntryHi = arg1;
    sync_c0_entryhi(other, other_tc);
}
1111
/* Write CP0 Compare; delegated to the timer code in cpu_mips_store_compare(). */
void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
1116
/*
 * Write CP0 Status via cpu_mips_store_status(), then optionally log the
 * old/new values (and their overlap with pending Cause.IP bits) plus the
 * resulting MMU mode when CPU_LOG_EXEC logging is enabled.
 */
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t val, old;

    old = env->CP0_Status;
    cpu_mips_store_status(env, arg1);
    /* Re-read: the store helper decides what actually sticks. */
    val = env->CP0_Status;

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                 old, old & env->CP0_Cause & CP0Ca_IP_mask,
                 val, val & env->CP0_Cause & CP0Ca_IP_mask,
                 env->CP0_Cause);
        switch (mips_env_mmu_index(env)) {
        case 3:
            qemu_log(", ERL\n");
            break;
        case MIPS_HFLAG_UM:
            qemu_log(", UM\n");
            break;
        case MIPS_HFLAG_SM:
            qemu_log(", SM\n");
            break;
        case MIPS_HFLAG_KM:
            qemu_log("\n");
            break;
        default:
            cpu_abort(env_cpu(env), "Invalid MMU mode!\n");
            break;
        }
    }
}
1149
/*
 * MTTC0 to Status: write the Status of the TC selected by
 * VPEControl.TargTC. Bits in 0xf1000018 are excluded from the normal
 * rw bitmask for cross-TC writes.
 */
void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask);
    sync_c0_status(env, other, other_tc);
}
1159
void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    /* Write CP0 IntCtl: only bits 9:5 are writable. */
    const uint32_t mask = 0x000003e0;

    env->CP0_IntCtl = (env->CP0_IntCtl & ~mask) | (arg1 & mask);
}
1164
void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
{
    /* Write CP0 SRSCtl: only the ESS and PSS fields are writable. */
    const uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);

    env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
}
1170
/* Write CP0 Cause; delegated to cpu_mips_store_cause(). */
void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_cause(env, arg1);
}
1175
/* MTTC0 to Cause: write the Cause of the TC selected by VPEControl.TargTC. */
void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    cpu_mips_store_cause(other, arg1);
}
1183
/* MFTC0 from EPC: read the EPC of the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_epc(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EPC;
}
1191
/* MFTC0 from EBase: read the EBase of the TC selected by VPEControl.TargTC. */
target_ulong helper_mftc0_ebase(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    return other->CP0_EBase;
}
1199
/*
 * Write CP0 EBase. Bits 29:12 are writable, plus the WG bit when
 * implemented; setting WG in the written value additionally opens the
 * bits above bit 29 for writing.
 */
void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask);
}
1208
/*
 * MTTC0 to EBase: same masking rules as helper_mtc0_ebase, but applied
 * to the TC selected by VPEControl.TargTC.
 */
void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask;
    if (arg1 & env->CP0_EBaseWG_rw_bitmask) {
        mask |= ~0x3FFFFFFF;
    }
    other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask);
}
1219
target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
{
    /* Read Config<idx> of the TC selected by VPEControl.TargTC. */
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    target_ulong val = 0;

    switch (idx) {
    case 0:
        val = other->CP0_Config0;
        break;
    case 1:
        val = other->CP0_Config1;
        break;
    case 2:
        val = other->CP0_Config2;
        break;
    case 3:
        val = other->CP0_Config3;
        break;
    case 6:
        val = other->CP0_Config6;
        break;
    case 7:
        val = other->CP0_Config7;
        break;
    default:
        /* Selects 4 and 5 are reserved; unknown selects read as zero. */
        break;
    }
    return val;
}
1238
void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    /* Write CP0 Config0: only the low three bits are writable. */
    const uint32_t keep = 0x81FFFFF8;
    const uint32_t rw = 0x00000007;

    env->CP0_Config0 = (env->CP0_Config0 & keep) | (arg1 & rw);
}
1243
/*
 * Write CP0 Config2. The written value is deliberately ignored:
 * tertiary/secondary caches are not implemented, so the register is
 * only masked in place.
 */
void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
1249
void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1)
{
    /* Only ISAOnExc is writable, and only when microMIPS is available. */
    uint32_t bit = 1 << CP0C3_ISA_ON_EXC;

    if (env->insn_flags & ASE_MICROMIPS) {
        env->CP0_Config3 = (env->CP0_Config3 & ~bit) | (arg1 & bit);
    }
}
1257
void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1)
{
    /* Write CP0 Config4 through its per-CPU writable-bit mask. */
    uint32_t rw = env->CP0_Config4_rw_bitmask;

    env->CP0_Config4 = (env->CP0_Config4 & ~rw) | (arg1 & rw);
}
1263
void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1)
{
    /* Write CP0 Config5 through its per-CPU writable-bit mask. */
    uint32_t rw = env->CP0_Config5_rw_bitmask;

    env->CP0_Config5 = (env->CP0_Config5 & ~rw) | (arg1 & rw);

    /* Recompute the ASID mask: MI disables it, AE widens it to 10 bits. */
    if (env->CP0_Config5 & (1 << CP0C5_MI)) {
        env->CP0_EntryHi_ASID_mask = 0x0;
    } else if (env->CP0_Config4 & (1 << CP0C4_AE)) {
        env->CP0_EntryHi_ASID_mask = 0x3ff;
    } else {
        env->CP0_EntryHi_ASID_mask = 0xff;
    }
    compute_hflags(env);
}
1272
/*
 * Write CP0 LLAddr. The architected register holds a scaled address,
 * so the written value is shifted left by the per-CPU LLAddr shift
 * before being merged under the rw bitmask (target_long so the mask
 * sign-extends on 64-bit targets).
 */
void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
{
    target_long mask = env->CP0_LLAddr_rw_bitmask;
    arg1 = arg1 << env->CP0_LLAddr_shift;
    env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask);
}
1279
/*
 * Writable MAAR bits: bit 63, an address field derived from PAMask
 * (shifted and with the low 12 bits cleared), and bits 1:0.
 */
#define MTC0_MAAR_MASK(env) \
        ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3)

/* Write the MAAR register currently selected by CP0 MAARI. */
void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env);
}
1287
/*
 * MTHC0 to MAAR: replace the upper 32 bits of the selected MAAR
 * (masked by MTC0_MAAR_MASK) while keeping the lower 32 bits.
 */
void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_MAAR[env->CP0_MAARI] =
        (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) |
        (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL);
}
1294
void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1)
{
    /* Write CP0 MAARI (MAAR index register). */
    int idx = arg1 & 0x3f;

    if (idx == 0x3f) {
        /*
         * Writing all ones is the architected way for software to
         * discover the maximum supported index.
         */
        env->CP0_MAARI = MIPS_MAAR_MAX - 1;
    } else if (idx < MIPS_MAAR_MAX) {
        env->CP0_MAARI = idx;
    }
    /*
     * Any other unsupported value leaves MAARI unchanged from its
     * previous value.
     */
}
1312
/*
 * Write CP0 WatchLo[sel]. The I/R/W enable bits (2:0) are forced to
 * zero since watch exceptions are not implemented.
 */
void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /*
     * Watch exceptions for instructions, data loads, data stores
     * not implemented.
     */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}
1321
/*
 * Write CP0 WatchHi[sel]. The M bit is preserved read-only, the ASID
 * (or 32-bit MMID when Config5.MI is set) field is writable, and
 * bits 2:0 are write-one-to-clear status bits.
 */
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID);
    uint64_t m_bit = env->CP0_WatchHi[sel] & (1 << CP0WH_M); /* read-only */
    if ((env->CP0_Config5 >> CP0C5_MI) & 1) {
        mask |= 0xFFFFFFFF00000000ULL; /* MMID */
    }
    env->CP0_WatchHi[sel] = m_bit | (arg1 & mask);
    /* Writing 1 to any of bits 2:0 clears that bit (W1C). */
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
1332
/*
 * MTHC0 to WatchHi: replace the upper 32 bits of WatchHi[sel], keeping
 * the lower 32 bits unchanged.
 */
void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) |
                            (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL);
}
1338
/*
 * Write CP0 XContext: only the bits above the BadVPN2 field (whose
 * width follows SEGBITS) are writable; the low part is preserved.
 */
void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
    env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
}
1344
/* Write CP0 Framemask; stored verbatim (no masking implemented yet). */
void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
1349
/*
 * Write CP0 Debug under fixed keep/writable masks, and mirror the DM
 * bit into hflags so translated code sees debug mode immediately.
 */
void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
    if (arg1 & (1 << CP0DB_DM)) {
        env->hflags |= MIPS_HFLAG_DM;
    } else {
        env->hflags &= ~MIPS_HFLAG_DM;
    }
}
1359
/*
 * MTTC0 to Debug: store the SSt/Halt bits into the targeted TC's
 * per-TC debug status, and merge the remaining bits into the shared
 * CP0 Debug register of the targeted VPE.
 */
void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc) {
        other->active_tc.CP0_Debug_tcstatus = val;
    } else {
        other->tcs[other_tc].CP0_Debug_tcstatus = val;
    }
    other->CP0_Debug = (other->CP0_Debug &
                        ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                       (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
1376
/* Write CP0 Performance0: only the low 11 bits are writable. */
void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}
1381
/*
 * Write CP0 ErrCtl. Only WST, SPR and (when the CPU has an ITC tag)
 * ITC are kept; when ITC is set alone, CACHE ops are redirected to the
 * ITC tag via the MIPS_HFLAG_ITC_CACHE hflag.
 */
void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1)
{
    int32_t wst = arg1 & (1 << CP0EC_WST);
    int32_t spr = arg1 & (1 << CP0EC_SPR);
    /* ITC is only writable when an ITC tag is present. */
    int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0;

    env->CP0_ErrCtl = wst | spr | itc;

    if (itc && !wst && !spr) {
        env->hflags |= MIPS_HFLAG_ITC_CACHE;
    } else {
        env->hflags &= ~MIPS_HFLAG_ITC_CACHE;
    }
}
1396
void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    /* Write CP0 TagLo. */
    if (!(env->hflags & MIPS_HFLAG_ITC_CACHE)) {
        env->CP0_TagLo = arg1 & 0xFFFFFCF6;
    } else {
        /*
         * CACHE is configured for ITC tags: keep every bit writable
         * here; the ITC Configuration Tag write enforces the
         * read-only bits itself.
         */
        env->CP0_TagLo = arg1;
    }
}
1410
/* Write CP0 DataLo; stored verbatim (no masking implemented yet). */
void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}
1415
/* Write CP0 TagHi; stored verbatim (no masking implemented yet). */
void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}
1420
/* Write CP0 DataHi; stored verbatim (no masking implemented yet). */
void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
1425
1426 /* MIPS MT functions */
helper_mftgpr(CPUMIPSState * env,uint32_t sel)1427 target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
1428 {
1429 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1430 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1431
1432 if (other_tc == other->current_tc) {
1433 return other->active_tc.gpr[sel];
1434 } else {
1435 return other->tcs[other_tc].gpr[sel];
1436 }
1437 }
1438
helper_mftlo(CPUMIPSState * env,uint32_t sel)1439 target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
1440 {
1441 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1442 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1443
1444 if (other_tc == other->current_tc) {
1445 return other->active_tc.LO[sel];
1446 } else {
1447 return other->tcs[other_tc].LO[sel];
1448 }
1449 }
1450
helper_mfthi(CPUMIPSState * env,uint32_t sel)1451 target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
1452 {
1453 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1454 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1455
1456 if (other_tc == other->current_tc) {
1457 return other->active_tc.HI[sel];
1458 } else {
1459 return other->tcs[other_tc].HI[sel];
1460 }
1461 }
1462
helper_mftacx(CPUMIPSState * env,uint32_t sel)1463 target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
1464 {
1465 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1466 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1467
1468 if (other_tc == other->current_tc) {
1469 return other->active_tc.ACX[sel];
1470 } else {
1471 return other->tcs[other_tc].ACX[sel];
1472 }
1473 }
1474
helper_mftdsp(CPUMIPSState * env)1475 target_ulong helper_mftdsp(CPUMIPSState *env)
1476 {
1477 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
1478 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
1479
1480 if (other_tc == other->current_tc) {
1481 return other->active_tc.DSPControl;
1482 } else {
1483 return other->tcs[other_tc].DSPControl;
1484 }
1485 }
1486
void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* Write GPR 'sel' of the TC selected by VPEControl.TargTC. */
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tenv = mips_cpu_map_tc(env, &tgt);

    if (tgt != tenv->current_tc) {
        tenv->tcs[tgt].gpr[sel] = arg1;
    } else {
        tenv->active_tc.gpr[sel] = arg1;
    }
}
1498
void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* Write LO 'sel' of the TC selected by VPEControl.TargTC. */
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tenv = mips_cpu_map_tc(env, &tgt);

    if (tgt != tenv->current_tc) {
        tenv->tcs[tgt].LO[sel] = arg1;
    } else {
        tenv->active_tc.LO[sel] = arg1;
    }
}
1510
void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* Write HI 'sel' of the TC selected by VPEControl.TargTC. */
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tenv = mips_cpu_map_tc(env, &tgt);

    if (tgt != tenv->current_tc) {
        tenv->tcs[tgt].HI[sel] = arg1;
    } else {
        tenv->active_tc.HI[sel] = arg1;
    }
}
1522
void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* Write ACX 'sel' of the TC selected by VPEControl.TargTC. */
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tenv = mips_cpu_map_tc(env, &tgt);

    if (tgt != tenv->current_tc) {
        tenv->tcs[tgt].ACX[sel] = arg1;
    } else {
        tenv->active_tc.ACX[sel] = arg1;
    }
}
1534
void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    /* Write DSPControl of the TC selected by VPEControl.TargTC. */
    int tgt = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *tenv = mips_cpu_map_tc(env, &tgt);

    if (tgt != tenv->current_tc) {
        tenv->tcs[tgt].DSPControl = arg1;
    } else {
        tenv->active_tc.DSPControl = arg1;
    }
}
1546
1547 /* MIPS MT functions */
/* DMT (disable multi-threading): not implemented, acts as a no-op returning 0. */
target_ulong helper_dmt(void)
{
    /* TODO */
    return 0;
}
1553
/* EMT (enable multi-threading): not implemented, acts as a no-op returning 0. */
target_ulong helper_emt(void)
{
    /* TODO */
    return 0;
}
1559
/*
 * DVPE: disable all other VPEs. Clears MVPControl.EVP on every other
 * CPU and puts it to sleep; returns the MVPControl value captured
 * before any modification.
 */
target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        /* Turn off all VPEs except the one executing the dvpe. */
        if (&other_cpu->env != env) {
            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
    }
    return prev;
}
1575
/*
 * EVPE: enable all other VPEs. Sets MVPControl.EVP and wakes each
 * other CPU unless it is sleeping in WFI; returns the MVPControl
 * value captured before any modification.
 */
target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if (&other_cpu->env != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up. */
        }
    }
    return prev;
}
1594
1595 /* R6 Multi-threading */
/*
 * R6 DVP: disable all other virtual processors. If VPControl.DIS is
 * not already set, put every other VP to sleep and set DIS; returns
 * the VPControl value captured before any modification.
 */
target_ulong helper_dvp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            /* Turn off all VPs except the one executing the dvp. */
            if (&other_cpu->env != env) {
                mips_vpe_sleep(other_cpu);
            }
        }
        env->CP0_VPControl |= (1 << CP0VPCtl_DIS);
    }
    return prev;
}
1613
/*
 * R6 EVP: re-enable all other virtual processors. If VPControl.DIS is
 * set, wake every other VP that is not sleeping in WFI and clear DIS;
 * returns the VPControl value captured before any modification.
 */
target_ulong helper_evp(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->CP0_VPControl;

    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        CPU_FOREACH(other_cs) {
            MIPSCPU *other_cpu = MIPS_CPU(other_cs);
            if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) {
                /*
                 * If the VP is WFI, don't disturb its sleep.
                 * Otherwise, wake it up.
                 */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    return prev;
}
1634