1 /* 2 * MIPS emulation helpers for qemu. 3 * 4 * Copyright (c) 2004-2005 Jocelyn Mayer 5 * 6 * This library is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU Lesser General Public 8 * License as published by the Free Software Foundation; either 9 * version 2 of the License, or (at your option) any later version. 10 * 11 * This library is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * Lesser General Public License for more details. 15 * 16 * You should have received a copy of the GNU Lesser General Public 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include "qemu/osdep.h" 20 #include "qemu/main-loop.h" 21 #include "cpu.h" 22 #include "internal.h" 23 #include "qemu/host-utils.h" 24 #include "exec/helper-proto.h" 25 #include "exec/exec-all.h" 26 #include "exec/cpu_ldst.h" 27 #include "exec/memop.h" 28 #include "sysemu/kvm.h" 29 #include "fpu/softfloat.h" 30 31 /*****************************************************************************/ 32 /* Exceptions processing helpers */ 33 34 void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception, 35 int error_code) 36 { 37 do_raise_exception_err(env, exception, error_code, 0); 38 } 39 40 void helper_raise_exception(CPUMIPSState *env, uint32_t exception) 41 { 42 do_raise_exception(env, exception, GETPC()); 43 } 44 45 void helper_raise_exception_debug(CPUMIPSState *env) 46 { 47 do_raise_exception(env, EXCP_DEBUG, 0); 48 } 49 50 static void raise_exception(CPUMIPSState *env, uint32_t exception) 51 { 52 do_raise_exception(env, exception, 0); 53 } 54 55 /* 64 bits arithmetic for 32 bits hosts */ 56 static inline uint64_t get_HILO(CPUMIPSState *env) 57 { 58 return ((uint64_t)(env->active_tc.HI[0]) << 32) | 59 (uint32_t)env->active_tc.LO[0]; 60 } 61 62 static inline target_ulong 
set_HIT0_LO(CPUMIPSState *env, uint64_t HILO) 63 { 64 env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); 65 return env->active_tc.HI[0] = (int32_t)(HILO >> 32); 66 } 67 68 static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO) 69 { 70 target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF); 71 env->active_tc.HI[0] = (int32_t)(HILO >> 32); 72 return tmp; 73 } 74 75 /* Multiplication variants of the vr54xx. */ 76 target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1, 77 target_ulong arg2) 78 { 79 return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 * 80 (int64_t)(int32_t)arg2)); 81 } 82 83 target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1, 84 target_ulong arg2) 85 { 86 return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 * 87 (uint64_t)(uint32_t)arg2); 88 } 89 90 target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1, 91 target_ulong arg2) 92 { 93 return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * 94 (int64_t)(int32_t)arg2); 95 } 96 97 target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1, 98 target_ulong arg2) 99 { 100 return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 * 101 (int64_t)(int32_t)arg2); 102 } 103 104 target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1, 105 target_ulong arg2) 106 { 107 return set_HI_LOT0(env, (uint64_t)get_HILO(env) + 108 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); 109 } 110 111 target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1, 112 target_ulong arg2) 113 { 114 return set_HIT0_LO(env, (uint64_t)get_HILO(env) + 115 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); 116 } 117 118 target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1, 119 target_ulong arg2) 120 { 121 return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * 122 (int64_t)(int32_t)arg2); 123 } 124 125 target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1, 126 target_ulong arg2) 127 { 128 
return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 * 129 (int64_t)(int32_t)arg2); 130 } 131 132 target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1, 133 target_ulong arg2) 134 { 135 return set_HI_LOT0(env, (uint64_t)get_HILO(env) - 136 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); 137 } 138 139 target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1, 140 target_ulong arg2) 141 { 142 return set_HIT0_LO(env, (uint64_t)get_HILO(env) - 143 (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2); 144 } 145 146 target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1, 147 target_ulong arg2) 148 { 149 return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2); 150 } 151 152 target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1, 153 target_ulong arg2) 154 { 155 return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 * 156 (uint64_t)(uint32_t)arg2); 157 } 158 159 target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1, 160 target_ulong arg2) 161 { 162 return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 * 163 (int64_t)(int32_t)arg2); 164 } 165 166 target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1, 167 target_ulong arg2) 168 { 169 return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 * 170 (uint64_t)(uint32_t)arg2); 171 } 172 173 static inline target_ulong bitswap(target_ulong v) 174 { 175 v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | 176 ((v & (target_ulong)0x5555555555555555ULL) << 1); 177 v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) | 178 ((v & (target_ulong)0x3333333333333333ULL) << 2); 179 v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) | 180 ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4); 181 return v; 182 } 183 184 #ifdef TARGET_MIPS64 185 target_ulong helper_dbitswap(target_ulong rt) 186 { 187 return bitswap(rt); 188 } 189 #endif 190 191 target_ulong helper_bitswap(target_ulong rt) 192 { 193 return (int32_t)bitswap(rt); 194 } 195 196 target_ulong 
helper_rotx(target_ulong rs, uint32_t shift, uint32_t shiftx, 197 uint32_t stripe) 198 { 199 int i; 200 uint64_t tmp0 = ((uint64_t)rs) << 32 | ((uint64_t)rs & 0xffffffff); 201 uint64_t tmp1 = tmp0; 202 for (i = 0; i <= 46; i++) { 203 int s; 204 if (i & 0x8) { 205 s = shift; 206 } else { 207 s = shiftx; 208 } 209 210 if (stripe != 0 && !(i & 0x4)) { 211 s = ~s; 212 } 213 if (s & 0x10) { 214 if (tmp0 & (1LL << (i + 16))) { 215 tmp1 |= 1LL << i; 216 } else { 217 tmp1 &= ~(1LL << i); 218 } 219 } 220 } 221 222 uint64_t tmp2 = tmp1; 223 for (i = 0; i <= 38; i++) { 224 int s; 225 if (i & 0x4) { 226 s = shift; 227 } else { 228 s = shiftx; 229 } 230 231 if (s & 0x8) { 232 if (tmp1 & (1LL << (i + 8))) { 233 tmp2 |= 1LL << i; 234 } else { 235 tmp2 &= ~(1LL << i); 236 } 237 } 238 } 239 240 uint64_t tmp3 = tmp2; 241 for (i = 0; i <= 34; i++) { 242 int s; 243 if (i & 0x2) { 244 s = shift; 245 } else { 246 s = shiftx; 247 } 248 if (s & 0x4) { 249 if (tmp2 & (1LL << (i + 4))) { 250 tmp3 |= 1LL << i; 251 } else { 252 tmp3 &= ~(1LL << i); 253 } 254 } 255 } 256 257 uint64_t tmp4 = tmp3; 258 for (i = 0; i <= 32; i++) { 259 int s; 260 if (i & 0x1) { 261 s = shift; 262 } else { 263 s = shiftx; 264 } 265 if (s & 0x2) { 266 if (tmp3 & (1LL << (i + 2))) { 267 tmp4 |= 1LL << i; 268 } else { 269 tmp4 &= ~(1LL << i); 270 } 271 } 272 } 273 274 uint64_t tmp5 = tmp4; 275 for (i = 0; i <= 31; i++) { 276 int s; 277 s = shift; 278 if (s & 0x1) { 279 if (tmp4 & (1LL << (i + 1))) { 280 tmp5 |= 1LL << i; 281 } else { 282 tmp5 &= ~(1LL << i); 283 } 284 } 285 } 286 287 return (int64_t)(int32_t)(uint32_t)tmp5; 288 } 289 290 #ifndef CONFIG_USER_ONLY 291 292 static inline hwaddr do_translate_address(CPUMIPSState *env, 293 target_ulong address, 294 int rw, uintptr_t retaddr) 295 { 296 hwaddr paddr; 297 CPUState *cs = env_cpu(env); 298 299 paddr = cpu_mips_translate_address(env, address, rw); 300 301 if (paddr == -1LL) { 302 cpu_loop_exit_restore(cs, retaddr); 303 } else { 304 return paddr; 305 } 306 } 307 
/*
 * HELPER_LD_ATOMIC expands to one LL (load-linked) helper per access size.
 * It checks alignment, records the linked address (physical address in
 * CP0_LLAddr, virtual address in lladdr) and latches the loaded value in
 * llval for the later SC (store-conditional) comparison.
 */
#define HELPER_LD_ATOMIC(name, insn, almask, do_cast)                         \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx)  \
{                                                                             \
    if (arg & almask) {                                                       \
        /* Misaligned: raise AdEL; BadVAddr is left alone in debug mode. */   \
        if (!(env->hflags & MIPS_HFLAG_DM)) {                                 \
            env->CP0_BadVAddr = arg;                                          \
        }                                                                     \
        do_raise_exception(env, EXCP_AdEL, GETPC());                          \
    }                                                                         \
    env->CP0_LLAddr = do_translate_address(env, arg, 0, GETPC());             \
    env->lladdr = arg;                                                        \
    env->llval = do_cast cpu_##insn##_mmuidx_ra(env, arg, mem_idx, GETPC());  \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, ldl, 0x3, (target_long)(int32_t))
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ldq, 0x7, (target_ulong))
#endif
#undef HELPER_LD_ATOMIC
#endif

/*
 * Helpers for the unaligned SWL/SWR stores. GET_LMASK yields the byte
 * position of the address within its word in big-endian terms; GET_OFFSET
 * steps through the remaining bytes in the direction appropriate for the
 * target endianness.
 */
#ifdef TARGET_WORDS_BIGENDIAN
#define GET_LMASK(v) ((v) & 3)
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif

/*
 * SWL: store the most-significant bytes of arg1, from address arg2 down to
 * the next word boundary, one byte at a time.
 */
void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 24), mem_idx, GETPC());

    if (GET_LMASK(arg2) <= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) <= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) == 0) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)arg1,
                          mem_idx, GETPC());
    }
}

/*
 * SWR: store the least-significant bytes of arg1, from address arg2 up to
 * the previous word boundary, one byte at a time.
 */
void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
                int mem_idx)
{
    cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC());

    if (GET_LMASK(arg2) >= 1) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8),
                          mem_idx, GETPC());
    }

    if (GET_LMASK(arg2) >= 2) {
        cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16),
                          mem_idx, GETPC());
    }

    if
(GET_LMASK(arg2) == 3) { 374 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), 375 mem_idx, GETPC()); 376 } 377 } 378 379 #if defined(TARGET_MIPS64) 380 /* 381 * "half" load and stores. We must do the memory access inline, 382 * or fault handling won't work. 383 */ 384 #ifdef TARGET_WORDS_BIGENDIAN 385 #define GET_LMASK64(v) ((v) & 7) 386 #else 387 #define GET_LMASK64(v) (((v) & 7) ^ 7) 388 #endif 389 390 void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, 391 int mem_idx) 392 { 393 cpu_stb_mmuidx_ra(env, arg2, (uint8_t)(arg1 >> 56), mem_idx, GETPC()); 394 395 if (GET_LMASK64(arg2) <= 6) { 396 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), 397 mem_idx, GETPC()); 398 } 399 400 if (GET_LMASK64(arg2) <= 5) { 401 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), 402 mem_idx, GETPC()); 403 } 404 405 if (GET_LMASK64(arg2) <= 4) { 406 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), 407 mem_idx, GETPC()); 408 } 409 410 if (GET_LMASK64(arg2) <= 3) { 411 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), 412 mem_idx, GETPC()); 413 } 414 415 if (GET_LMASK64(arg2) <= 2) { 416 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), 417 mem_idx, GETPC()); 418 } 419 420 if (GET_LMASK64(arg2) <= 1) { 421 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), 422 mem_idx, GETPC()); 423 } 424 425 if (GET_LMASK64(arg2) <= 0) { 426 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, 427 mem_idx, GETPC()); 428 } 429 } 430 431 void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2, 432 int mem_idx) 433 { 434 cpu_stb_mmuidx_ra(env, arg2, (uint8_t)arg1, mem_idx, GETPC()); 435 436 if (GET_LMASK64(arg2) >= 1) { 437 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), 438 mem_idx, GETPC()); 439 } 440 441 if (GET_LMASK64(arg2) >= 2) { 442 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), 443 mem_idx, 
GETPC()); 444 } 445 446 if (GET_LMASK64(arg2) >= 3) { 447 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), 448 mem_idx, GETPC()); 449 } 450 451 if (GET_LMASK64(arg2) >= 4) { 452 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), 453 mem_idx, GETPC()); 454 } 455 456 if (GET_LMASK64(arg2) >= 5) { 457 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), 458 mem_idx, GETPC()); 459 } 460 461 if (GET_LMASK64(arg2) >= 6) { 462 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), 463 mem_idx, GETPC()); 464 } 465 466 if (GET_LMASK64(arg2) == 7) { 467 cpu_stb_mmuidx_ra(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), 468 mem_idx, GETPC()); 469 } 470 } 471 #endif /* TARGET_MIPS64 */ 472 473 static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 }; 474 475 void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, 476 uint32_t mem_idx) 477 { 478 target_ulong base_reglist = reglist & 0xf; 479 target_ulong do_r31 = reglist & 0x10; 480 481 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { 482 target_ulong i; 483 484 for (i = 0; i < base_reglist; i++) { 485 env->active_tc.gpr[multiple_regs[i]] = 486 (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); 487 addr += 4; 488 } 489 } 490 491 if (do_r31) { 492 env->active_tc.gpr[31] = 493 (target_long)cpu_ldl_mmuidx_ra(env, addr, mem_idx, GETPC()); 494 } 495 } 496 497 void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist, 498 uint32_t mem_idx) 499 { 500 target_ulong base_reglist = reglist & 0xf; 501 target_ulong do_r31 = reglist & 0x10; 502 503 if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) { 504 target_ulong i; 505 506 for (i = 0; i < base_reglist; i++) { 507 cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]], 508 mem_idx, GETPC()); 509 addr += 4; 510 } 511 } 512 513 if (do_r31) { 514 cpu_stw_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC()); 
    }
}

#if defined(TARGET_MIPS64)
/* LDM: 64-bit counterpart of LWM — load consecutive doublewords. */
void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            env->active_tc.gpr[multiple_regs[i]] =
                cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        env->active_tc.gpr[31] =
            cpu_ldq_mmuidx_ra(env, addr, mem_idx, GETPC());
    }
}

/* SDM: 64-bit counterpart of SWM — store consecutive doublewords. */
void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
                uint32_t mem_idx)
{
    target_ulong base_reglist = reglist & 0xf;
    target_ulong do_r31 = reglist & 0x10;

    if (base_reglist > 0 && base_reglist <= ARRAY_SIZE(multiple_regs)) {
        target_ulong i;

        for (i = 0; i < base_reglist; i++) {
            cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[multiple_regs[i]],
                              mem_idx, GETPC());
            addr += 8;
        }
    }

    if (do_r31) {
        cpu_stq_mmuidx_ra(env, addr, env->active_tc.gpr[31], mem_idx, GETPC());
    }
}
#endif

#ifndef CONFIG_USER_ONLY
/* SMP helpers. */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    /*
     * If the VPE is halted but otherwise active, it means it's waiting for
     * an interrupt.
     */
    return cpu->halted && mips_vpe_active(env);
}

/* Like mips_vpe_is_wfi, but checks VP (not VPE) activity. */
static bool mips_vp_is_wfi(MIPSCPU *c)
{
    CPUState *cpu = CPU(c);
    CPUMIPSState *env = &c->env;

    return cpu->halted && mips_vp_active(env);
}

static inline void mips_vpe_wake(MIPSCPU *c)
{
    /*
     * Don't set ->halted = 0 directly, let it be done via cpu_has_work
     * because there might be other conditions that state that c should
     * be sleeping.
591 */ 592 qemu_mutex_lock_iothread(); 593 cpu_interrupt(CPU(c), CPU_INTERRUPT_WAKE); 594 qemu_mutex_unlock_iothread(); 595 } 596 597 static inline void mips_vpe_sleep(MIPSCPU *cpu) 598 { 599 CPUState *cs = CPU(cpu); 600 601 /* 602 * The VPE was shut off, really go to bed. 603 * Reset any old _WAKE requests. 604 */ 605 cs->halted = 1; 606 cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); 607 } 608 609 static inline void mips_tc_wake(MIPSCPU *cpu, int tc) 610 { 611 CPUMIPSState *c = &cpu->env; 612 613 /* FIXME: TC reschedule. */ 614 if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) { 615 mips_vpe_wake(cpu); 616 } 617 } 618 619 static inline void mips_tc_sleep(MIPSCPU *cpu, int tc) 620 { 621 CPUMIPSState *c = &cpu->env; 622 623 /* FIXME: TC reschedule. */ 624 if (!mips_vpe_active(c)) { 625 mips_vpe_sleep(cpu); 626 } 627 } 628 629 /** 630 * mips_cpu_map_tc: 631 * @env: CPU from which mapping is performed. 632 * @tc: Should point to an int with the value of the global TC index. 633 * 634 * This function will transform @tc into a local index within the 635 * returned #CPUMIPSState. 636 */ 637 638 /* 639 * FIXME: This code assumes that all VPEs have the same number of TCs, 640 * which depends on runtime setup. Can probably be fixed by 641 * walking the list of CPUMIPSStates. 642 */ 643 static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc) 644 { 645 MIPSCPU *cpu; 646 CPUState *cs; 647 CPUState *other_cs; 648 int vpe_idx; 649 int tc_idx = *tc; 650 651 if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) { 652 /* Not allowed to address other CPUs. */ 653 *tc = env->current_tc; 654 return env; 655 } 656 657 cs = env_cpu(env); 658 vpe_idx = tc_idx / cs->nr_threads; 659 *tc = tc_idx % cs->nr_threads; 660 other_cs = qemu_get_cpu(vpe_idx); 661 if (other_cs == NULL) { 662 return env; 663 } 664 cpu = MIPS_CPU(other_cs); 665 return &cpu->env; 666 } 667 668 /* 669 * The per VPE CP0_Status register shares some fields with the per TC 670 * CP0_TCStatus registers. 
These fields are wired to the same registers, 671 * so changes to either of them should be reflected on both registers. 672 * 673 * Also, EntryHi shares the bottom 8 bit ASID with TCStauts. 674 * 675 * These helper call synchronizes the regs for a given cpu. 676 */ 677 678 /* 679 * Called for updates to CP0_Status. Defined in "cpu.h" for gdbstub.c. 680 * static inline void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, 681 * int tc); 682 */ 683 684 /* Called for updates to CP0_TCStatus. */ 685 static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc, 686 target_ulong v) 687 { 688 uint32_t status; 689 uint32_t tcu, tmx, tasid, tksu; 690 uint32_t mask = ((1U << CP0St_CU3) 691 | (1 << CP0St_CU2) 692 | (1 << CP0St_CU1) 693 | (1 << CP0St_CU0) 694 | (1 << CP0St_MX) 695 | (3 << CP0St_KSU)); 696 697 tcu = (v >> CP0TCSt_TCU0) & 0xf; 698 tmx = (v >> CP0TCSt_TMX) & 0x1; 699 tasid = v & cpu->CP0_EntryHi_ASID_mask; 700 tksu = (v >> CP0TCSt_TKSU) & 0x3; 701 702 status = tcu << CP0St_CU0; 703 status |= tmx << CP0St_MX; 704 status |= tksu << CP0St_KSU; 705 706 cpu->CP0_Status &= ~mask; 707 cpu->CP0_Status |= status; 708 709 /* Sync the TASID with EntryHi. */ 710 cpu->CP0_EntryHi &= ~cpu->CP0_EntryHi_ASID_mask; 711 cpu->CP0_EntryHi |= tasid; 712 713 compute_hflags(cpu); 714 } 715 716 /* Called for updates to CP0_EntryHi. 
*/ 717 static void sync_c0_entryhi(CPUMIPSState *cpu, int tc) 718 { 719 int32_t *tcst; 720 uint32_t asid, v = cpu->CP0_EntryHi; 721 722 asid = v & cpu->CP0_EntryHi_ASID_mask; 723 724 if (tc == cpu->current_tc) { 725 tcst = &cpu->active_tc.CP0_TCStatus; 726 } else { 727 tcst = &cpu->tcs[tc].CP0_TCStatus; 728 } 729 730 *tcst &= ~cpu->CP0_EntryHi_ASID_mask; 731 *tcst |= asid; 732 } 733 734 /* CP0 helpers */ 735 target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env) 736 { 737 return env->mvp->CP0_MVPControl; 738 } 739 740 target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env) 741 { 742 return env->mvp->CP0_MVPConf0; 743 } 744 745 target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env) 746 { 747 return env->mvp->CP0_MVPConf1; 748 } 749 750 target_ulong helper_mfc0_random(CPUMIPSState *env) 751 { 752 return (int32_t)cpu_mips_get_random(env); 753 } 754 755 target_ulong helper_mfc0_tcstatus(CPUMIPSState *env) 756 { 757 return env->active_tc.CP0_TCStatus; 758 } 759 760 target_ulong helper_mftc0_tcstatus(CPUMIPSState *env) 761 { 762 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 763 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 764 765 if (other_tc == other->current_tc) { 766 return other->active_tc.CP0_TCStatus; 767 } else { 768 return other->tcs[other_tc].CP0_TCStatus; 769 } 770 } 771 772 target_ulong helper_mfc0_tcbind(CPUMIPSState *env) 773 { 774 return env->active_tc.CP0_TCBind; 775 } 776 777 target_ulong helper_mftc0_tcbind(CPUMIPSState *env) 778 { 779 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 780 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 781 782 if (other_tc == other->current_tc) { 783 return other->active_tc.CP0_TCBind; 784 } else { 785 return other->tcs[other_tc].CP0_TCBind; 786 } 787 } 788 789 target_ulong helper_mfc0_tcrestart(CPUMIPSState *env) 790 { 791 return env->active_tc.PC; 792 } 793 794 target_ulong helper_mftc0_tcrestart(CPUMIPSState *env) 795 { 796 int other_tc = env->CP0_VPEControl & (0xff << 
CP0VPECo_TargTC); 797 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 798 799 if (other_tc == other->current_tc) { 800 return other->active_tc.PC; 801 } else { 802 return other->tcs[other_tc].PC; 803 } 804 } 805 806 target_ulong helper_mfc0_tchalt(CPUMIPSState *env) 807 { 808 return env->active_tc.CP0_TCHalt; 809 } 810 811 target_ulong helper_mftc0_tchalt(CPUMIPSState *env) 812 { 813 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 814 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 815 816 if (other_tc == other->current_tc) { 817 return other->active_tc.CP0_TCHalt; 818 } else { 819 return other->tcs[other_tc].CP0_TCHalt; 820 } 821 } 822 823 target_ulong helper_mfc0_tccontext(CPUMIPSState *env) 824 { 825 return env->active_tc.CP0_TCContext; 826 } 827 828 target_ulong helper_mftc0_tccontext(CPUMIPSState *env) 829 { 830 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 831 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 832 833 if (other_tc == other->current_tc) { 834 return other->active_tc.CP0_TCContext; 835 } else { 836 return other->tcs[other_tc].CP0_TCContext; 837 } 838 } 839 840 target_ulong helper_mfc0_tcschedule(CPUMIPSState *env) 841 { 842 return env->active_tc.CP0_TCSchedule; 843 } 844 845 target_ulong helper_mftc0_tcschedule(CPUMIPSState *env) 846 { 847 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 848 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 849 850 if (other_tc == other->current_tc) { 851 return other->active_tc.CP0_TCSchedule; 852 } else { 853 return other->tcs[other_tc].CP0_TCSchedule; 854 } 855 } 856 857 target_ulong helper_mfc0_tcschefback(CPUMIPSState *env) 858 { 859 return env->active_tc.CP0_TCScheFBack; 860 } 861 862 target_ulong helper_mftc0_tcschefback(CPUMIPSState *env) 863 { 864 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 865 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 866 867 if (other_tc == other->current_tc) { 868 return 
other->active_tc.CP0_TCScheFBack; 869 } else { 870 return other->tcs[other_tc].CP0_TCScheFBack; 871 } 872 } 873 874 target_ulong helper_mfc0_count(CPUMIPSState *env) 875 { 876 return (int32_t)cpu_mips_get_count(env); 877 } 878 879 target_ulong helper_mfc0_saar(CPUMIPSState *env) 880 { 881 if ((env->CP0_SAARI & 0x3f) < 2) { 882 return (int32_t) env->CP0_SAAR[env->CP0_SAARI & 0x3f]; 883 } 884 return 0; 885 } 886 887 target_ulong helper_mfhc0_saar(CPUMIPSState *env) 888 { 889 if ((env->CP0_SAARI & 0x3f) < 2) { 890 return env->CP0_SAAR[env->CP0_SAARI & 0x3f] >> 32; 891 } 892 return 0; 893 } 894 895 target_ulong helper_mftc0_entryhi(CPUMIPSState *env) 896 { 897 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 898 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 899 900 return other->CP0_EntryHi; 901 } 902 903 target_ulong helper_mftc0_cause(CPUMIPSState *env) 904 { 905 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 906 int32_t tccause; 907 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 908 909 if (other_tc == other->current_tc) { 910 tccause = other->CP0_Cause; 911 } else { 912 tccause = other->CP0_Cause; 913 } 914 915 return tccause; 916 } 917 918 target_ulong helper_mftc0_status(CPUMIPSState *env) 919 { 920 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 921 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 922 923 return other->CP0_Status; 924 } 925 926 target_ulong helper_mfc0_lladdr(CPUMIPSState *env) 927 { 928 return (int32_t)(env->CP0_LLAddr >> env->CP0_LLAddr_shift); 929 } 930 931 target_ulong helper_mfc0_maar(CPUMIPSState *env) 932 { 933 return (int32_t) env->CP0_MAAR[env->CP0_MAARI]; 934 } 935 936 target_ulong helper_mfhc0_maar(CPUMIPSState *env) 937 { 938 return env->CP0_MAAR[env->CP0_MAARI] >> 32; 939 } 940 941 target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel) 942 { 943 return (int32_t)env->CP0_WatchLo[sel]; 944 } 945 946 target_ulong helper_mfc0_watchhi(CPUMIPSState *env, 
uint32_t sel) 947 { 948 return (int32_t) env->CP0_WatchHi[sel]; 949 } 950 951 target_ulong helper_mfhc0_watchhi(CPUMIPSState *env, uint32_t sel) 952 { 953 return env->CP0_WatchHi[sel] >> 32; 954 } 955 956 target_ulong helper_mfc0_debug(CPUMIPSState *env) 957 { 958 target_ulong t0 = env->CP0_Debug; 959 if (env->hflags & MIPS_HFLAG_DM) { 960 t0 |= 1 << CP0DB_DM; 961 } 962 963 return t0; 964 } 965 966 target_ulong helper_mftc0_debug(CPUMIPSState *env) 967 { 968 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 969 int32_t tcstatus; 970 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 971 972 if (other_tc == other->current_tc) { 973 tcstatus = other->active_tc.CP0_Debug_tcstatus; 974 } else { 975 tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus; 976 } 977 978 /* XXX: Might be wrong, check with EJTAG spec. */ 979 return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | 980 (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); 981 } 982 983 #if defined(TARGET_MIPS64) 984 target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env) 985 { 986 return env->active_tc.PC; 987 } 988 989 target_ulong helper_dmfc0_tchalt(CPUMIPSState *env) 990 { 991 return env->active_tc.CP0_TCHalt; 992 } 993 994 target_ulong helper_dmfc0_tccontext(CPUMIPSState *env) 995 { 996 return env->active_tc.CP0_TCContext; 997 } 998 999 target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env) 1000 { 1001 return env->active_tc.CP0_TCSchedule; 1002 } 1003 1004 target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env) 1005 { 1006 return env->active_tc.CP0_TCScheFBack; 1007 } 1008 1009 target_ulong helper_dmfc0_lladdr(CPUMIPSState *env) 1010 { 1011 return env->CP0_LLAddr >> env->CP0_LLAddr_shift; 1012 } 1013 1014 target_ulong helper_dmfc0_maar(CPUMIPSState *env) 1015 { 1016 return env->CP0_MAAR[env->CP0_MAARI]; 1017 } 1018 1019 target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel) 1020 { 1021 return env->CP0_WatchLo[sel]; 1022 } 1023 1024 target_ulong 
helper_dmfc0_watchhi(CPUMIPSState *env, uint32_t sel) 1025 { 1026 return env->CP0_WatchHi[sel]; 1027 } 1028 1029 target_ulong helper_dmfc0_saar(CPUMIPSState *env) 1030 { 1031 if ((env->CP0_SAARI & 0x3f) < 2) { 1032 return env->CP0_SAAR[env->CP0_SAARI & 0x3f]; 1033 } 1034 return 0; 1035 } 1036 #endif /* TARGET_MIPS64 */ 1037 1038 void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1) 1039 { 1040 uint32_t index_p = env->CP0_Index & 0x80000000; 1041 uint32_t tlb_index = arg1 & 0x7fffffff; 1042 if (tlb_index < env->tlb->nb_tlb) { 1043 if (env->insn_flags & ISA_MIPS32R6) { 1044 index_p |= arg1 & 0x80000000; 1045 } 1046 env->CP0_Index = index_p | tlb_index; 1047 } 1048 } 1049 1050 void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1) 1051 { 1052 uint32_t mask = 0; 1053 uint32_t newval; 1054 1055 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { 1056 mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) | 1057 (1 << CP0MVPCo_EVP); 1058 } 1059 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) { 1060 mask |= (1 << CP0MVPCo_STLB); 1061 } 1062 newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask); 1063 1064 /* TODO: Enable/disable shared TLB, enable/disable VPEs. */ 1065 1066 env->mvp->CP0_MVPControl = newval; 1067 } 1068 1069 void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) 1070 { 1071 uint32_t mask; 1072 uint32_t newval; 1073 1074 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | 1075 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); 1076 newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask); 1077 1078 /* 1079 * Yield scheduler intercept not implemented. 1080 * Gating storage scheduler intercept not implemented. 1081 */ 1082 1083 /* TODO: Enable/disable TCs. 
*/ 1084 1085 env->CP0_VPEControl = newval; 1086 } 1087 1088 void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1) 1089 { 1090 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1091 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1092 uint32_t mask; 1093 uint32_t newval; 1094 1095 mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) | 1096 (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC); 1097 newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask); 1098 1099 /* TODO: Enable/disable TCs. */ 1100 1101 other->CP0_VPEControl = newval; 1102 } 1103 1104 target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env) 1105 { 1106 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1107 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1108 /* FIXME: Mask away return zero on read bits. */ 1109 return other->CP0_VPEControl; 1110 } 1111 1112 target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env) 1113 { 1114 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1115 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1116 1117 return other->CP0_VPEConf0; 1118 } 1119 1120 void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) 1121 { 1122 uint32_t mask = 0; 1123 uint32_t newval; 1124 1125 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { 1126 if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA)) { 1127 mask |= (0xff << CP0VPEC0_XTC); 1128 } 1129 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); 1130 } 1131 newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask); 1132 1133 /* TODO: TC exclusive handling due to ERL/EXL. 
*/ 1134 1135 env->CP0_VPEConf0 = newval; 1136 } 1137 1138 void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1) 1139 { 1140 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1141 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1142 uint32_t mask = 0; 1143 uint32_t newval; 1144 1145 mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA); 1146 newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask); 1147 1148 /* TODO: TC exclusive handling due to ERL/EXL. */ 1149 other->CP0_VPEConf0 = newval; 1150 } 1151 1152 void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1) 1153 { 1154 uint32_t mask = 0; 1155 uint32_t newval; 1156 1157 if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) 1158 mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) | 1159 (0xff << CP0VPEC1_NCP1); 1160 newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask); 1161 1162 /* UDI not implemented. */ 1163 /* CP2 not implemented. */ 1164 1165 /* TODO: Handle FPU (CP1) binding. */ 1166 1167 env->CP0_VPEConf1 = newval; 1168 } 1169 1170 void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1) 1171 { 1172 /* Yield qualifier inputs not implemented. 
 */
    env->CP0_YQMask = 0x00000000;
}

/* Only the low 16 bits of VPEOpt are writable. */
void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}

/* Writable PFN portion of EntryLo, derived from the supported PA range. */
#define MTC0_ENTRYLO_MASK(env) ((env->PAMask >> 6) & 0x3FFFFFFF)

void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    /*
     * RI/XI arrive in bits 31:30 of the 32-bit write and are writable only
     * when enabled via CP0_PageGrain XIE; they are relocated up to the
     * EnLo XI position in the internal 64-bit register.
     */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo0 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
#define DMTC0_ENTRYLO_MASK(env) (env->PAMask >> 6)

void helper_dmtc0_entrylo0(CPUMIPSState *env, uint64_t arg1)
{
    /* RI/XI bits are writable only when enabled via CP0_PageGrain XIE. */
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo0 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif

/*
 * Write CP0_TCStatus of the current TC, honouring the per-CPU r/w bitmask,
 * and mirror the shared fields into CP0_Status/EntryHi.
 */
void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    uint32_t newval;

    newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);

    env->active_tc.CP0_TCStatus = newval;
    sync_c0_tcstatus(env, env->current_tc, newval);
}

/* Write CP0_TCStatus of the TC selected by VPEControl.TargTC. */
void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCStatus = arg1;
    } else {
        other->tcs[other_tc].CP0_TCStatus = arg1;
    }
    sync_c0_tcstatus(other, other_tc, arg1);
}

/*
 * Write CP0_TCBind of the current TC. Only TBE is normally writable;
 * CurVPE additionally becomes writable while MVPControl.VPC is set.
 */
void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;

    if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    env->active_tc.CP0_TCBind = newval;
}
/*
 * Cross-TC (MT ASE) write to CP0.TCBind: operates on the TC selected by
 * CP0.VPEControl.TargTC, which may belong to another VPE/CPU.
 */
void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t mask = (1 << CP0TCBd_TBE);
    uint32_t newval;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* CurVPE becomes writable only while the processor is in VPE
       configuration mode (MVPControl.VPC set). */
    if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC)) {
        mask |= (1 << CP0TCBd_CurVPE);
    }
    if (other_tc == other->current_tc) {
        newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
        other->active_tc.CP0_TCBind = newval;
    } else {
        newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
        other->tcs[other_tc].CP0_TCBind = newval;
    }
}

/*
 * Write CP0.TCRestart: set the TC's restart PC and abandon any LL/SC
 * sequence in progress.
 */
void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->CP0_LLAddr = 0;
    env->lladdr = 0;
    /* MIPS16 not implemented. */
}

/* Cross-TC variant of mtc0_tcrestart (TC selected by VPEControl.TargTC). */
void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.PC = arg1;
        other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->CP0_LLAddr = 0;
        other->lladdr = 0;
        /* MIPS16 not implemented. */
    } else {
        other->tcs[other_tc].PC = arg1;
        other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
        other->CP0_LLAddr = 0;
        other->lladdr = 0;
        /* MIPS16 not implemented. */
    }
}

/* Write CP0.TCHalt: bit 0 halts (sleeps) or resumes the current TC. */
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = env_archcpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    /* TODO: Halt TC / Restart (if allocated+active) TC. */
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}

/* Cross-TC variant of mtc0_tchalt (TC selected by VPEControl.TargTC). */
void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
    MIPSCPU *other_cpu = env_archcpu(other);

    /* TODO: Halt TC / Restart (if allocated+active) TC. */

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCHalt = arg1;
    } else {
        other->tcs[other_tc].CP0_TCHalt = arg1;
    }

    if (arg1 & 1) {
        mips_tc_sleep(other_cpu, other_tc);
    } else {
        mips_tc_wake(other_cpu, other_tc);
    }
}

void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}

/* Cross-TC variant of mtc0_tccontext. */
void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCContext = arg1;
    } else {
        other->tcs[other_tc].CP0_TCContext = arg1;
    }
}

void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}

/* Cross-TC variant of mtc0_tcschedule. */
void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCSchedule = arg1;
    } else {
        other->tcs[other_tc].CP0_TCSchedule = arg1;
    }
}

void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}

/* Cross-TC variant of mtc0_tcschefback. */
void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.CP0_TCScheFBack = arg1;
    } else {
        other->tcs[other_tc].CP0_TCScheFBack = arg1;
    }
}

void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    /* Keep RI/XI only if PageGrain enables them (XIE field). */
    target_ulong rxi = arg1 & (env->CP0_PageGrain & (3u << CP0PG_XIE));
    env->CP0_EntryLo1 = (arg1 & MTC0_ENTRYLO_MASK(env))
                        | (rxi << (CP0EnLo_XI - 30));
}

#if defined(TARGET_MIPS64)
void helper_dmtc0_entrylo1(CPUMIPSState *env, uint64_t arg1)
{
    uint64_t rxi = arg1 & ((env->CP0_PageGrain & (3ull << CP0PG_XIE)) << 32);
    env->CP0_EntryLo1 = (arg1 & DMTC0_ENTRYLO_MASK(env)) | rxi;
}
#endif

/* CP0.Context: only PTEBase (bits above 22) is software-writable here. */
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}

void helper_mtc0_memorymapid(CPUMIPSState *env, target_ulong arg1)
{
    int32_t old;
    old = env->CP0_MemoryMapID;
    env->CP0_MemoryMapID = (int32_t) arg1;
    /* If the MemoryMapID changes, flush qemu's TLB.
*/ 1398 if (old != env->CP0_MemoryMapID) { 1399 cpu_mips_tlb_flush(env); 1400 } 1401 } 1402 1403 void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask) 1404 { 1405 uint64_t mask = arg1 >> (TARGET_PAGE_BITS + 1); 1406 if (!(env->insn_flags & ISA_MIPS32R6) || (arg1 == ~0) || 1407 (mask == 0x0000 || mask == 0x0003 || mask == 0x000F || 1408 mask == 0x003F || mask == 0x00FF || mask == 0x03FF || 1409 mask == 0x0FFF || mask == 0x3FFF || mask == 0xFFFF)) { 1410 env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1)); 1411 } 1412 } 1413 1414 void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1) 1415 { 1416 update_pagemask(env, arg1, &env->CP0_PageMask); 1417 } 1418 1419 void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1) 1420 { 1421 /* SmartMIPS not implemented */ 1422 /* 1k pages not implemented */ 1423 env->CP0_PageGrain = (arg1 & env->CP0_PageGrain_rw_bitmask) | 1424 (env->CP0_PageGrain & ~env->CP0_PageGrain_rw_bitmask); 1425 compute_hflags(env); 1426 restore_pamask(env); 1427 } 1428 1429 void helper_mtc0_segctl0(CPUMIPSState *env, target_ulong arg1) 1430 { 1431 CPUState *cs = env_cpu(env); 1432 1433 env->CP0_SegCtl0 = arg1 & CP0SC0_MASK; 1434 tlb_flush(cs); 1435 } 1436 1437 void helper_mtc0_segctl1(CPUMIPSState *env, target_ulong arg1) 1438 { 1439 CPUState *cs = env_cpu(env); 1440 1441 env->CP0_SegCtl1 = arg1 & CP0SC1_MASK; 1442 tlb_flush(cs); 1443 } 1444 1445 void helper_mtc0_segctl2(CPUMIPSState *env, target_ulong arg1) 1446 { 1447 CPUState *cs = env_cpu(env); 1448 1449 env->CP0_SegCtl2 = arg1 & CP0SC2_MASK; 1450 tlb_flush(cs); 1451 } 1452 1453 void helper_mtc0_pwfield(CPUMIPSState *env, target_ulong arg1) 1454 { 1455 #if defined(TARGET_MIPS64) 1456 uint64_t mask = 0x3F3FFFFFFFULL; 1457 uint32_t old_ptei = (env->CP0_PWField >> CP0PF_PTEI) & 0x3FULL; 1458 uint32_t new_ptei = (arg1 >> CP0PF_PTEI) & 0x3FULL; 1459 1460 if ((env->insn_flags & ISA_MIPS32R6)) { 1461 if (((arg1 >> CP0PF_BDI) & 0x3FULL) < 12) { 1462 mask 
&= ~(0x3FULL << CP0PF_BDI); 1463 } 1464 if (((arg1 >> CP0PF_GDI) & 0x3FULL) < 12) { 1465 mask &= ~(0x3FULL << CP0PF_GDI); 1466 } 1467 if (((arg1 >> CP0PF_UDI) & 0x3FULL) < 12) { 1468 mask &= ~(0x3FULL << CP0PF_UDI); 1469 } 1470 if (((arg1 >> CP0PF_MDI) & 0x3FULL) < 12) { 1471 mask &= ~(0x3FULL << CP0PF_MDI); 1472 } 1473 if (((arg1 >> CP0PF_PTI) & 0x3FULL) < 12) { 1474 mask &= ~(0x3FULL << CP0PF_PTI); 1475 } 1476 } 1477 env->CP0_PWField = arg1 & mask; 1478 1479 if ((new_ptei >= 32) || 1480 ((env->insn_flags & ISA_MIPS32R6) && 1481 (new_ptei == 0 || new_ptei == 1))) { 1482 env->CP0_PWField = (env->CP0_PWField & ~0x3FULL) | 1483 (old_ptei << CP0PF_PTEI); 1484 } 1485 #else 1486 uint32_t mask = 0x3FFFFFFF; 1487 uint32_t old_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F; 1488 uint32_t new_ptew = (arg1 >> CP0PF_PTEW) & 0x3F; 1489 1490 if ((env->insn_flags & ISA_MIPS32R6)) { 1491 if (((arg1 >> CP0PF_GDW) & 0x3F) < 12) { 1492 mask &= ~(0x3F << CP0PF_GDW); 1493 } 1494 if (((arg1 >> CP0PF_UDW) & 0x3F) < 12) { 1495 mask &= ~(0x3F << CP0PF_UDW); 1496 } 1497 if (((arg1 >> CP0PF_MDW) & 0x3F) < 12) { 1498 mask &= ~(0x3F << CP0PF_MDW); 1499 } 1500 if (((arg1 >> CP0PF_PTW) & 0x3F) < 12) { 1501 mask &= ~(0x3F << CP0PF_PTW); 1502 } 1503 } 1504 env->CP0_PWField = arg1 & mask; 1505 1506 if ((new_ptew >= 32) || 1507 ((env->insn_flags & ISA_MIPS32R6) && 1508 (new_ptew == 0 || new_ptew == 1))) { 1509 env->CP0_PWField = (env->CP0_PWField & ~0x3F) | 1510 (old_ptew << CP0PF_PTEW); 1511 } 1512 #endif 1513 } 1514 1515 void helper_mtc0_pwsize(CPUMIPSState *env, target_ulong arg1) 1516 { 1517 #if defined(TARGET_MIPS64) 1518 env->CP0_PWSize = arg1 & 0x3F7FFFFFFFULL; 1519 #else 1520 env->CP0_PWSize = arg1 & 0x3FFFFFFF; 1521 #endif 1522 } 1523 1524 void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1) 1525 { 1526 if (env->insn_flags & ISA_MIPS32R6) { 1527 if (arg1 < env->tlb->nb_tlb) { 1528 env->CP0_Wired = arg1; 1529 } 1530 } else { 1531 env->CP0_Wired = arg1 % env->tlb->nb_tlb; 1532 } 1533 
}

void helper_mtc0_pwctl(CPUMIPSState *env, target_ulong arg1)
{
#if defined(TARGET_MIPS64)
    /* PWEn = 0. Hardware page table walking is not implemented. */
    env->CP0_PWCtl = (env->CP0_PWCtl & 0x000000C0) | (arg1 & 0x5C00003F);
#else
    env->CP0_PWCtl = (arg1 & 0x800000FF);
#endif
}

/*
 * CP0.SRSConf0..4: shadow register set configuration.  Writable bits
 * accumulate (|=) under each register's rw bitmask.
 * NOTE(review): |= makes these write-one-to-set; confirm against the
 * SRSConf specification that bits are never meant to be cleared here.
 */
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}

void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}

void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}

void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}

void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}

/* Write CP0.HWREna: which hardware registers rdhwr may read in user mode. */
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t mask = 0x0000000F;   /* HWR 0..3 always controllable */

    /* Bit 4 writable only with a performance counter on an R6 core. */
    if ((env->CP0_Config1 & (1 << CP0C1_PC)) &&
        (env->insn_flags & ISA_MIPS32R6)) {
        mask |= (1 << 4);
    }
    if (env->insn_flags & ISA_MIPS32R6) {
        mask |= (1 << 5);
    }
    /* Bit 29 (UserLocal) writable only when Config3.ULRI is set. */
    if (env->CP0_Config3 & (1 << CP0C3_ULRI)) {
        mask |= (1 << 29);

        /* Mirror the UserLocal-enable bit into hflags for fast access. */
        if (arg1 & (1 << 29)) {
            env->hflags |= MIPS_HFLAG_HWRENA_ULR;
        } else {
            env->hflags &= ~MIPS_HFLAG_HWRENA_ULR;
        }
    }

    env->CP0_HWREna = arg1 & mask;
}

void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}

/* CP0.SAARI: only index values 0 and 1 are implemented. */
void helper_mtc0_saari(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = arg1 & 0x3f;
    if (target <= 1) {
        env->CP0_SAARI = target;
    }
}

/* Write the low half of the SAAR register selected by CP0.SAARI. */
void helper_mtc0_saar(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = env->CP0_SAARI & 0x3f;
    if (target < 2) {
        env->CP0_SAAR[target] = arg1 & 0x00000ffffffff03fULL;
        switch (target) {
        case 0:
            /* SAAR[0] configures the ITC block; propagate the change. */
            if (env->itu) {
                itc_reconfigure(env->itu);
            }
            break;
        }
    }
}

/* Write the high half (bits 43:32) of the selected SAAR register. */
void helper_mthc0_saar(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t target = env->CP0_SAARI & 0x3f;
    if (target < 2) {
        env->CP0_SAAR[target] =
            (((uint64_t) arg1 << 32) & 0x00000fff00000000ULL) |
            (env->CP0_SAAR[target] & 0x00000000ffffffffULL);
        switch (target) {
        case 0:
            if (env->itu) {
                itc_reconfigure(env->itu);
            }
            break;
        }
    }
}

/*
 * Write CP0.EntryHi: VPN2 and ASID fields, plus EHINV when TLB
 * invalidation is supported.  Unwritable bits keep their old value.
 */
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val, mask;
    mask = (TARGET_PAGE_MASK << 1) | env->CP0_EntryHi_ASID_mask;
    /* EHINV writable only when Config4.IE >= 2 (tlbinv supported). */
    if (((env->CP0_Config4 >> CP0C4_IE) & 0x3) >= 2) {
        mask |= 1 << CP0EnHi_EHINV;
    }

    /* 1k pages not implemented */
#if defined(TARGET_MIPS64)
    if (env->insn_flags & ISA_MIPS32R6) {
        int entryhi_r = extract64(arg1, 62, 2);
        int config0_at = extract32(env->CP0_Config0, 13, 2);
        bool no_supervisor = (env->CP0_Status_rw_bitmask & 0x8) == 0;
        if ((entryhi_r == 2) ||
            (entryhi_r == 1 && (no_supervisor || config0_at == 1))) {
            /* skip EntryHi.R field if new value is reserved */
            mask &= ~(0x3ull << 62);
        }
    }
    mask &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    val = (arg1 & mask) | (old & ~mask);
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB.
*/ 1668 if ((old & env->CP0_EntryHi_ASID_mask) != 1669 (val & env->CP0_EntryHi_ASID_mask)) { 1670 tlb_flush(env_cpu(env)); 1671 } 1672 } 1673 1674 void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1) 1675 { 1676 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1677 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1678 1679 other->CP0_EntryHi = arg1; 1680 sync_c0_entryhi(other, other_tc); 1681 } 1682 1683 void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1) 1684 { 1685 cpu_mips_store_compare(env, arg1); 1686 } 1687 1688 void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1) 1689 { 1690 uint32_t val, old; 1691 1692 old = env->CP0_Status; 1693 cpu_mips_store_status(env, arg1); 1694 val = env->CP0_Status; 1695 1696 if (qemu_loglevel_mask(CPU_LOG_EXEC)) { 1697 qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x", 1698 old, old & env->CP0_Cause & CP0Ca_IP_mask, 1699 val, val & env->CP0_Cause & CP0Ca_IP_mask, 1700 env->CP0_Cause); 1701 switch (cpu_mmu_index(env, false)) { 1702 case 3: 1703 qemu_log(", ERL\n"); 1704 break; 1705 case MIPS_HFLAG_UM: 1706 qemu_log(", UM\n"); 1707 break; 1708 case MIPS_HFLAG_SM: 1709 qemu_log(", SM\n"); 1710 break; 1711 case MIPS_HFLAG_KM: 1712 qemu_log("\n"); 1713 break; 1714 default: 1715 cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); 1716 break; 1717 } 1718 } 1719 } 1720 1721 void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1) 1722 { 1723 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1724 uint32_t mask = env->CP0_Status_rw_bitmask & ~0xf1000018; 1725 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1726 1727 other->CP0_Status = (other->CP0_Status & ~mask) | (arg1 & mask); 1728 sync_c0_status(env, other, other_tc); 1729 } 1730 1731 void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1) 1732 { 1733 env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0); 1734 } 1735 1736 void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1) 
1737 { 1738 uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS); 1739 env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask); 1740 } 1741 1742 void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1) 1743 { 1744 cpu_mips_store_cause(env, arg1); 1745 } 1746 1747 void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1) 1748 { 1749 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1750 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1751 1752 cpu_mips_store_cause(other, arg1); 1753 } 1754 1755 target_ulong helper_mftc0_epc(CPUMIPSState *env) 1756 { 1757 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1758 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1759 1760 return other->CP0_EPC; 1761 } 1762 1763 target_ulong helper_mftc0_ebase(CPUMIPSState *env) 1764 { 1765 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1766 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1767 1768 return other->CP0_EBase; 1769 } 1770 1771 void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1) 1772 { 1773 target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask; 1774 if (arg1 & env->CP0_EBaseWG_rw_bitmask) { 1775 mask |= ~0x3FFFFFFF; 1776 } 1777 env->CP0_EBase = (env->CP0_EBase & ~mask) | (arg1 & mask); 1778 } 1779 1780 void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1) 1781 { 1782 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1783 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1784 target_ulong mask = 0x3FFFF000 | env->CP0_EBaseWG_rw_bitmask; 1785 if (arg1 & env->CP0_EBaseWG_rw_bitmask) { 1786 mask |= ~0x3FFFFFFF; 1787 } 1788 other->CP0_EBase = (other->CP0_EBase & ~mask) | (arg1 & mask); 1789 } 1790 1791 target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx) 1792 { 1793 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1794 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1795 1796 switch (idx) { 1797 case 0: return 
other->CP0_Config0; 1798 case 1: return other->CP0_Config1; 1799 case 2: return other->CP0_Config2; 1800 case 3: return other->CP0_Config3; 1801 /* 4 and 5 are reserved. */ 1802 case 6: return other->CP0_Config6; 1803 case 7: return other->CP0_Config7; 1804 default: 1805 break; 1806 } 1807 return 0; 1808 } 1809 1810 void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1) 1811 { 1812 env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007); 1813 } 1814 1815 void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1) 1816 { 1817 /* tertiary/secondary caches not implemented */ 1818 env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF); 1819 } 1820 1821 void helper_mtc0_config3(CPUMIPSState *env, target_ulong arg1) 1822 { 1823 if (env->insn_flags & ASE_MICROMIPS) { 1824 env->CP0_Config3 = (env->CP0_Config3 & ~(1 << CP0C3_ISA_ON_EXC)) | 1825 (arg1 & (1 << CP0C3_ISA_ON_EXC)); 1826 } 1827 } 1828 1829 void helper_mtc0_config4(CPUMIPSState *env, target_ulong arg1) 1830 { 1831 env->CP0_Config4 = (env->CP0_Config4 & (~env->CP0_Config4_rw_bitmask)) | 1832 (arg1 & env->CP0_Config4_rw_bitmask); 1833 } 1834 1835 void helper_mtc0_config5(CPUMIPSState *env, target_ulong arg1) 1836 { 1837 env->CP0_Config5 = (env->CP0_Config5 & (~env->CP0_Config5_rw_bitmask)) | 1838 (arg1 & env->CP0_Config5_rw_bitmask); 1839 env->CP0_EntryHi_ASID_mask = (env->CP0_Config5 & (1 << CP0C5_MI)) ? 1840 0x0 : (env->CP0_Config4 & (1 << CP0C4_AE)) ? 
0x3ff : 0xff; 1841 compute_hflags(env); 1842 } 1843 1844 void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1) 1845 { 1846 target_long mask = env->CP0_LLAddr_rw_bitmask; 1847 arg1 = arg1 << env->CP0_LLAddr_shift; 1848 env->CP0_LLAddr = (env->CP0_LLAddr & ~mask) | (arg1 & mask); 1849 } 1850 1851 #define MTC0_MAAR_MASK(env) \ 1852 ((0x1ULL << 63) | ((env->PAMask >> 4) & ~0xFFFull) | 0x3) 1853 1854 void helper_mtc0_maar(CPUMIPSState *env, target_ulong arg1) 1855 { 1856 env->CP0_MAAR[env->CP0_MAARI] = arg1 & MTC0_MAAR_MASK(env); 1857 } 1858 1859 void helper_mthc0_maar(CPUMIPSState *env, target_ulong arg1) 1860 { 1861 env->CP0_MAAR[env->CP0_MAARI] = 1862 (((uint64_t) arg1 << 32) & MTC0_MAAR_MASK(env)) | 1863 (env->CP0_MAAR[env->CP0_MAARI] & 0x00000000ffffffffULL); 1864 } 1865 1866 void helper_mtc0_maari(CPUMIPSState *env, target_ulong arg1) 1867 { 1868 int index = arg1 & 0x3f; 1869 if (index == 0x3f) { 1870 /* 1871 * Software may write all ones to INDEX to determine the 1872 * maximum value supported. 1873 */ 1874 env->CP0_MAARI = MIPS_MAAR_MAX - 1; 1875 } else if (index < MIPS_MAAR_MAX) { 1876 env->CP0_MAARI = index; 1877 } 1878 /* 1879 * Other than the all ones, if the value written is not supported, 1880 * then INDEX is unchanged from its previous value. 1881 */ 1882 } 1883 1884 void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1885 { 1886 /* 1887 * Watch exceptions for instructions, data loads, data stores 1888 * not implemented. 
1889 */ 1890 env->CP0_WatchLo[sel] = (arg1 & ~0x7); 1891 } 1892 1893 void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1894 { 1895 uint64_t mask = 0x40000FF8 | (env->CP0_EntryHi_ASID_mask << CP0WH_ASID); 1896 if ((env->CP0_Config5 >> CP0C5_MI) & 1) { 1897 mask |= 0xFFFFFFFF00000000ULL; /* MMID */ 1898 } 1899 env->CP0_WatchHi[sel] = arg1 & mask; 1900 env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7); 1901 } 1902 1903 void helper_mthc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel) 1904 { 1905 env->CP0_WatchHi[sel] = ((uint64_t) (arg1) << 32) | 1906 (env->CP0_WatchHi[sel] & 0x00000000ffffffffULL); 1907 } 1908 1909 void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1) 1910 { 1911 target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1; 1912 env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask); 1913 } 1914 1915 void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1) 1916 { 1917 env->CP0_Framemask = arg1; /* XXX */ 1918 } 1919 1920 void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1) 1921 { 1922 env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120); 1923 if (arg1 & (1 << CP0DB_DM)) { 1924 env->hflags |= MIPS_HFLAG_DM; 1925 } else { 1926 env->hflags &= ~MIPS_HFLAG_DM; 1927 } 1928 } 1929 1930 void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1) 1931 { 1932 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 1933 uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)); 1934 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 1935 1936 /* XXX: Might be wrong, check with EJTAG spec. 
*/ 1937 if (other_tc == other->current_tc) { 1938 other->active_tc.CP0_Debug_tcstatus = val; 1939 } else { 1940 other->tcs[other_tc].CP0_Debug_tcstatus = val; 1941 } 1942 other->CP0_Debug = (other->CP0_Debug & 1943 ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) | 1944 (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))); 1945 } 1946 1947 void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1) 1948 { 1949 env->CP0_Performance0 = arg1 & 0x000007ff; 1950 } 1951 1952 void helper_mtc0_errctl(CPUMIPSState *env, target_ulong arg1) 1953 { 1954 int32_t wst = arg1 & (1 << CP0EC_WST); 1955 int32_t spr = arg1 & (1 << CP0EC_SPR); 1956 int32_t itc = env->itc_tag ? (arg1 & (1 << CP0EC_ITC)) : 0; 1957 1958 env->CP0_ErrCtl = wst | spr | itc; 1959 1960 if (itc && !wst && !spr) { 1961 env->hflags |= MIPS_HFLAG_ITC_CACHE; 1962 } else { 1963 env->hflags &= ~MIPS_HFLAG_ITC_CACHE; 1964 } 1965 } 1966 1967 void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1) 1968 { 1969 if (env->hflags & MIPS_HFLAG_ITC_CACHE) { 1970 /* 1971 * If CACHE instruction is configured for ITC tags then make all 1972 * CP0.TagLo bits writable. The actual write to ITC Configuration 1973 * Tag will take care of the read-only bits. 
1974 */ 1975 env->CP0_TagLo = arg1; 1976 } else { 1977 env->CP0_TagLo = arg1 & 0xFFFFFCF6; 1978 } 1979 } 1980 1981 void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1) 1982 { 1983 env->CP0_DataLo = arg1; /* XXX */ 1984 } 1985 1986 void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1) 1987 { 1988 env->CP0_TagHi = arg1; /* XXX */ 1989 } 1990 1991 void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1) 1992 { 1993 env->CP0_DataHi = arg1; /* XXX */ 1994 } 1995 1996 /* MIPS MT functions */ 1997 target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel) 1998 { 1999 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 2000 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 2001 2002 if (other_tc == other->current_tc) { 2003 return other->active_tc.gpr[sel]; 2004 } else { 2005 return other->tcs[other_tc].gpr[sel]; 2006 } 2007 } 2008 2009 target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel) 2010 { 2011 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 2012 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 2013 2014 if (other_tc == other->current_tc) { 2015 return other->active_tc.LO[sel]; 2016 } else { 2017 return other->tcs[other_tc].LO[sel]; 2018 } 2019 } 2020 2021 target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel) 2022 { 2023 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 2024 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 2025 2026 if (other_tc == other->current_tc) { 2027 return other->active_tc.HI[sel]; 2028 } else { 2029 return other->tcs[other_tc].HI[sel]; 2030 } 2031 } 2032 2033 target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel) 2034 { 2035 int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC); 2036 CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc); 2037 2038 if (other_tc == other->current_tc) { 2039 return other->active_tc.ACX[sel]; 2040 } else { 2041 return other->tcs[other_tc].ACX[sel]; 2042 } 2043 } 2044 2045 target_ulong 
helper_mftdsp(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        return other->active_tc.DSPControl;
    } else {
        return other->tcs[other_tc].DSPControl;
    }
}

/* Cross-TC GPR write: store into the TC selected by VPEControl.TargTC. */
void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.gpr[sel] = arg1;
    } else {
        other->tcs[other_tc].gpr[sel] = arg1;
    }
}

/* Cross-TC LO write. */
void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.LO[sel] = arg1;
    } else {
        other->tcs[other_tc].LO[sel] = arg1;
    }
}

/* Cross-TC HI write. */
void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.HI[sel] = arg1;
    } else {
        other->tcs[other_tc].HI[sel] = arg1;
    }
}

/* Cross-TC ACX (DSP accumulator extension) write. */
void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.ACX[sel] = arg1;
    } else {
        other->tcs[other_tc].ACX[sel] = arg1;
    }
}

/* Cross-TC DSPControl write. */
void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        other->active_tc.DSPControl = arg1;
    } else {
        other->tcs[other_tc].DSPControl = arg1;
    }
}

/* MIPS MT functions */
target_ulong helper_dmt(void)
{
    /* TODO */
    return 0;
}

target_ulong helper_emt(void)
{
    /* TODO */
    return 0;
}

/*
 * DVPE: disable multi-VPE operation.  Puts every other VPE to sleep and
 * returns the previous MVPControl value.
 */
target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        /* Turn off all VPEs except the one executing the dvpe. */
        if (&other_cpu->env != env) {
            other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
    }
    return prev;
}

/*
 * EVPE: re-enable multi-VPE operation.  Wakes every other VPE that is
 * not sleeping in WFI and returns the previous MVPControl value.
 */
target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);

        if (&other_cpu->env != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu); /* And wake it up. */
        }
    }
    return prev;
}
#endif /* !CONFIG_USER_ONLY */

void helper_fork(target_ulong arg1, target_ulong arg2)
{
    /*
     * arg1 = rt, arg2 = rs
     * TODO: store to TC register
     */
}

target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    /* Reinterpret as signed: negative values select qualifier semantics. */
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented.
*/ 2180 if (arg1 != -2) { 2181 if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) && 2182 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) { 2183 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); 2184 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT; 2185 do_raise_exception(env, EXCP_THREAD, GETPC()); 2186 } 2187 } 2188 } else if (arg1 == 0) { 2189 if (0) { 2190 /* TODO: TC underflow */ 2191 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); 2192 do_raise_exception(env, EXCP_THREAD, GETPC()); 2193 } else { 2194 /* TODO: Deallocate TC */ 2195 } 2196 } else if (arg1 > 0) { 2197 /* Yield qualifier inputs not implemented. */ 2198 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT); 2199 env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT; 2200 do_raise_exception(env, EXCP_THREAD, GETPC()); 2201 } 2202 return env->CP0_YQMask; 2203 } 2204 2205 /* R6 Multi-threading */ 2206 #ifndef CONFIG_USER_ONLY 2207 target_ulong helper_dvp(CPUMIPSState *env) 2208 { 2209 CPUState *other_cs = first_cpu; 2210 target_ulong prev = env->CP0_VPControl; 2211 2212 if (!((env->CP0_VPControl >> CP0VPCtl_DIS) & 1)) { 2213 CPU_FOREACH(other_cs) { 2214 MIPSCPU *other_cpu = MIPS_CPU(other_cs); 2215 /* Turn off all VPs except the one executing the dvp. */ 2216 if (&other_cpu->env != env) { 2217 mips_vpe_sleep(other_cpu); 2218 } 2219 } 2220 env->CP0_VPControl |= (1 << CP0VPCtl_DIS); 2221 } 2222 return prev; 2223 } 2224 2225 target_ulong helper_evp(CPUMIPSState *env) 2226 { 2227 CPUState *other_cs = first_cpu; 2228 target_ulong prev = env->CP0_VPControl; 2229 2230 if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) { 2231 CPU_FOREACH(other_cs) { 2232 MIPSCPU *other_cpu = MIPS_CPU(other_cs); 2233 if ((&other_cpu->env != env) && !mips_vp_is_wfi(other_cpu)) { 2234 /* 2235 * If the VP is WFI, don't disturb its sleep. 2236 * Otherwise, wake it up. 
             */
                mips_vpe_wake(other_cpu);
            }
        }
        env->CP0_VPControl &= ~(1 << CP0VPCtl_DIS);
    }
    return prev;
}
#endif /* !CONFIG_USER_ONLY */

#ifndef CONFIG_USER_ONLY
/* TLB management */

/* Drop the extra (shadow) entries from env->tlb[first] onwards. */
static void r4k_mips_tlb_flush_extra(CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}

/* Extract the physical frame number from an EntryLo value. */
static inline uint64_t get_tlb_pfn_from_entrylo(uint64_t entrylo)
{
#if defined(TARGET_MIPS64)
    return extract64(entrylo, 6, 54);
#else
    return extract64(entrylo, 6, 24) |          /* PFN */
           (extract64(entrylo, 32, 32) << 24);  /* PFNX */
#endif
}

/* Populate guest TLB entry @idx from the CP0 EntryHi/EntryLo/PageMask set. */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;
    uint64_t mask = env->CP0_PageMask >> (TARGET_PAGE_BITS + 1);

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* EntryHi.EHINV set: write an explicitly-invalid entry and stop. */
    if (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) {
        tlb->EHINV = 1;
        return;
    }
    tlb->EHINV = 0;
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    tlb->MMID = env->CP0_MemoryMapID;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both EntryLo G bits are set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) & 1;
    tlb->RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) & 1;
    tlb->PFN[0] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo0) & ~mask) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) & 1;
    tlb->RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) & 1;
    tlb->PFN[1] = (get_tlb_pfn_from_entrylo(env->CP0_EntryLo1) & ~mask) << 12;
}

/* TLBINV: invalidate all non-global entries matching the current ASID/MMID. */
void r4k_helper_tlbinv(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    r4k_tlb_t *tlb;
    int idx;

    /* With Config5.MI set, match on MMID; otherwise fall back to ASID. */
    MMID = mi ? MMID : (uint32_t) ASID;
    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        tlb = &env->tlb->mmu.r4k.tlb[idx];
        tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID;
        if (!tlb->G && tlb_mmid == MMID) {
            tlb->EHINV = 1;
        }
    }
    cpu_mips_tlb_flush(env);
}

/* TLBINVF: invalidate every guest TLB entry, regardless of ASID/MMID. */
void r4k_helper_tlbinvf(CPUMIPSState *env)
{
    int idx;

    for (idx = 0; idx < env->tlb->nb_tlb; idx++) {
        env->tlb->mmu.r4k.tlb[idx].EHINV = 1;
    }
    cpu_mips_tlb_flush(env);
}

/* TLBWI: write the indexed guest TLB entry from the CP0 registers. */
void r4k_helper_tlbwi(CPUMIPSState *env)
{
    bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1);
    target_ulong VPN;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    uint32_t MMID = env->CP0_MemoryMapID;
    uint32_t tlb_mmid;
    bool EHINV, G, V0, D0, V1, D1, XI0, XI1, RI0, RI1;
    r4k_tlb_t *tlb;
    int idx;

    /* With Config5.MI set, match on MMID; otherwise fall back to ASID. */
    MMID = mi ?
MMID : (uint32_t) ASID; 2343 2344 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; 2345 tlb = &env->tlb->mmu.r4k.tlb[idx]; 2346 VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1); 2347 #if defined(TARGET_MIPS64) 2348 VPN &= env->SEGMask; 2349 #endif 2350 EHINV = (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) != 0; 2351 G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1; 2352 V0 = (env->CP0_EntryLo0 & 2) != 0; 2353 D0 = (env->CP0_EntryLo0 & 4) != 0; 2354 XI0 = (env->CP0_EntryLo0 >> CP0EnLo_XI) &1; 2355 RI0 = (env->CP0_EntryLo0 >> CP0EnLo_RI) &1; 2356 V1 = (env->CP0_EntryLo1 & 2) != 0; 2357 D1 = (env->CP0_EntryLo1 & 4) != 0; 2358 XI1 = (env->CP0_EntryLo1 >> CP0EnLo_XI) &1; 2359 RI1 = (env->CP0_EntryLo1 >> CP0EnLo_RI) &1; 2360 2361 tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; 2362 /* 2363 * Discard cached TLB entries, unless tlbwi is just upgrading access 2364 * permissions on the current entry. 2365 */ 2366 if (tlb->VPN != VPN || tlb_mmid != MMID || tlb->G != G || 2367 (!tlb->EHINV && EHINV) || 2368 (tlb->V0 && !V0) || (tlb->D0 && !D0) || 2369 (!tlb->XI0 && XI0) || (!tlb->RI0 && RI0) || 2370 (tlb->V1 && !V1) || (tlb->D1 && !D1) || 2371 (!tlb->XI1 && XI1) || (!tlb->RI1 && RI1)) { 2372 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); 2373 } 2374 2375 r4k_invalidate_tlb(env, idx, 0); 2376 r4k_fill_tlb(env, idx); 2377 } 2378 2379 void r4k_helper_tlbwr(CPUMIPSState *env) 2380 { 2381 int r = cpu_mips_get_random(env); 2382 2383 r4k_invalidate_tlb(env, r, 1); 2384 r4k_fill_tlb(env, r); 2385 } 2386 2387 void r4k_helper_tlbp(CPUMIPSState *env) 2388 { 2389 bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); 2390 r4k_tlb_t *tlb; 2391 target_ulong mask; 2392 target_ulong tag; 2393 target_ulong VPN; 2394 uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; 2395 uint32_t MMID = env->CP0_MemoryMapID; 2396 uint32_t tlb_mmid; 2397 int i; 2398 2399 MMID = mi ? 
MMID : (uint32_t) ASID; 2400 for (i = 0; i < env->tlb->nb_tlb; i++) { 2401 tlb = &env->tlb->mmu.r4k.tlb[i]; 2402 /* 1k pages are not supported. */ 2403 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); 2404 tag = env->CP0_EntryHi & ~mask; 2405 VPN = tlb->VPN & ~mask; 2406 #if defined(TARGET_MIPS64) 2407 tag &= env->SEGMask; 2408 #endif 2409 tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; 2410 /* Check ASID/MMID, virtual page number & size */ 2411 if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag && !tlb->EHINV) { 2412 /* TLB match */ 2413 env->CP0_Index = i; 2414 break; 2415 } 2416 } 2417 if (i == env->tlb->nb_tlb) { 2418 /* No match. Discard any shadow entries, if any of them match. */ 2419 for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) { 2420 tlb = &env->tlb->mmu.r4k.tlb[i]; 2421 /* 1k pages are not supported. */ 2422 mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1); 2423 tag = env->CP0_EntryHi & ~mask; 2424 VPN = tlb->VPN & ~mask; 2425 #if defined(TARGET_MIPS64) 2426 tag &= env->SEGMask; 2427 #endif 2428 tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; 2429 /* Check ASID/MMID, virtual page number & size */ 2430 if ((tlb->G == 1 || tlb_mmid == MMID) && VPN == tag) { 2431 r4k_mips_tlb_flush_extra(env, i); 2432 break; 2433 } 2434 } 2435 2436 env->CP0_Index |= 0x80000000; 2437 } 2438 } 2439 2440 static inline uint64_t get_entrylo_pfn_from_tlb(uint64_t tlb_pfn) 2441 { 2442 #if defined(TARGET_MIPS64) 2443 return tlb_pfn << 6; 2444 #else 2445 return (extract64(tlb_pfn, 0, 24) << 6) | /* PFN */ 2446 (extract64(tlb_pfn, 24, 32) << 32); /* PFNX */ 2447 #endif 2448 } 2449 2450 void r4k_helper_tlbr(CPUMIPSState *env) 2451 { 2452 bool mi = !!((env->CP0_Config5 >> CP0C5_MI) & 1); 2453 uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; 2454 uint32_t MMID = env->CP0_MemoryMapID; 2455 uint32_t tlb_mmid; 2456 r4k_tlb_t *tlb; 2457 int idx; 2458 2459 MMID = mi ? 
MMID : (uint32_t) ASID; 2460 idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb; 2461 tlb = &env->tlb->mmu.r4k.tlb[idx]; 2462 2463 tlb_mmid = mi ? tlb->MMID : (uint32_t) tlb->ASID; 2464 /* If this will change the current ASID/MMID, flush qemu's TLB. */ 2465 if (MMID != tlb_mmid) { 2466 cpu_mips_tlb_flush(env); 2467 } 2468 2469 r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb); 2470 2471 if (tlb->EHINV) { 2472 env->CP0_EntryHi = 1 << CP0EnHi_EHINV; 2473 env->CP0_PageMask = 0; 2474 env->CP0_EntryLo0 = 0; 2475 env->CP0_EntryLo1 = 0; 2476 } else { 2477 env->CP0_EntryHi = mi ? tlb->VPN : tlb->VPN | tlb->ASID; 2478 env->CP0_MemoryMapID = tlb->MMID; 2479 env->CP0_PageMask = tlb->PageMask; 2480 env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) | 2481 ((uint64_t)tlb->RI0 << CP0EnLo_RI) | 2482 ((uint64_t)tlb->XI0 << CP0EnLo_XI) | (tlb->C0 << 3) | 2483 get_entrylo_pfn_from_tlb(tlb->PFN[0] >> 12); 2484 env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) | 2485 ((uint64_t)tlb->RI1 << CP0EnLo_RI) | 2486 ((uint64_t)tlb->XI1 << CP0EnLo_XI) | (tlb->C1 << 3) | 2487 get_entrylo_pfn_from_tlb(tlb->PFN[1] >> 12); 2488 } 2489 } 2490 2491 void helper_tlbwi(CPUMIPSState *env) 2492 { 2493 env->tlb->helper_tlbwi(env); 2494 } 2495 2496 void helper_tlbwr(CPUMIPSState *env) 2497 { 2498 env->tlb->helper_tlbwr(env); 2499 } 2500 2501 void helper_tlbp(CPUMIPSState *env) 2502 { 2503 env->tlb->helper_tlbp(env); 2504 } 2505 2506 void helper_tlbr(CPUMIPSState *env) 2507 { 2508 env->tlb->helper_tlbr(env); 2509 } 2510 2511 void helper_tlbinv(CPUMIPSState *env) 2512 { 2513 env->tlb->helper_tlbinv(env); 2514 } 2515 2516 void helper_tlbinvf(CPUMIPSState *env) 2517 { 2518 env->tlb->helper_tlbinvf(env); 2519 } 2520 2521 static void global_invalidate_tlb(CPUMIPSState *env, 2522 uint32_t invMsgVPN2, 2523 uint8_t invMsgR, 2524 uint32_t invMsgMMid, 2525 bool invAll, 2526 bool invVAMMid, 2527 bool invMMid, 2528 bool invVA) 2529 { 2530 2531 int idx; 2532 r4k_tlb_t *tlb; 2533 bool VAMatch; 
2534 bool MMidMatch; 2535 2536 for (idx = 0; idx < env->tlb->nb_tlb; idx++) { 2537 tlb = &env->tlb->mmu.r4k.tlb[idx]; 2538 VAMatch = 2539 (((tlb->VPN & ~tlb->PageMask) == (invMsgVPN2 & ~tlb->PageMask)) 2540 #ifdef TARGET_MIPS64 2541 && 2542 (extract64(env->CP0_EntryHi, 62, 2) == invMsgR) 2543 #endif 2544 ); 2545 MMidMatch = tlb->MMID == invMsgMMid; 2546 if ((invAll && (idx > env->CP0_Wired)) || 2547 (VAMatch && invVAMMid && (tlb->G || MMidMatch)) || 2548 (VAMatch && invVA) || 2549 (MMidMatch && !(tlb->G) && invMMid)) { 2550 tlb->EHINV = 1; 2551 } 2552 } 2553 cpu_mips_tlb_flush(env); 2554 } 2555 2556 void helper_ginvt(CPUMIPSState *env, target_ulong arg, uint32_t type) 2557 { 2558 bool invAll = type == 0; 2559 bool invVA = type == 1; 2560 bool invMMid = type == 2; 2561 bool invVAMMid = type == 3; 2562 uint32_t invMsgVPN2 = arg & (TARGET_PAGE_MASK << 1); 2563 uint8_t invMsgR = 0; 2564 uint32_t invMsgMMid = env->CP0_MemoryMapID; 2565 CPUState *other_cs = first_cpu; 2566 2567 #ifdef TARGET_MIPS64 2568 invMsgR = extract64(arg, 62, 2); 2569 #endif 2570 2571 CPU_FOREACH(other_cs) { 2572 MIPSCPU *other_cpu = MIPS_CPU(other_cs); 2573 global_invalidate_tlb(&other_cpu->env, invMsgVPN2, invMsgR, invMsgMMid, 2574 invAll, invVAMMid, invMMid, invVA); 2575 } 2576 } 2577 2578 /* Specials */ 2579 target_ulong helper_di(CPUMIPSState *env) 2580 { 2581 target_ulong t0 = env->CP0_Status; 2582 2583 env->CP0_Status = t0 & ~(1 << CP0St_IE); 2584 return t0; 2585 } 2586 2587 target_ulong helper_ei(CPUMIPSState *env) 2588 { 2589 target_ulong t0 = env->CP0_Status; 2590 2591 env->CP0_Status = t0 | (1 << CP0St_IE); 2592 return t0; 2593 } 2594 2595 static void debug_pre_eret(CPUMIPSState *env) 2596 { 2597 if (qemu_loglevel_mask(CPU_LOG_EXEC)) { 2598 qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, 2599 env->active_tc.PC, env->CP0_EPC); 2600 if (env->CP0_Status & (1 << CP0St_ERL)) { 2601 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); 2602 } 2603 if (env->hflags & MIPS_HFLAG_DM) 
{ 2604 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); 2605 } 2606 qemu_log("\n"); 2607 } 2608 } 2609 2610 static void debug_post_eret(CPUMIPSState *env) 2611 { 2612 if (qemu_loglevel_mask(CPU_LOG_EXEC)) { 2613 qemu_log(" => PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx, 2614 env->active_tc.PC, env->CP0_EPC); 2615 if (env->CP0_Status & (1 << CP0St_ERL)) { 2616 qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC); 2617 } 2618 if (env->hflags & MIPS_HFLAG_DM) { 2619 qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC); 2620 } 2621 switch (cpu_mmu_index(env, false)) { 2622 case 3: 2623 qemu_log(", ERL\n"); 2624 break; 2625 case MIPS_HFLAG_UM: 2626 qemu_log(", UM\n"); 2627 break; 2628 case MIPS_HFLAG_SM: 2629 qemu_log(", SM\n"); 2630 break; 2631 case MIPS_HFLAG_KM: 2632 qemu_log("\n"); 2633 break; 2634 default: 2635 cpu_abort(env_cpu(env), "Invalid MMU mode!\n"); 2636 break; 2637 } 2638 } 2639 } 2640 2641 static void set_pc(CPUMIPSState *env, target_ulong error_pc) 2642 { 2643 env->active_tc.PC = error_pc & ~(target_ulong)1; 2644 if (error_pc & 1) { 2645 env->hflags |= MIPS_HFLAG_M16; 2646 } else { 2647 env->hflags &= ~(MIPS_HFLAG_M16); 2648 } 2649 } 2650 2651 static inline void exception_return(CPUMIPSState *env) 2652 { 2653 debug_pre_eret(env); 2654 if (env->CP0_Status & (1 << CP0St_ERL)) { 2655 set_pc(env, env->CP0_ErrorEPC); 2656 env->CP0_Status &= ~(1 << CP0St_ERL); 2657 } else { 2658 set_pc(env, env->CP0_EPC); 2659 env->CP0_Status &= ~(1 << CP0St_EXL); 2660 } 2661 compute_hflags(env); 2662 debug_post_eret(env); 2663 } 2664 2665 void helper_eret(CPUMIPSState *env) 2666 { 2667 exception_return(env); 2668 env->CP0_LLAddr = 1; 2669 env->lladdr = 1; 2670 } 2671 2672 void helper_eretnc(CPUMIPSState *env) 2673 { 2674 exception_return(env); 2675 } 2676 2677 void helper_deret(CPUMIPSState *env) 2678 { 2679 debug_pre_eret(env); 2680 2681 env->hflags &= ~MIPS_HFLAG_DM; 2682 compute_hflags(env); 2683 2684 set_pc(env, env->CP0_DEPC); 2685 2686 debug_post_eret(env); 2687 } 2688 
#endif /* !CONFIG_USER_ONLY */ 2689 2690 static inline void check_hwrena(CPUMIPSState *env, int reg, uintptr_t pc) 2691 { 2692 if ((env->hflags & MIPS_HFLAG_CP0) || (env->CP0_HWREna & (1 << reg))) { 2693 return; 2694 } 2695 do_raise_exception(env, EXCP_RI, pc); 2696 } 2697 2698 target_ulong helper_rdhwr_cpunum(CPUMIPSState *env) 2699 { 2700 check_hwrena(env, 0, GETPC()); 2701 return env->CP0_EBase & 0x3ff; 2702 } 2703 2704 target_ulong helper_rdhwr_synci_step(CPUMIPSState *env) 2705 { 2706 check_hwrena(env, 1, GETPC()); 2707 return env->SYNCI_Step; 2708 } 2709 2710 target_ulong helper_rdhwr_cc(CPUMIPSState *env) 2711 { 2712 check_hwrena(env, 2, GETPC()); 2713 #ifdef CONFIG_USER_ONLY 2714 return env->CP0_Count; 2715 #else 2716 return (int32_t)cpu_mips_get_count(env); 2717 #endif 2718 } 2719 2720 target_ulong helper_rdhwr_ccres(CPUMIPSState *env) 2721 { 2722 check_hwrena(env, 3, GETPC()); 2723 return env->CCRes; 2724 } 2725 2726 target_ulong helper_rdhwr_performance(CPUMIPSState *env) 2727 { 2728 check_hwrena(env, 4, GETPC()); 2729 return env->CP0_Performance0; 2730 } 2731 2732 target_ulong helper_rdhwr_xnp(CPUMIPSState *env) 2733 { 2734 check_hwrena(env, 5, GETPC()); 2735 return (env->CP0_Config5 >> CP0C5_XNP) & 1; 2736 } 2737 2738 void helper_pmon(CPUMIPSState *env, int function) 2739 { 2740 function /= 2; 2741 switch (function) { 2742 case 2: /* TODO: char inbyte(int waitflag); */ 2743 if (env->active_tc.gpr[4] == 0) { 2744 env->active_tc.gpr[2] = -1; 2745 } 2746 /* Fall through */ 2747 case 11: /* TODO: char inbyte (void); */ 2748 env->active_tc.gpr[2] = -1; 2749 break; 2750 case 3: 2751 case 12: 2752 printf("%c", (char)(env->active_tc.gpr[4] & 0xFF)); 2753 break; 2754 case 17: 2755 break; 2756 case 158: 2757 { 2758 unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4]; 2759 printf("%s", fmt); 2760 } 2761 break; 2762 } 2763 } 2764 2765 void helper_wait(CPUMIPSState *env) 2766 { 2767 CPUState *cs = env_cpu(env); 2768 2769 cs->halted = 1; 2770 
cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); 2771 /* 2772 * Last instruction in the block, PC was updated before 2773 * - no need to recover PC and icount. 2774 */ 2775 raise_exception(env, EXCP_HLT); 2776 } 2777 2778 #if !defined(CONFIG_USER_ONLY) 2779 2780 void mips_cpu_do_unaligned_access(CPUState *cs, vaddr addr, 2781 MMUAccessType access_type, 2782 int mmu_idx, uintptr_t retaddr) 2783 { 2784 MIPSCPU *cpu = MIPS_CPU(cs); 2785 CPUMIPSState *env = &cpu->env; 2786 int error_code = 0; 2787 int excp; 2788 2789 if (!(env->hflags & MIPS_HFLAG_DM)) { 2790 env->CP0_BadVAddr = addr; 2791 } 2792 2793 if (access_type == MMU_DATA_STORE) { 2794 excp = EXCP_AdES; 2795 } else { 2796 excp = EXCP_AdEL; 2797 if (access_type == MMU_INST_FETCH) { 2798 error_code |= EXCP_INST_NOTAVAIL; 2799 } 2800 } 2801 2802 do_raise_exception_err(env, excp, error_code, retaddr); 2803 } 2804 2805 void mips_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, 2806 vaddr addr, unsigned size, 2807 MMUAccessType access_type, 2808 int mmu_idx, MemTxAttrs attrs, 2809 MemTxResult response, uintptr_t retaddr) 2810 { 2811 MIPSCPU *cpu = MIPS_CPU(cs); 2812 CPUMIPSState *env = &cpu->env; 2813 2814 if (access_type == MMU_INST_FETCH) { 2815 do_raise_exception(env, EXCP_IBE, retaddr); 2816 } else { 2817 do_raise_exception(env, EXCP_DBE, retaddr); 2818 } 2819 } 2820 #endif /* !CONFIG_USER_ONLY */ 2821 2822 /* Complex FPU operations which may need stack space. 
*/ 2823 2824 #define FLOAT_TWO32 make_float32(1 << 30) 2825 #define FLOAT_TWO64 make_float64(1ULL << 62) 2826 2827 #define FP_TO_INT32_OVERFLOW 0x7fffffff 2828 #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL 2829 2830 /* convert MIPS rounding mode in FCR31 to IEEE library */ 2831 unsigned int ieee_rm[] = { 2832 float_round_nearest_even, 2833 float_round_to_zero, 2834 float_round_up, 2835 float_round_down 2836 }; 2837 2838 target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg) 2839 { 2840 target_ulong arg1 = 0; 2841 2842 switch (reg) { 2843 case 0: 2844 arg1 = (int32_t)env->active_fpu.fcr0; 2845 break; 2846 case 1: 2847 /* UFR Support - Read Status FR */ 2848 if (env->active_fpu.fcr0 & (1 << FCR0_UFRP)) { 2849 if (env->CP0_Config5 & (1 << CP0C5_UFR)) { 2850 arg1 = (int32_t) 2851 ((env->CP0_Status & (1 << CP0St_FR)) >> CP0St_FR); 2852 } else { 2853 do_raise_exception(env, EXCP_RI, GETPC()); 2854 } 2855 } 2856 break; 2857 case 5: 2858 /* FRE Support - read Config5.FRE bit */ 2859 if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) { 2860 if (env->CP0_Config5 & (1 << CP0C5_UFE)) { 2861 arg1 = (env->CP0_Config5 >> CP0C5_FRE) & 1; 2862 } else { 2863 helper_raise_exception(env, EXCP_RI); 2864 } 2865 } 2866 break; 2867 case 25: 2868 arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | 2869 ((env->active_fpu.fcr31 >> 23) & 0x1); 2870 break; 2871 case 26: 2872 arg1 = env->active_fpu.fcr31 & 0x0003f07c; 2873 break; 2874 case 28: 2875 arg1 = (env->active_fpu.fcr31 & 0x00000f83) | 2876 ((env->active_fpu.fcr31 >> 22) & 0x4); 2877 break; 2878 default: 2879 arg1 = (int32_t)env->active_fpu.fcr31; 2880 break; 2881 } 2882 2883 return arg1; 2884 } 2885 2886 void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t fs, uint32_t rt) 2887 { 2888 switch (fs) { 2889 case 1: 2890 /* UFR Alias - Reset Status FR */ 2891 if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { 2892 return; 2893 } 2894 if (env->CP0_Config5 & (1 << CP0C5_UFR)) { 2895 env->CP0_Status &= ~(1 << CP0St_FR); 
2896 compute_hflags(env); 2897 } else { 2898 do_raise_exception(env, EXCP_RI, GETPC()); 2899 } 2900 break; 2901 case 4: 2902 /* UNFR Alias - Set Status FR */ 2903 if (!((env->active_fpu.fcr0 & (1 << FCR0_UFRP)) && (rt == 0))) { 2904 return; 2905 } 2906 if (env->CP0_Config5 & (1 << CP0C5_UFR)) { 2907 env->CP0_Status |= (1 << CP0St_FR); 2908 compute_hflags(env); 2909 } else { 2910 do_raise_exception(env, EXCP_RI, GETPC()); 2911 } 2912 break; 2913 case 5: 2914 /* FRE Support - clear Config5.FRE bit */ 2915 if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) { 2916 return; 2917 } 2918 if (env->CP0_Config5 & (1 << CP0C5_UFE)) { 2919 env->CP0_Config5 &= ~(1 << CP0C5_FRE); 2920 compute_hflags(env); 2921 } else { 2922 helper_raise_exception(env, EXCP_RI); 2923 } 2924 break; 2925 case 6: 2926 /* FRE Support - set Config5.FRE bit */ 2927 if (!((env->active_fpu.fcr0 & (1 << FCR0_FREP)) && (rt == 0))) { 2928 return; 2929 } 2930 if (env->CP0_Config5 & (1 << CP0C5_UFE)) { 2931 env->CP0_Config5 |= (1 << CP0C5_FRE); 2932 compute_hflags(env); 2933 } else { 2934 helper_raise_exception(env, EXCP_RI); 2935 } 2936 break; 2937 case 25: 2938 if ((env->insn_flags & ISA_MIPS32R6) || (arg1 & 0xffffff00)) { 2939 return; 2940 } 2941 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | 2942 ((arg1 & 0xfe) << 24) | 2943 ((arg1 & 0x1) << 23); 2944 break; 2945 case 26: 2946 if (arg1 & 0x007c0000) { 2947 return; 2948 } 2949 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | 2950 (arg1 & 0x0003f07c); 2951 break; 2952 case 28: 2953 if (arg1 & 0x007c0000) { 2954 return; 2955 } 2956 env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | 2957 (arg1 & 0x00000f83) | 2958 ((arg1 & 0x4) << 22); 2959 break; 2960 case 31: 2961 env->active_fpu.fcr31 = (arg1 & env->active_fpu.fcr31_rw_bitmask) | 2962 (env->active_fpu.fcr31 & ~(env->active_fpu.fcr31_rw_bitmask)); 2963 break; 2964 default: 2965 if (env->insn_flags & ISA_MIPS32R6) { 2966 do_raise_exception(env, EXCP_RI, 
GETPC()); 2967 } 2968 return; 2969 } 2970 restore_fp_status(env); 2971 set_float_exception_flags(0, &env->active_fpu.fp_status); 2972 if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & 2973 GET_FP_CAUSE(env->active_fpu.fcr31)) { 2974 do_raise_exception(env, EXCP_FPE, GETPC()); 2975 } 2976 } 2977 2978 int ieee_ex_to_mips(int xcpt) 2979 { 2980 int ret = 0; 2981 if (xcpt) { 2982 if (xcpt & float_flag_invalid) { 2983 ret |= FP_INVALID; 2984 } 2985 if (xcpt & float_flag_overflow) { 2986 ret |= FP_OVERFLOW; 2987 } 2988 if (xcpt & float_flag_underflow) { 2989 ret |= FP_UNDERFLOW; 2990 } 2991 if (xcpt & float_flag_divbyzero) { 2992 ret |= FP_DIV0; 2993 } 2994 if (xcpt & float_flag_inexact) { 2995 ret |= FP_INEXACT; 2996 } 2997 } 2998 return ret; 2999 } 3000 3001 static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc) 3002 { 3003 int tmp = ieee_ex_to_mips(get_float_exception_flags( 3004 &env->active_fpu.fp_status)); 3005 3006 SET_FP_CAUSE(env->active_fpu.fcr31, tmp); 3007 3008 if (tmp) { 3009 set_float_exception_flags(0, &env->active_fpu.fp_status); 3010 3011 if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) { 3012 do_raise_exception(env, EXCP_FPE, pc); 3013 } else { 3014 UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp); 3015 } 3016 } 3017 } 3018 3019 /* 3020 * Float support. 3021 * Single precition routines have a "s" suffix, double precision a 3022 * "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps", 3023 * paired single lower "pl", paired single upper "pu". 
3024 */ 3025 3026 /* unary operations, modifying fp status */ 3027 uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0) 3028 { 3029 fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status); 3030 update_fcr31(env, GETPC()); 3031 return fdt0; 3032 } 3033 3034 uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0) 3035 { 3036 fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status); 3037 update_fcr31(env, GETPC()); 3038 return fst0; 3039 } 3040 3041 uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0) 3042 { 3043 uint64_t fdt2; 3044 3045 fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status); 3046 update_fcr31(env, GETPC()); 3047 return fdt2; 3048 } 3049 3050 uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0) 3051 { 3052 uint64_t fdt2; 3053 3054 fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status); 3055 update_fcr31(env, GETPC()); 3056 return fdt2; 3057 } 3058 3059 uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0) 3060 { 3061 uint64_t fdt2; 3062 3063 fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status); 3064 update_fcr31(env, GETPC()); 3065 return fdt2; 3066 } 3067 3068 uint64_t helper_float_cvt_l_d(CPUMIPSState *env, uint64_t fdt0) 3069 { 3070 uint64_t dt2; 3071 3072 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); 3073 if (get_float_exception_flags(&env->active_fpu.fp_status) 3074 & (float_flag_invalid | float_flag_overflow)) { 3075 dt2 = FP_TO_INT64_OVERFLOW; 3076 } 3077 update_fcr31(env, GETPC()); 3078 return dt2; 3079 } 3080 3081 uint64_t helper_float_cvt_l_s(CPUMIPSState *env, uint32_t fst0) 3082 { 3083 uint64_t dt2; 3084 3085 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); 3086 if (get_float_exception_flags(&env->active_fpu.fp_status) 3087 & (float_flag_invalid | float_flag_overflow)) { 3088 dt2 = FP_TO_INT64_OVERFLOW; 3089 } 3090 update_fcr31(env, GETPC()); 3091 return dt2; 3092 } 3093 3094 uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0) 3095 { 3096 uint32_t fst2; 3097 
uint32_t fsth2; 3098 3099 fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); 3100 fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status); 3101 update_fcr31(env, GETPC()); 3102 return ((uint64_t)fsth2 << 32) | fst2; 3103 } 3104 3105 uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0) 3106 { 3107 uint32_t wt2; 3108 uint32_t wth2; 3109 int excp, excph; 3110 3111 wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); 3112 excp = get_float_exception_flags(&env->active_fpu.fp_status); 3113 if (excp & (float_flag_overflow | float_flag_invalid)) { 3114 wt2 = FP_TO_INT32_OVERFLOW; 3115 } 3116 3117 set_float_exception_flags(0, &env->active_fpu.fp_status); 3118 wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status); 3119 excph = get_float_exception_flags(&env->active_fpu.fp_status); 3120 if (excph & (float_flag_overflow | float_flag_invalid)) { 3121 wth2 = FP_TO_INT32_OVERFLOW; 3122 } 3123 3124 set_float_exception_flags(excp | excph, &env->active_fpu.fp_status); 3125 update_fcr31(env, GETPC()); 3126 3127 return ((uint64_t)wth2 << 32) | wt2; 3128 } 3129 3130 uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0) 3131 { 3132 uint32_t fst2; 3133 3134 fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status); 3135 update_fcr31(env, GETPC()); 3136 return fst2; 3137 } 3138 3139 uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0) 3140 { 3141 uint32_t fst2; 3142 3143 fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status); 3144 update_fcr31(env, GETPC()); 3145 return fst2; 3146 } 3147 3148 uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0) 3149 { 3150 uint32_t fst2; 3151 3152 fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status); 3153 update_fcr31(env, GETPC()); 3154 return fst2; 3155 } 3156 3157 uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0) 3158 { 3159 uint32_t wt2; 3160 3161 wt2 = wt0; 3162 update_fcr31(env, GETPC()); 3163 return wt2; 3164 } 3165 3166 uint32_t 
helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0) 3167 { 3168 uint32_t wt2; 3169 3170 wt2 = wth0; 3171 update_fcr31(env, GETPC()); 3172 return wt2; 3173 } 3174 3175 uint32_t helper_float_cvt_w_s(CPUMIPSState *env, uint32_t fst0) 3176 { 3177 uint32_t wt2; 3178 3179 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); 3180 if (get_float_exception_flags(&env->active_fpu.fp_status) 3181 & (float_flag_invalid | float_flag_overflow)) { 3182 wt2 = FP_TO_INT32_OVERFLOW; 3183 } 3184 update_fcr31(env, GETPC()); 3185 return wt2; 3186 } 3187 3188 uint32_t helper_float_cvt_w_d(CPUMIPSState *env, uint64_t fdt0) 3189 { 3190 uint32_t wt2; 3191 3192 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); 3193 if (get_float_exception_flags(&env->active_fpu.fp_status) 3194 & (float_flag_invalid | float_flag_overflow)) { 3195 wt2 = FP_TO_INT32_OVERFLOW; 3196 } 3197 update_fcr31(env, GETPC()); 3198 return wt2; 3199 } 3200 3201 uint64_t helper_float_round_l_d(CPUMIPSState *env, uint64_t fdt0) 3202 { 3203 uint64_t dt2; 3204 3205 set_float_rounding_mode(float_round_nearest_even, 3206 &env->active_fpu.fp_status); 3207 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); 3208 restore_rounding_mode(env); 3209 if (get_float_exception_flags(&env->active_fpu.fp_status) 3210 & (float_flag_invalid | float_flag_overflow)) { 3211 dt2 = FP_TO_INT64_OVERFLOW; 3212 } 3213 update_fcr31(env, GETPC()); 3214 return dt2; 3215 } 3216 3217 uint64_t helper_float_round_l_s(CPUMIPSState *env, uint32_t fst0) 3218 { 3219 uint64_t dt2; 3220 3221 set_float_rounding_mode(float_round_nearest_even, 3222 &env->active_fpu.fp_status); 3223 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); 3224 restore_rounding_mode(env); 3225 if (get_float_exception_flags(&env->active_fpu.fp_status) 3226 & (float_flag_invalid | float_flag_overflow)) { 3227 dt2 = FP_TO_INT64_OVERFLOW; 3228 } 3229 update_fcr31(env, GETPC()); 3230 return dt2; 3231 } 3232 3233 uint32_t helper_float_round_w_d(CPUMIPSState *env, 
uint64_t fdt0) 3234 { 3235 uint32_t wt2; 3236 3237 set_float_rounding_mode(float_round_nearest_even, 3238 &env->active_fpu.fp_status); 3239 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); 3240 restore_rounding_mode(env); 3241 if (get_float_exception_flags(&env->active_fpu.fp_status) 3242 & (float_flag_invalid | float_flag_overflow)) { 3243 wt2 = FP_TO_INT32_OVERFLOW; 3244 } 3245 update_fcr31(env, GETPC()); 3246 return wt2; 3247 } 3248 3249 uint32_t helper_float_round_w_s(CPUMIPSState *env, uint32_t fst0) 3250 { 3251 uint32_t wt2; 3252 3253 set_float_rounding_mode(float_round_nearest_even, 3254 &env->active_fpu.fp_status); 3255 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); 3256 restore_rounding_mode(env); 3257 if (get_float_exception_flags(&env->active_fpu.fp_status) 3258 & (float_flag_invalid | float_flag_overflow)) { 3259 wt2 = FP_TO_INT32_OVERFLOW; 3260 } 3261 update_fcr31(env, GETPC()); 3262 return wt2; 3263 } 3264 3265 uint64_t helper_float_trunc_l_d(CPUMIPSState *env, uint64_t fdt0) 3266 { 3267 uint64_t dt2; 3268 3269 dt2 = float64_to_int64_round_to_zero(fdt0, 3270 &env->active_fpu.fp_status); 3271 if (get_float_exception_flags(&env->active_fpu.fp_status) 3272 & (float_flag_invalid | float_flag_overflow)) { 3273 dt2 = FP_TO_INT64_OVERFLOW; 3274 } 3275 update_fcr31(env, GETPC()); 3276 return dt2; 3277 } 3278 3279 uint64_t helper_float_trunc_l_s(CPUMIPSState *env, uint32_t fst0) 3280 { 3281 uint64_t dt2; 3282 3283 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); 3284 if (get_float_exception_flags(&env->active_fpu.fp_status) 3285 & (float_flag_invalid | float_flag_overflow)) { 3286 dt2 = FP_TO_INT64_OVERFLOW; 3287 } 3288 update_fcr31(env, GETPC()); 3289 return dt2; 3290 } 3291 3292 uint32_t helper_float_trunc_w_d(CPUMIPSState *env, uint64_t fdt0) 3293 { 3294 uint32_t wt2; 3295 3296 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); 3297 if (get_float_exception_flags(&env->active_fpu.fp_status) 3298 
& (float_flag_invalid | float_flag_overflow)) { 3299 wt2 = FP_TO_INT32_OVERFLOW; 3300 } 3301 update_fcr31(env, GETPC()); 3302 return wt2; 3303 } 3304 3305 uint32_t helper_float_trunc_w_s(CPUMIPSState *env, uint32_t fst0) 3306 { 3307 uint32_t wt2; 3308 3309 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); 3310 if (get_float_exception_flags(&env->active_fpu.fp_status) 3311 & (float_flag_invalid | float_flag_overflow)) { 3312 wt2 = FP_TO_INT32_OVERFLOW; 3313 } 3314 update_fcr31(env, GETPC()); 3315 return wt2; 3316 } 3317 3318 uint64_t helper_float_ceil_l_d(CPUMIPSState *env, uint64_t fdt0) 3319 { 3320 uint64_t dt2; 3321 3322 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); 3323 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); 3324 restore_rounding_mode(env); 3325 if (get_float_exception_flags(&env->active_fpu.fp_status) 3326 & (float_flag_invalid | float_flag_overflow)) { 3327 dt2 = FP_TO_INT64_OVERFLOW; 3328 } 3329 update_fcr31(env, GETPC()); 3330 return dt2; 3331 } 3332 3333 uint64_t helper_float_ceil_l_s(CPUMIPSState *env, uint32_t fst0) 3334 { 3335 uint64_t dt2; 3336 3337 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); 3338 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); 3339 restore_rounding_mode(env); 3340 if (get_float_exception_flags(&env->active_fpu.fp_status) 3341 & (float_flag_invalid | float_flag_overflow)) { 3342 dt2 = FP_TO_INT64_OVERFLOW; 3343 } 3344 update_fcr31(env, GETPC()); 3345 return dt2; 3346 } 3347 3348 uint32_t helper_float_ceil_w_d(CPUMIPSState *env, uint64_t fdt0) 3349 { 3350 uint32_t wt2; 3351 3352 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); 3353 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); 3354 restore_rounding_mode(env); 3355 if (get_float_exception_flags(&env->active_fpu.fp_status) 3356 & (float_flag_invalid | float_flag_overflow)) { 3357 wt2 = FP_TO_INT32_OVERFLOW; 3358 } 3359 update_fcr31(env, GETPC()); 3360 return 
wt2; 3361 } 3362 3363 uint32_t helper_float_ceil_w_s(CPUMIPSState *env, uint32_t fst0) 3364 { 3365 uint32_t wt2; 3366 3367 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); 3368 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); 3369 restore_rounding_mode(env); 3370 if (get_float_exception_flags(&env->active_fpu.fp_status) 3371 & (float_flag_invalid | float_flag_overflow)) { 3372 wt2 = FP_TO_INT32_OVERFLOW; 3373 } 3374 update_fcr31(env, GETPC()); 3375 return wt2; 3376 } 3377 3378 uint64_t helper_float_floor_l_d(CPUMIPSState *env, uint64_t fdt0) 3379 { 3380 uint64_t dt2; 3381 3382 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); 3383 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); 3384 restore_rounding_mode(env); 3385 if (get_float_exception_flags(&env->active_fpu.fp_status) 3386 & (float_flag_invalid | float_flag_overflow)) { 3387 dt2 = FP_TO_INT64_OVERFLOW; 3388 } 3389 update_fcr31(env, GETPC()); 3390 return dt2; 3391 } 3392 3393 uint64_t helper_float_floor_l_s(CPUMIPSState *env, uint32_t fst0) 3394 { 3395 uint64_t dt2; 3396 3397 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); 3398 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); 3399 restore_rounding_mode(env); 3400 if (get_float_exception_flags(&env->active_fpu.fp_status) 3401 & (float_flag_invalid | float_flag_overflow)) { 3402 dt2 = FP_TO_INT64_OVERFLOW; 3403 } 3404 update_fcr31(env, GETPC()); 3405 return dt2; 3406 } 3407 3408 uint32_t helper_float_floor_w_d(CPUMIPSState *env, uint64_t fdt0) 3409 { 3410 uint32_t wt2; 3411 3412 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); 3413 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); 3414 restore_rounding_mode(env); 3415 if (get_float_exception_flags(&env->active_fpu.fp_status) 3416 & (float_flag_invalid | float_flag_overflow)) { 3417 wt2 = FP_TO_INT32_OVERFLOW; 3418 } 3419 update_fcr31(env, GETPC()); 3420 return wt2; 3421 } 3422 3423 uint32_t 
helper_float_floor_w_s(CPUMIPSState *env, uint32_t fst0) 3424 { 3425 uint32_t wt2; 3426 3427 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); 3428 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); 3429 restore_rounding_mode(env); 3430 if (get_float_exception_flags(&env->active_fpu.fp_status) 3431 & (float_flag_invalid | float_flag_overflow)) { 3432 wt2 = FP_TO_INT32_OVERFLOW; 3433 } 3434 update_fcr31(env, GETPC()); 3435 return wt2; 3436 } 3437 3438 uint64_t helper_float_cvt_2008_l_d(CPUMIPSState *env, uint64_t fdt0) 3439 { 3440 uint64_t dt2; 3441 3442 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); 3443 if (get_float_exception_flags(&env->active_fpu.fp_status) 3444 & float_flag_invalid) { 3445 if (float64_is_any_nan(fdt0)) { 3446 dt2 = 0; 3447 } 3448 } 3449 update_fcr31(env, GETPC()); 3450 return dt2; 3451 } 3452 3453 uint64_t helper_float_cvt_2008_l_s(CPUMIPSState *env, uint32_t fst0) 3454 { 3455 uint64_t dt2; 3456 3457 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); 3458 if (get_float_exception_flags(&env->active_fpu.fp_status) 3459 & float_flag_invalid) { 3460 if (float32_is_any_nan(fst0)) { 3461 dt2 = 0; 3462 } 3463 } 3464 update_fcr31(env, GETPC()); 3465 return dt2; 3466 } 3467 3468 uint32_t helper_float_cvt_2008_w_d(CPUMIPSState *env, uint64_t fdt0) 3469 { 3470 uint32_t wt2; 3471 3472 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); 3473 if (get_float_exception_flags(&env->active_fpu.fp_status) 3474 & float_flag_invalid) { 3475 if (float64_is_any_nan(fdt0)) { 3476 wt2 = 0; 3477 } 3478 } 3479 update_fcr31(env, GETPC()); 3480 return wt2; 3481 } 3482 3483 uint32_t helper_float_cvt_2008_w_s(CPUMIPSState *env, uint32_t fst0) 3484 { 3485 uint32_t wt2; 3486 3487 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); 3488 if (get_float_exception_flags(&env->active_fpu.fp_status) 3489 & float_flag_invalid) { 3490 if (float32_is_any_nan(fst0)) { 3491 wt2 = 0; 3492 } 3493 } 3494 update_fcr31(env, GETPC()); 
3495 return wt2; 3496 } 3497 3498 uint64_t helper_float_round_2008_l_d(CPUMIPSState *env, uint64_t fdt0) 3499 { 3500 uint64_t dt2; 3501 3502 set_float_rounding_mode(float_round_nearest_even, 3503 &env->active_fpu.fp_status); 3504 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); 3505 restore_rounding_mode(env); 3506 if (get_float_exception_flags(&env->active_fpu.fp_status) 3507 & float_flag_invalid) { 3508 if (float64_is_any_nan(fdt0)) { 3509 dt2 = 0; 3510 } 3511 } 3512 update_fcr31(env, GETPC()); 3513 return dt2; 3514 } 3515 3516 uint64_t helper_float_round_2008_l_s(CPUMIPSState *env, uint32_t fst0) 3517 { 3518 uint64_t dt2; 3519 3520 set_float_rounding_mode(float_round_nearest_even, 3521 &env->active_fpu.fp_status); 3522 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); 3523 restore_rounding_mode(env); 3524 if (get_float_exception_flags(&env->active_fpu.fp_status) 3525 & float_flag_invalid) { 3526 if (float32_is_any_nan(fst0)) { 3527 dt2 = 0; 3528 } 3529 } 3530 update_fcr31(env, GETPC()); 3531 return dt2; 3532 } 3533 3534 uint32_t helper_float_round_2008_w_d(CPUMIPSState *env, uint64_t fdt0) 3535 { 3536 uint32_t wt2; 3537 3538 set_float_rounding_mode(float_round_nearest_even, 3539 &env->active_fpu.fp_status); 3540 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); 3541 restore_rounding_mode(env); 3542 if (get_float_exception_flags(&env->active_fpu.fp_status) 3543 & float_flag_invalid) { 3544 if (float64_is_any_nan(fdt0)) { 3545 wt2 = 0; 3546 } 3547 } 3548 update_fcr31(env, GETPC()); 3549 return wt2; 3550 } 3551 3552 uint32_t helper_float_round_2008_w_s(CPUMIPSState *env, uint32_t fst0) 3553 { 3554 uint32_t wt2; 3555 3556 set_float_rounding_mode(float_round_nearest_even, 3557 &env->active_fpu.fp_status); 3558 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); 3559 restore_rounding_mode(env); 3560 if (get_float_exception_flags(&env->active_fpu.fp_status) 3561 & float_flag_invalid) { 3562 if (float32_is_any_nan(fst0)) { 3563 wt2 = 0; 
3564 } 3565 } 3566 update_fcr31(env, GETPC()); 3567 return wt2; 3568 } 3569 3570 uint64_t helper_float_trunc_2008_l_d(CPUMIPSState *env, uint64_t fdt0) 3571 { 3572 uint64_t dt2; 3573 3574 dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status); 3575 if (get_float_exception_flags(&env->active_fpu.fp_status) 3576 & float_flag_invalid) { 3577 if (float64_is_any_nan(fdt0)) { 3578 dt2 = 0; 3579 } 3580 } 3581 update_fcr31(env, GETPC()); 3582 return dt2; 3583 } 3584 3585 uint64_t helper_float_trunc_2008_l_s(CPUMIPSState *env, uint32_t fst0) 3586 { 3587 uint64_t dt2; 3588 3589 dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); 3590 if (get_float_exception_flags(&env->active_fpu.fp_status) 3591 & float_flag_invalid) { 3592 if (float32_is_any_nan(fst0)) { 3593 dt2 = 0; 3594 } 3595 } 3596 update_fcr31(env, GETPC()); 3597 return dt2; 3598 } 3599 3600 uint32_t helper_float_trunc_2008_w_d(CPUMIPSState *env, uint64_t fdt0) 3601 { 3602 uint32_t wt2; 3603 3604 wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); 3605 if (get_float_exception_flags(&env->active_fpu.fp_status) 3606 & float_flag_invalid) { 3607 if (float64_is_any_nan(fdt0)) { 3608 wt2 = 0; 3609 } 3610 } 3611 update_fcr31(env, GETPC()); 3612 return wt2; 3613 } 3614 3615 uint32_t helper_float_trunc_2008_w_s(CPUMIPSState *env, uint32_t fst0) 3616 { 3617 uint32_t wt2; 3618 3619 wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); 3620 if (get_float_exception_flags(&env->active_fpu.fp_status) 3621 & float_flag_invalid) { 3622 if (float32_is_any_nan(fst0)) { 3623 wt2 = 0; 3624 } 3625 } 3626 update_fcr31(env, GETPC()); 3627 return wt2; 3628 } 3629 3630 uint64_t helper_float_ceil_2008_l_d(CPUMIPSState *env, uint64_t fdt0) 3631 { 3632 uint64_t dt2; 3633 3634 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); 3635 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); 3636 restore_rounding_mode(env); 3637 if 
(get_float_exception_flags(&env->active_fpu.fp_status) 3638 & float_flag_invalid) { 3639 if (float64_is_any_nan(fdt0)) { 3640 dt2 = 0; 3641 } 3642 } 3643 update_fcr31(env, GETPC()); 3644 return dt2; 3645 } 3646 3647 uint64_t helper_float_ceil_2008_l_s(CPUMIPSState *env, uint32_t fst0) 3648 { 3649 uint64_t dt2; 3650 3651 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); 3652 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); 3653 restore_rounding_mode(env); 3654 if (get_float_exception_flags(&env->active_fpu.fp_status) 3655 & float_flag_invalid) { 3656 if (float32_is_any_nan(fst0)) { 3657 dt2 = 0; 3658 } 3659 } 3660 update_fcr31(env, GETPC()); 3661 return dt2; 3662 } 3663 3664 uint32_t helper_float_ceil_2008_w_d(CPUMIPSState *env, uint64_t fdt0) 3665 { 3666 uint32_t wt2; 3667 3668 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); 3669 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); 3670 restore_rounding_mode(env); 3671 if (get_float_exception_flags(&env->active_fpu.fp_status) 3672 & float_flag_invalid) { 3673 if (float64_is_any_nan(fdt0)) { 3674 wt2 = 0; 3675 } 3676 } 3677 update_fcr31(env, GETPC()); 3678 return wt2; 3679 } 3680 3681 uint32_t helper_float_ceil_2008_w_s(CPUMIPSState *env, uint32_t fst0) 3682 { 3683 uint32_t wt2; 3684 3685 set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); 3686 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); 3687 restore_rounding_mode(env); 3688 if (get_float_exception_flags(&env->active_fpu.fp_status) 3689 & float_flag_invalid) { 3690 if (float32_is_any_nan(fst0)) { 3691 wt2 = 0; 3692 } 3693 } 3694 update_fcr31(env, GETPC()); 3695 return wt2; 3696 } 3697 3698 uint64_t helper_float_floor_2008_l_d(CPUMIPSState *env, uint64_t fdt0) 3699 { 3700 uint64_t dt2; 3701 3702 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); 3703 dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); 3704 restore_rounding_mode(env); 3705 if 
(get_float_exception_flags(&env->active_fpu.fp_status) 3706 & float_flag_invalid) { 3707 if (float64_is_any_nan(fdt0)) { 3708 dt2 = 0; 3709 } 3710 } 3711 update_fcr31(env, GETPC()); 3712 return dt2; 3713 } 3714 3715 uint64_t helper_float_floor_2008_l_s(CPUMIPSState *env, uint32_t fst0) 3716 { 3717 uint64_t dt2; 3718 3719 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); 3720 dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); 3721 restore_rounding_mode(env); 3722 if (get_float_exception_flags(&env->active_fpu.fp_status) 3723 & float_flag_invalid) { 3724 if (float32_is_any_nan(fst0)) { 3725 dt2 = 0; 3726 } 3727 } 3728 update_fcr31(env, GETPC()); 3729 return dt2; 3730 } 3731 3732 uint32_t helper_float_floor_2008_w_d(CPUMIPSState *env, uint64_t fdt0) 3733 { 3734 uint32_t wt2; 3735 3736 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); 3737 wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); 3738 restore_rounding_mode(env); 3739 if (get_float_exception_flags(&env->active_fpu.fp_status) 3740 & float_flag_invalid) { 3741 if (float64_is_any_nan(fdt0)) { 3742 wt2 = 0; 3743 } 3744 } 3745 update_fcr31(env, GETPC()); 3746 return wt2; 3747 } 3748 3749 uint32_t helper_float_floor_2008_w_s(CPUMIPSState *env, uint32_t fst0) 3750 { 3751 uint32_t wt2; 3752 3753 set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); 3754 wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); 3755 restore_rounding_mode(env); 3756 if (get_float_exception_flags(&env->active_fpu.fp_status) 3757 & float_flag_invalid) { 3758 if (float32_is_any_nan(fst0)) { 3759 wt2 = 0; 3760 } 3761 } 3762 update_fcr31(env, GETPC()); 3763 return wt2; 3764 } 3765 3766 /* unary operations, not modifying fp status */ 3767 #define FLOAT_UNOP(name) \ 3768 uint64_t helper_float_ ## name ## _d(uint64_t fdt0) \ 3769 { \ 3770 return float64_ ## name(fdt0); \ 3771 } \ 3772 uint32_t helper_float_ ## name ## _s(uint32_t fst0) \ 3773 { \ 3774 return float32_ ## 
name(fst0); \ 3775 } \ 3776 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0) \ 3777 { \ 3778 uint32_t wt0; \ 3779 uint32_t wth0; \ 3780 \ 3781 wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF); \ 3782 wth0 = float32_ ## name(fdt0 >> 32); \ 3783 return ((uint64_t)wth0 << 32) | wt0; \ 3784 } 3785 FLOAT_UNOP(abs) 3786 FLOAT_UNOP(chs) 3787 #undef FLOAT_UNOP 3788 3789 /* MIPS specific unary operations */ 3790 uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0) 3791 { 3792 uint64_t fdt2; 3793 3794 fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); 3795 update_fcr31(env, GETPC()); 3796 return fdt2; 3797 } 3798 3799 uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0) 3800 { 3801 uint32_t fst2; 3802 3803 fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); 3804 update_fcr31(env, GETPC()); 3805 return fst2; 3806 } 3807 3808 uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0) 3809 { 3810 uint64_t fdt2; 3811 3812 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); 3813 fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); 3814 update_fcr31(env, GETPC()); 3815 return fdt2; 3816 } 3817 3818 uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0) 3819 { 3820 uint32_t fst2; 3821 3822 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); 3823 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); 3824 update_fcr31(env, GETPC()); 3825 return fst2; 3826 } 3827 3828 uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0) 3829 { 3830 uint64_t fdt2; 3831 3832 fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); 3833 update_fcr31(env, GETPC()); 3834 return fdt2; 3835 } 3836 3837 uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0) 3838 { 3839 uint32_t fst2; 3840 3841 fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); 3842 update_fcr31(env, GETPC()); 3843 return fst2; 3844 } 3845 3846 uint64_t helper_float_recip1_ps(CPUMIPSState *env, 
uint64_t fdt0) 3847 { 3848 uint32_t fst2; 3849 uint32_t fsth2; 3850 3851 fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, 3852 &env->active_fpu.fp_status); 3853 fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status); 3854 update_fcr31(env, GETPC()); 3855 return ((uint64_t)fsth2 << 32) | fst2; 3856 } 3857 3858 uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0) 3859 { 3860 uint64_t fdt2; 3861 3862 fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); 3863 fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status); 3864 update_fcr31(env, GETPC()); 3865 return fdt2; 3866 } 3867 3868 uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0) 3869 { 3870 uint32_t fst2; 3871 3872 fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); 3873 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); 3874 update_fcr31(env, GETPC()); 3875 return fst2; 3876 } 3877 3878 uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0) 3879 { 3880 uint32_t fst2; 3881 uint32_t fsth2; 3882 3883 fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status); 3884 fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status); 3885 fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); 3886 fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status); 3887 update_fcr31(env, GETPC()); 3888 return ((uint64_t)fsth2 << 32) | fst2; 3889 } 3890 3891 #define FLOAT_RINT(name, bits) \ 3892 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ 3893 uint ## bits ## _t fs) \ 3894 { \ 3895 uint ## bits ## _t fdret; \ 3896 \ 3897 fdret = float ## bits ## _round_to_int(fs, &env->active_fpu.fp_status); \ 3898 update_fcr31(env, GETPC()); \ 3899 return fdret; \ 3900 } 3901 3902 FLOAT_RINT(rint_s, 32) 3903 FLOAT_RINT(rint_d, 64) 3904 #undef FLOAT_RINT 3905 3906 #define FLOAT_CLASS_SIGNALING_NAN 0x001 3907 #define FLOAT_CLASS_QUIET_NAN 0x002 3908 #define FLOAT_CLASS_NEGATIVE_INFINITY 0x004 3909 #define 
FLOAT_CLASS_NEGATIVE_NORMAL 0x008 3910 #define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010 3911 #define FLOAT_CLASS_NEGATIVE_ZERO 0x020 3912 #define FLOAT_CLASS_POSITIVE_INFINITY 0x040 3913 #define FLOAT_CLASS_POSITIVE_NORMAL 0x080 3914 #define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100 3915 #define FLOAT_CLASS_POSITIVE_ZERO 0x200 3916 3917 #define FLOAT_CLASS(name, bits) \ 3918 uint ## bits ## _t float_ ## name(uint ## bits ## _t arg, \ 3919 float_status *status) \ 3920 { \ 3921 if (float ## bits ## _is_signaling_nan(arg, status)) { \ 3922 return FLOAT_CLASS_SIGNALING_NAN; \ 3923 } else if (float ## bits ## _is_quiet_nan(arg, status)) { \ 3924 return FLOAT_CLASS_QUIET_NAN; \ 3925 } else if (float ## bits ## _is_neg(arg)) { \ 3926 if (float ## bits ## _is_infinity(arg)) { \ 3927 return FLOAT_CLASS_NEGATIVE_INFINITY; \ 3928 } else if (float ## bits ## _is_zero(arg)) { \ 3929 return FLOAT_CLASS_NEGATIVE_ZERO; \ 3930 } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ 3931 return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \ 3932 } else { \ 3933 return FLOAT_CLASS_NEGATIVE_NORMAL; \ 3934 } \ 3935 } else { \ 3936 if (float ## bits ## _is_infinity(arg)) { \ 3937 return FLOAT_CLASS_POSITIVE_INFINITY; \ 3938 } else if (float ## bits ## _is_zero(arg)) { \ 3939 return FLOAT_CLASS_POSITIVE_ZERO; \ 3940 } else if (float ## bits ## _is_zero_or_denormal(arg)) { \ 3941 return FLOAT_CLASS_POSITIVE_SUBNORMAL; \ 3942 } else { \ 3943 return FLOAT_CLASS_POSITIVE_NORMAL; \ 3944 } \ 3945 } \ 3946 } \ 3947 \ 3948 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ 3949 uint ## bits ## _t arg) \ 3950 { \ 3951 return float_ ## name(arg, &env->active_fpu.fp_status); \ 3952 } 3953 3954 FLOAT_CLASS(class_s, 32) 3955 FLOAT_CLASS(class_d, 64) 3956 #undef FLOAT_CLASS 3957 3958 /* binary operations */ 3959 #define FLOAT_BINOP(name) \ 3960 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ 3961 uint64_t fdt0, uint64_t fdt1) \ 3962 { \ 3963 uint64_t dt2; \ 3964 \ 3965 dt2 = float64_ ## name(fdt0, 
fdt1, &env->active_fpu.fp_status);\ 3966 update_fcr31(env, GETPC()); \ 3967 return dt2; \ 3968 } \ 3969 \ 3970 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ 3971 uint32_t fst0, uint32_t fst1) \ 3972 { \ 3973 uint32_t wt2; \ 3974 \ 3975 wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status);\ 3976 update_fcr31(env, GETPC()); \ 3977 return wt2; \ 3978 } \ 3979 \ 3980 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ 3981 uint64_t fdt0, \ 3982 uint64_t fdt1) \ 3983 { \ 3984 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ 3985 uint32_t fsth0 = fdt0 >> 32; \ 3986 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ 3987 uint32_t fsth1 = fdt1 >> 32; \ 3988 uint32_t wt2; \ 3989 uint32_t wth2; \ 3990 \ 3991 wt2 = float32_ ## name(fst0, fst1, &env->active_fpu.fp_status); \ 3992 wth2 = float32_ ## name(fsth0, fsth1, &env->active_fpu.fp_status); \ 3993 update_fcr31(env, GETPC()); \ 3994 return ((uint64_t)wth2 << 32) | wt2; \ 3995 } 3996 3997 FLOAT_BINOP(add) 3998 FLOAT_BINOP(sub) 3999 FLOAT_BINOP(mul) 4000 FLOAT_BINOP(div) 4001 #undef FLOAT_BINOP 4002 4003 /* MIPS specific binary operations */ 4004 uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) 4005 { 4006 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); 4007 fdt2 = float64_chs(float64_sub(fdt2, float64_one, 4008 &env->active_fpu.fp_status)); 4009 update_fcr31(env, GETPC()); 4010 return fdt2; 4011 } 4012 4013 uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) 4014 { 4015 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); 4016 fst2 = float32_chs(float32_sub(fst2, float32_one, 4017 &env->active_fpu.fp_status)); 4018 update_fcr31(env, GETPC()); 4019 return fst2; 4020 } 4021 4022 uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) 4023 { 4024 uint32_t fst0 = fdt0 & 0XFFFFFFFF; 4025 uint32_t fsth0 = fdt0 >> 32; 4026 uint32_t fst2 = fdt2 & 0XFFFFFFFF; 4027 uint32_t fsth2 = fdt2 >> 32; 4028 4029 fst2 = float32_mul(fst0, 
fst2, &env->active_fpu.fp_status); 4030 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); 4031 fst2 = float32_chs(float32_sub(fst2, float32_one, 4032 &env->active_fpu.fp_status)); 4033 fsth2 = float32_chs(float32_sub(fsth2, float32_one, 4034 &env->active_fpu.fp_status)); 4035 update_fcr31(env, GETPC()); 4036 return ((uint64_t)fsth2 << 32) | fst2; 4037 } 4038 4039 uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) 4040 { 4041 fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status); 4042 fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status); 4043 fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, 4044 &env->active_fpu.fp_status)); 4045 update_fcr31(env, GETPC()); 4046 return fdt2; 4047 } 4048 4049 uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2) 4050 { 4051 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); 4052 fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); 4053 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, 4054 &env->active_fpu.fp_status)); 4055 update_fcr31(env, GETPC()); 4056 return fst2; 4057 } 4058 4059 uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2) 4060 { 4061 uint32_t fst0 = fdt0 & 0XFFFFFFFF; 4062 uint32_t fsth0 = fdt0 >> 32; 4063 uint32_t fst2 = fdt2 & 0XFFFFFFFF; 4064 uint32_t fsth2 = fdt2 >> 32; 4065 4066 fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status); 4067 fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status); 4068 fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status); 4069 fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status); 4070 fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, 4071 &env->active_fpu.fp_status)); 4072 fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, 4073 &env->active_fpu.fp_status)); 4074 update_fcr31(env, GETPC()); 4075 return ((uint64_t)fsth2 << 32) | fst2; 4076 } 4077 4078 uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t 
fdt0, uint64_t fdt1) 4079 { 4080 uint32_t fst0 = fdt0 & 0XFFFFFFFF; 4081 uint32_t fsth0 = fdt0 >> 32; 4082 uint32_t fst1 = fdt1 & 0XFFFFFFFF; 4083 uint32_t fsth1 = fdt1 >> 32; 4084 uint32_t fst2; 4085 uint32_t fsth2; 4086 4087 fst2 = float32_add(fst0, fsth0, &env->active_fpu.fp_status); 4088 fsth2 = float32_add(fst1, fsth1, &env->active_fpu.fp_status); 4089 update_fcr31(env, GETPC()); 4090 return ((uint64_t)fsth2 << 32) | fst2; 4091 } 4092 4093 uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1) 4094 { 4095 uint32_t fst0 = fdt0 & 0XFFFFFFFF; 4096 uint32_t fsth0 = fdt0 >> 32; 4097 uint32_t fst1 = fdt1 & 0XFFFFFFFF; 4098 uint32_t fsth1 = fdt1 >> 32; 4099 uint32_t fst2; 4100 uint32_t fsth2; 4101 4102 fst2 = float32_mul(fst0, fsth0, &env->active_fpu.fp_status); 4103 fsth2 = float32_mul(fst1, fsth1, &env->active_fpu.fp_status); 4104 update_fcr31(env, GETPC()); 4105 return ((uint64_t)fsth2 << 32) | fst2; 4106 } 4107 4108 #define FLOAT_MINMAX(name, bits, minmaxfunc) \ 4109 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ 4110 uint ## bits ## _t fs, \ 4111 uint ## bits ## _t ft) \ 4112 { \ 4113 uint ## bits ## _t fdret; \ 4114 \ 4115 fdret = float ## bits ## _ ## minmaxfunc(fs, ft, \ 4116 &env->active_fpu.fp_status); \ 4117 update_fcr31(env, GETPC()); \ 4118 return fdret; \ 4119 } 4120 4121 FLOAT_MINMAX(max_s, 32, maxnum) 4122 FLOAT_MINMAX(max_d, 64, maxnum) 4123 FLOAT_MINMAX(maxa_s, 32, maxnummag) 4124 FLOAT_MINMAX(maxa_d, 64, maxnummag) 4125 4126 FLOAT_MINMAX(min_s, 32, minnum) 4127 FLOAT_MINMAX(min_d, 64, minnum) 4128 FLOAT_MINMAX(mina_s, 32, minnummag) 4129 FLOAT_MINMAX(mina_d, 64, minnummag) 4130 #undef FLOAT_MINMAX 4131 4132 /* ternary operations */ 4133 #define UNFUSED_FMA(prefix, a, b, c, flags) \ 4134 { \ 4135 a = prefix##_mul(a, b, &env->active_fpu.fp_status); \ 4136 if ((flags) & float_muladd_negate_c) { \ 4137 a = prefix##_sub(a, c, &env->active_fpu.fp_status); \ 4138 } else { \ 4139 a = prefix##_add(a, c, 
&env->active_fpu.fp_status); \ 4140 } \ 4141 if ((flags) & float_muladd_negate_result) { \ 4142 a = prefix##_chs(a); \ 4143 } \ 4144 } 4145 4146 /* FMA based operations */ 4147 #define FLOAT_FMA(name, type) \ 4148 uint64_t helper_float_ ## name ## _d(CPUMIPSState *env, \ 4149 uint64_t fdt0, uint64_t fdt1, \ 4150 uint64_t fdt2) \ 4151 { \ 4152 UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type); \ 4153 update_fcr31(env, GETPC()); \ 4154 return fdt0; \ 4155 } \ 4156 \ 4157 uint32_t helper_float_ ## name ## _s(CPUMIPSState *env, \ 4158 uint32_t fst0, uint32_t fst1, \ 4159 uint32_t fst2) \ 4160 { \ 4161 UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ 4162 update_fcr31(env, GETPC()); \ 4163 return fst0; \ 4164 } \ 4165 \ 4166 uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env, \ 4167 uint64_t fdt0, uint64_t fdt1, \ 4168 uint64_t fdt2) \ 4169 { \ 4170 uint32_t fst0 = fdt0 & 0XFFFFFFFF; \ 4171 uint32_t fsth0 = fdt0 >> 32; \ 4172 uint32_t fst1 = fdt1 & 0XFFFFFFFF; \ 4173 uint32_t fsth1 = fdt1 >> 32; \ 4174 uint32_t fst2 = fdt2 & 0XFFFFFFFF; \ 4175 uint32_t fsth2 = fdt2 >> 32; \ 4176 \ 4177 UNFUSED_FMA(float32, fst0, fst1, fst2, type); \ 4178 UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type); \ 4179 update_fcr31(env, GETPC()); \ 4180 return ((uint64_t)fsth0 << 32) | fst0; \ 4181 } 4182 FLOAT_FMA(madd, 0) 4183 FLOAT_FMA(msub, float_muladd_negate_c) 4184 FLOAT_FMA(nmadd, float_muladd_negate_result) 4185 FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c) 4186 #undef FLOAT_FMA 4187 4188 #define FLOAT_FMADDSUB(name, bits, muladd_arg) \ 4189 uint ## bits ## _t helper_float_ ## name(CPUMIPSState *env, \ 4190 uint ## bits ## _t fs, \ 4191 uint ## bits ## _t ft, \ 4192 uint ## bits ## _t fd) \ 4193 { \ 4194 uint ## bits ## _t fdret; \ 4195 \ 4196 fdret = float ## bits ## _muladd(fs, ft, fd, muladd_arg, \ 4197 &env->active_fpu.fp_status); \ 4198 update_fcr31(env, GETPC()); \ 4199 return fdret; \ 4200 } 4201 4202 FLOAT_FMADDSUB(maddf_s, 32, 0) 4203 
FLOAT_FMADDSUB(maddf_d, 64, 0) 4204 FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_product) 4205 FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_product) 4206 #undef FLOAT_FMADDSUB 4207 4208 /* compare operations */ 4209 #define FOP_COND_D(op, cond) \ 4210 void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ 4211 uint64_t fdt1, int cc) \ 4212 { \ 4213 int c; \ 4214 c = cond; \ 4215 update_fcr31(env, GETPC()); \ 4216 if (c) \ 4217 SET_FP_COND(cc, env->active_fpu); \ 4218 else \ 4219 CLEAR_FP_COND(cc, env->active_fpu); \ 4220 } \ 4221 void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0, \ 4222 uint64_t fdt1, int cc) \ 4223 { \ 4224 int c; \ 4225 fdt0 = float64_abs(fdt0); \ 4226 fdt1 = float64_abs(fdt1); \ 4227 c = cond; \ 4228 update_fcr31(env, GETPC()); \ 4229 if (c) \ 4230 SET_FP_COND(cc, env->active_fpu); \ 4231 else \ 4232 CLEAR_FP_COND(cc, env->active_fpu); \ 4233 } 4234 4235 /* 4236 * NOTE: the comma operator will make "cond" to eval to false, 4237 * but float64_unordered_quiet() is still called. 
4238 */ 4239 FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, 4240 &env->active_fpu.fp_status), 0)) 4241 FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, 4242 &env->active_fpu.fp_status)) 4243 FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, 4244 &env->active_fpu.fp_status)) 4245 FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, 4246 &env->active_fpu.fp_status) 4247 || float64_eq_quiet(fdt0, fdt1, 4248 &env->active_fpu.fp_status)) 4249 FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, 4250 &env->active_fpu.fp_status)) 4251 FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, 4252 &env->active_fpu.fp_status) 4253 || float64_lt_quiet(fdt0, fdt1, 4254 &env->active_fpu.fp_status)) 4255 FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, 4256 &env->active_fpu.fp_status)) 4257 FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, 4258 &env->active_fpu.fp_status) 4259 || float64_le_quiet(fdt0, fdt1, 4260 &env->active_fpu.fp_status)) 4261 /* 4262 * NOTE: the comma operator will make "cond" to eval to false, 4263 * but float64_unordered() is still called. 
4264 */ 4265 FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, 4266 &env->active_fpu.fp_status), 0)) 4267 FOP_COND_D(ngle, float64_unordered(fdt1, fdt0, 4268 &env->active_fpu.fp_status)) 4269 FOP_COND_D(seq, float64_eq(fdt0, fdt1, 4270 &env->active_fpu.fp_status)) 4271 FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, 4272 &env->active_fpu.fp_status) 4273 || float64_eq(fdt0, fdt1, 4274 &env->active_fpu.fp_status)) 4275 FOP_COND_D(lt, float64_lt(fdt0, fdt1, 4276 &env->active_fpu.fp_status)) 4277 FOP_COND_D(nge, float64_unordered(fdt1, fdt0, 4278 &env->active_fpu.fp_status) 4279 || float64_lt(fdt0, fdt1, 4280 &env->active_fpu.fp_status)) 4281 FOP_COND_D(le, float64_le(fdt0, fdt1, 4282 &env->active_fpu.fp_status)) 4283 FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, 4284 &env->active_fpu.fp_status) 4285 || float64_le(fdt0, fdt1, 4286 &env->active_fpu.fp_status)) 4287 4288 #define FOP_COND_S(op, cond) \ 4289 void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ 4290 uint32_t fst1, int cc) \ 4291 { \ 4292 int c; \ 4293 c = cond; \ 4294 update_fcr31(env, GETPC()); \ 4295 if (c) \ 4296 SET_FP_COND(cc, env->active_fpu); \ 4297 else \ 4298 CLEAR_FP_COND(cc, env->active_fpu); \ 4299 } \ 4300 void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0, \ 4301 uint32_t fst1, int cc) \ 4302 { \ 4303 int c; \ 4304 fst0 = float32_abs(fst0); \ 4305 fst1 = float32_abs(fst1); \ 4306 c = cond; \ 4307 update_fcr31(env, GETPC()); \ 4308 if (c) \ 4309 SET_FP_COND(cc, env->active_fpu); \ 4310 else \ 4311 CLEAR_FP_COND(cc, env->active_fpu); \ 4312 } 4313 4314 /* 4315 * NOTE: the comma operator will make "cond" to eval to false, 4316 * but float32_unordered_quiet() is still called. 
4317 */ 4318 FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, 4319 &env->active_fpu.fp_status), 0)) 4320 FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, 4321 &env->active_fpu.fp_status)) 4322 FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, 4323 &env->active_fpu.fp_status)) 4324 FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, 4325 &env->active_fpu.fp_status) 4326 || float32_eq_quiet(fst0, fst1, 4327 &env->active_fpu.fp_status)) 4328 FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, 4329 &env->active_fpu.fp_status)) 4330 FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, 4331 &env->active_fpu.fp_status) 4332 || float32_lt_quiet(fst0, fst1, 4333 &env->active_fpu.fp_status)) 4334 FOP_COND_S(ole, float32_le_quiet(fst0, fst1, 4335 &env->active_fpu.fp_status)) 4336 FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, 4337 &env->active_fpu.fp_status) 4338 || float32_le_quiet(fst0, fst1, 4339 &env->active_fpu.fp_status)) 4340 /* 4341 * NOTE: the comma operator will make "cond" to eval to false, 4342 * but float32_unordered() is still called. 
 */
/*
 * Signaling single-precision compares: unlike the *_quiet variants above,
 * these raise Invalid Operation on any NaN operand (sf/ngle/seq/ngl/lt/
 * nge/le/ngt are the MIPS C.cond.S signaling predicates).
 */
FOP_COND_S(sf, (float32_unordered(fst1, fst0,
                                  &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle, float32_unordered(fst1, fst0,
                                   &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1,
                           &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0,
                                  &env->active_fpu.fp_status)
            || float32_eq(fst0, fst1,
                          &env->active_fpu.fp_status))
FOP_COND_S(lt, float32_lt(fst0, fst1,
                          &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0,
                                  &env->active_fpu.fp_status)
            || float32_lt(fst0, fst1,
                          &env->active_fpu.fp_status))
FOP_COND_S(le, float32_le(fst0, fst1,
                          &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0,
                                  &env->active_fpu.fp_status)
            || float32_le(fst0, fst1,
                          &env->active_fpu.fp_status))

/*
 * Paired-single (PS) compare helpers.  Each 64-bit operand packs two
 * single-precision values: the low 32 bits (fst0/fst1) and the high
 * 32 bits (fsth0/fsth1).  "condl" tests the low pair and drives
 * condition code "cc"; "condh" tests the high pair and drives "cc + 1".
 * The cmpabs variant strips the sign bit of every operand first
 * (magnitude compare).  update_fcr31() is called after both compares so
 * any accumulated FP exceptions are reported.
 */
#define FOP_COND_PS(op, condl, condh)                              \
void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0,        \
                          uint64_t fdt1, int cc)                   \
{                                                                  \
    uint32_t fst0, fsth0, fst1, fsth1;                             \
    int ch, cl;                                                    \
    fst0 = fdt0 & 0XFFFFFFFF;                                      \
    fsth0 = fdt0 >> 32;                                            \
    fst1 = fdt1 & 0XFFFFFFFF;                                      \
    fsth1 = fdt1 >> 32;                                            \
    cl = condl;                                                    \
    ch = condh;                                                    \
    update_fcr31(env, GETPC());                                    \
    if (cl)                                                        \
        SET_FP_COND(cc, env->active_fpu);                          \
    else                                                           \
        CLEAR_FP_COND(cc, env->active_fpu);                        \
    if (ch)                                                        \
        SET_FP_COND(cc + 1, env->active_fpu);                      \
    else                                                           \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                    \
}                                                                  \
void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0,     \
                             uint64_t fdt1, int cc)                \
{                                                                  \
    uint32_t fst0, fsth0, fst1, fsth1;                             \
    int ch, cl;                                                    \
    fst0 = float32_abs(fdt0 & 0XFFFFFFFF);                         \
    fsth0 = float32_abs(fdt0 >> 32);                               \
    fst1 = float32_abs(fdt1 & 0XFFFFFFFF);                         \
    fsth1 = float32_abs(fdt1 >> 32);                               \
    cl = condl;                                                    \
    ch = condh;                                                    \
    update_fcr31(env, GETPC());                                    \
    if (cl)                                                        \
        SET_FP_COND(cc, env->active_fpu);                          \
    else                                                           \
        CLEAR_FP_COND(cc, env->active_fpu);                        \
    if (ch)                                                        \
        SET_FP_COND(cc + 1, env->active_fpu);                      \
    else                                                           \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                    \
}

/*
 * NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called.
 */
FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0,
                                        &env->active_fpu.fp_status), 0),
            (float32_unordered_quiet(fsth1, fsth0,
                                     &env->active_fpu.fp_status), 0))
FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0,
                                        &env->active_fpu.fp_status),
            float32_unordered_quiet(fsth1, fsth0,
                                    &env->active_fpu.fp_status))
FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1,
                                 &env->active_fpu.fp_status),
            float32_eq_quiet(fsth0, fsth1,
                             &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0,
                                         &env->active_fpu.fp_status)
            || float32_eq_quiet(fst0, fst1,
                                &env->active_fpu.fp_status),
            float32_unordered_quiet(fsth1, fsth0,
                                    &env->active_fpu.fp_status)
            || float32_eq_quiet(fsth0, fsth1,
                                &env->active_fpu.fp_status))
FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1,
                                  &env->active_fpu.fp_status),
            float32_lt_quiet(fsth0, fsth1,
                             &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0,
                                         &env->active_fpu.fp_status)
            || float32_lt_quiet(fst0, fst1,
                                &env->active_fpu.fp_status),
            float32_unordered_quiet(fsth1, fsth0,
                                    &env->active_fpu.fp_status)
            || float32_lt_quiet(fsth0, fsth1,
                                &env->active_fpu.fp_status))
FOP_COND_PS(ole, float32_le_quiet(fst0, fst1,
                                  &env->active_fpu.fp_status),
            float32_le_quiet(fsth0, fsth1,
                             &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0,
                                         &env->active_fpu.fp_status)
            || float32_le_quiet(fst0, fst1,
                                &env->active_fpu.fp_status),
            float32_unordered_quiet(fsth1, fsth0,
                                    &env->active_fpu.fp_status)
            || float32_le_quiet(fsth0, fsth1,
                                &env->active_fpu.fp_status))
/*
 * NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called.
 */
FOP_COND_PS(sf, (float32_unordered(fst1, fst0,
                                   &env->active_fpu.fp_status), 0),
            (float32_unordered(fsth1, fsth0,
                               &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle, float32_unordered(fst1, fst0,
                                    &env->active_fpu.fp_status),
            float32_unordered(fsth1, fsth0,
                              &env->active_fpu.fp_status))
FOP_COND_PS(seq, float32_eq(fst0, fst1,
                            &env->active_fpu.fp_status),
            float32_eq(fsth0, fsth1,
                       &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_unordered(fst1, fst0,
                                   &env->active_fpu.fp_status)
            || float32_eq(fst0, fst1,
                          &env->active_fpu.fp_status),
            float32_unordered(fsth1, fsth0,
                              &env->active_fpu.fp_status)
            || float32_eq(fsth0, fsth1,
                          &env->active_fpu.fp_status))
FOP_COND_PS(lt, float32_lt(fst0, fst1,
                           &env->active_fpu.fp_status),
            float32_lt(fsth0, fsth1,
                       &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_unordered(fst1, fst0,
                                   &env->active_fpu.fp_status)
            || float32_lt(fst0, fst1,
                          &env->active_fpu.fp_status),
            float32_unordered(fsth1, fsth0,
                              &env->active_fpu.fp_status)
            || float32_lt(fsth0, fsth1,
                          &env->active_fpu.fp_status))
FOP_COND_PS(le, float32_le(fst0, fst1,
                           &env->active_fpu.fp_status),
            float32_le(fsth0, fsth1,
                       &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_unordered(fst1, fst0,
                                   &env->active_fpu.fp_status)
            || float32_le(fst0, fst1,
                          &env->active_fpu.fp_status),
            float32_unordered(fsth1, fsth0,
                              &env->active_fpu.fp_status)
            || float32_le(fsth0, fsth1,
                          &env->active_fpu.fp_status))

/* R6 compare operations */
/*
 * Release-6 CMP.cond.D: instead of writing an FCC bit, return an
 * all-ones (-1) or all-zeroes (0) 64-bit mask according to "cond".
 * FP exception flags are folded into FCSR via update_fcr31().
 */
#define FOP_CONDN_D(op, cond)                                      \
uint64_t helper_r6_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0,  \
                                uint64_t fdt1)                     \
{                                                                  \
    uint64_t c;                                                    \
    c = cond;                                                      \
    update_fcr31(env, GETPC());                                    \
    if (c) {                                                       \
        return -1;                                                 \
    } else {                                                       \
        return 0;                                                  \
    }                                                              \
}

/*
 * NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered_quiet() is still called.
 */
FOP_CONDN_D(af, (float64_unordered_quiet(fdt1, fdt0,
                                         &env->active_fpu.fp_status), 0))
FOP_CONDN_D(un, (float64_unordered_quiet(fdt1, fdt0,
                                         &env->active_fpu.fp_status)))
FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1,
                                  &env->active_fpu.fp_status)))
FOP_CONDN_D(ueq, (float64_unordered_quiet(fdt1, fdt0,
                                          &env->active_fpu.fp_status)
                  || float64_eq_quiet(fdt0, fdt1,
                                      &env->active_fpu.fp_status)))
FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1,
                                  &env->active_fpu.fp_status)))
FOP_CONDN_D(ult, (float64_unordered_quiet(fdt1, fdt0,
                                          &env->active_fpu.fp_status)
                  || float64_lt_quiet(fdt0, fdt1,
                                      &env->active_fpu.fp_status)))
FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1,
                                  &env->active_fpu.fp_status)))
FOP_CONDN_D(ule, (float64_unordered_quiet(fdt1, fdt0,
                                          &env->active_fpu.fp_status)
                  || float64_le_quiet(fdt0, fdt1,
                                      &env->active_fpu.fp_status)))
/*
 * NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered() is still called.
 */
FOP_CONDN_D(saf, (float64_unordered(fdt1, fdt0,
                                    &env->active_fpu.fp_status), 0))
FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0,
                                    &env->active_fpu.fp_status)))
FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1,
                             &env->active_fpu.fp_status)))
FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0,
                                     &env->active_fpu.fp_status)
                   || float64_eq(fdt0, fdt1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1,
                             &env->active_fpu.fp_status)))
FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0,
                                     &env->active_fpu.fp_status)
                   || float64_lt(fdt0, fdt1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_D(sle, (float64_le(fdt0, fdt1,
                             &env->active_fpu.fp_status)))
FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0,
                                     &env->active_fpu.fp_status)
                   || float64_le(fdt0, fdt1,
                                 &env->active_fpu.fp_status)))
/* "ordered" = le in both directions (true for any ordered pair) */
FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0,
                                  &env->active_fpu.fp_status)
                 || float64_le_quiet(fdt0, fdt1,
                                     &env->active_fpu.fp_status)))
FOP_CONDN_D(une, (float64_unordered_quiet(fdt1, fdt0,
                                          &env->active_fpu.fp_status)
                  || float64_lt_quiet(fdt1, fdt0,
                                      &env->active_fpu.fp_status)
                  || float64_lt_quiet(fdt0, fdt1,
                                      &env->active_fpu.fp_status)))
FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0,
                                  &env->active_fpu.fp_status)
                 || float64_lt_quiet(fdt0, fdt1,
                                     &env->active_fpu.fp_status)))
FOP_CONDN_D(sor, (float64_le(fdt1, fdt0,
                             &env->active_fpu.fp_status)
                  || float64_le(fdt0, fdt1,
                                &env->active_fpu.fp_status)))
FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0,
                                     &env->active_fpu.fp_status)
                   || float64_lt(fdt1, fdt0,
                                 &env->active_fpu.fp_status)
                   || float64_lt(fdt0, fdt1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0,
                             &env->active_fpu.fp_status)
                  || float64_lt(fdt0, fdt1,
                                &env->active_fpu.fp_status)))

/* Single-precision flavour of the R6 mask-returning compares above. */
#define FOP_CONDN_S(op, cond)                                      \
uint32_t helper_r6_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0,  \
                                uint32_t fst1)                     \
{                                                                  \
    uint64_t c;                                                    \
    c = cond;                                                      \
    update_fcr31(env, GETPC());                                    \
    if (c) {                                                       \
        return -1;                                                 \
    } else {                                                       \
        return 0;                                                  \
    }                                                              \
}

/*
 * NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called.
 */
FOP_CONDN_S(af, (float32_unordered_quiet(fst1, fst0,
                                         &env->active_fpu.fp_status), 0))
FOP_CONDN_S(un, (float32_unordered_quiet(fst1, fst0,
                                         &env->active_fpu.fp_status)))
FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1,
                                  &env->active_fpu.fp_status)))
FOP_CONDN_S(ueq, (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status)
                  || float32_eq_quiet(fst0, fst1,
                                      &env->active_fpu.fp_status)))
FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1,
                                  &env->active_fpu.fp_status)))
FOP_CONDN_S(ult, (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status)
                  || float32_lt_quiet(fst0, fst1,
                                      &env->active_fpu.fp_status)))
FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1,
                                  &env->active_fpu.fp_status)))
FOP_CONDN_S(ule, (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status)
                  || float32_le_quiet(fst0, fst1,
                                      &env->active_fpu.fp_status)))
/*
 * NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called.
 */
FOP_CONDN_S(saf, (float32_unordered(fst1, fst0,
                                    &env->active_fpu.fp_status), 0))
FOP_CONDN_S(sun, (float32_unordered(fst1, fst0,
                                    &env->active_fpu.fp_status)))
FOP_CONDN_S(seq, (float32_eq(fst0, fst1,
                             &env->active_fpu.fp_status)))
FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)
                   || float32_eq(fst0, fst1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_S(slt, (float32_lt(fst0, fst1,
                             &env->active_fpu.fp_status)))
FOP_CONDN_S(sult, (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)
                   || float32_lt(fst0, fst1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_S(sle, (float32_le(fst0, fst1,
                             &env->active_fpu.fp_status)))
FOP_CONDN_S(sule, (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)
                   || float32_le(fst0, fst1,
                                 &env->active_fpu.fp_status)))
/* "ordered" = le in both directions (true for any ordered pair) */
FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0,
                                  &env->active_fpu.fp_status)
                 || float32_le_quiet(fst0, fst1,
                                     &env->active_fpu.fp_status)))
FOP_CONDN_S(une, (float32_unordered_quiet(fst1, fst0,
                                          &env->active_fpu.fp_status)
                  || float32_lt_quiet(fst1, fst0,
                                      &env->active_fpu.fp_status)
                  || float32_lt_quiet(fst0, fst1,
                                      &env->active_fpu.fp_status)))
FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0,
                                  &env->active_fpu.fp_status)
                 || float32_lt_quiet(fst0, fst1,
                                     &env->active_fpu.fp_status)))
FOP_CONDN_S(sor, (float32_le(fst1, fst0,
                             &env->active_fpu.fp_status)
                  || float32_le(fst0, fst1,
                                &env->active_fpu.fp_status)))
FOP_CONDN_S(sune, (float32_unordered(fst1, fst0,
                                     &env->active_fpu.fp_status)
                   || float32_lt(fst1, fst0,
                                 &env->active_fpu.fp_status)
                   || float32_lt(fst0, fst1,
                                 &env->active_fpu.fp_status)))
FOP_CONDN_S(sne, (float32_lt(fst1, fst0,
                             &env->active_fpu.fp_status)
                  || float32_lt(fst0, fst1,
                                &env->active_fpu.fp_status)))

/* MSA */
/* Data format min and max
   values */
/* Element width in bits for data format df (8 << df). */
#define DF_BITS(df) (1 << ((df) + 3))

/* Element-by-element access macros */
#define DF_ELEMENTS(df) (MSA_WRLEN / DF_BITS(df))

/*
 * Build a TCG memory-op index for a target-endian, unaligned access of
 * width DF at the current (non-debug) MMU index.  Expands to nothing in
 * user-only builds, where the cpu_*_data() accessors are used instead.
 */
#if !defined(CONFIG_USER_ONLY)
#define MEMOP_IDX(DF)                                           \
        TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN,  \
                                        cpu_mmu_index(env, false));
#else
#define MEMOP_IDX(DF)
#endif

/*
 * MSA LD.B: load 16 bytes from (possibly unaligned) addr into vector
 * register wd.  On big-endian hosts the lane indices are swapped within
 * each 8-byte half so the in-register layout matches the little-endian
 * case -- NOTE(review): assumed from the index pattern; confirm against
 * the wr_t lane definition.
 */
void helper_msa_ld_b(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    MEMOP_IDX(DF_BYTE)
#if !defined(CONFIG_USER_ONLY)
#if !defined(HOST_WORDS_BIGENDIAN)
    pwd->b[0] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
    pwd->b[1] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
    pwd->b[2] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
    pwd->b[3] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
    pwd->b[4] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
    pwd->b[5] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
    pwd->b[6] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
    pwd->b[7] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
    pwd->b[8] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
    pwd->b[9] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
    pwd->b[10] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
    pwd->b[11] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
    pwd->b[12] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
    pwd->b[13] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
    pwd->b[14] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
    pwd->b[15] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
#else
    pwd->b[0] = helper_ret_ldub_mmu(env, addr + (7 << DF_BYTE), oi, GETPC());
    pwd->b[1] = helper_ret_ldub_mmu(env, addr + (6 << DF_BYTE), oi, GETPC());
    pwd->b[2] = helper_ret_ldub_mmu(env, addr + (5 << DF_BYTE), oi, GETPC());
    pwd->b[3] = helper_ret_ldub_mmu(env, addr + (4 << DF_BYTE), oi, GETPC());
    pwd->b[4] = helper_ret_ldub_mmu(env, addr + (3 << DF_BYTE), oi, GETPC());
    pwd->b[5] = helper_ret_ldub_mmu(env, addr + (2 << DF_BYTE), oi, GETPC());
    pwd->b[6] = helper_ret_ldub_mmu(env, addr + (1 << DF_BYTE), oi, GETPC());
    pwd->b[7] = helper_ret_ldub_mmu(env, addr + (0 << DF_BYTE), oi, GETPC());
    pwd->b[8] = helper_ret_ldub_mmu(env, addr + (15 << DF_BYTE), oi, GETPC());
    pwd->b[9] = helper_ret_ldub_mmu(env, addr + (14 << DF_BYTE), oi, GETPC());
    pwd->b[10] = helper_ret_ldub_mmu(env, addr + (13 << DF_BYTE), oi, GETPC());
    pwd->b[11] = helper_ret_ldub_mmu(env, addr + (12 << DF_BYTE), oi, GETPC());
    pwd->b[12] = helper_ret_ldub_mmu(env, addr + (11 << DF_BYTE), oi, GETPC());
    pwd->b[13] = helper_ret_ldub_mmu(env, addr + (10 << DF_BYTE), oi, GETPC());
    pwd->b[14] = helper_ret_ldub_mmu(env, addr + (9 << DF_BYTE), oi, GETPC());
    pwd->b[15] = helper_ret_ldub_mmu(env, addr + (8 << DF_BYTE), oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
    pwd->b[0] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
    pwd->b[1] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
    pwd->b[2] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
    pwd->b[3] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
    pwd->b[4] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
    pwd->b[5] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
    pwd->b[6] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
    pwd->b[7] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
    pwd->b[8] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
    pwd->b[9] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
    pwd->b[10] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
    pwd->b[11] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
    pwd->b[12] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
    pwd->b[13] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
    pwd->b[14] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
    pwd->b[15] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
#else
    pwd->b[0] = cpu_ldub_data(env, addr + (7 << DF_BYTE));
    pwd->b[1] = cpu_ldub_data(env, addr + (6 << DF_BYTE));
    pwd->b[2] = cpu_ldub_data(env, addr + (5 << DF_BYTE));
    pwd->b[3] = cpu_ldub_data(env, addr + (4 << DF_BYTE));
    pwd->b[4] = cpu_ldub_data(env, addr + (3 << DF_BYTE));
    pwd->b[5] = cpu_ldub_data(env, addr + (2 << DF_BYTE));
    pwd->b[6] = cpu_ldub_data(env, addr + (1 << DF_BYTE));
    pwd->b[7] = cpu_ldub_data(env, addr + (0 << DF_BYTE));
    pwd->b[8] = cpu_ldub_data(env, addr + (15 << DF_BYTE));
    pwd->b[9] = cpu_ldub_data(env, addr + (14 << DF_BYTE));
    pwd->b[10] = cpu_ldub_data(env, addr + (13 << DF_BYTE));
    pwd->b[11] = cpu_ldub_data(env, addr + (12 << DF_BYTE));
    pwd->b[12] = cpu_ldub_data(env, addr + (11 << DF_BYTE));
    pwd->b[13] = cpu_ldub_data(env, addr + (10 << DF_BYTE));
    pwd->b[14] = cpu_ldub_data(env, addr + (9 << DF_BYTE));
    pwd->b[15] = cpu_ldub_data(env, addr + (8 << DF_BYTE));
#endif
#endif
}

/* MSA LD.H: load 8 halfwords; same host-endian lane handling as LD.B. */
void helper_msa_ld_h(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    MEMOP_IDX(DF_HALF)
#if !defined(CONFIG_USER_ONLY)
#if !defined(HOST_WORDS_BIGENDIAN)
    pwd->h[0] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
    pwd->h[1] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
    pwd->h[2] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
    pwd->h[3] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
    pwd->h[4] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
    pwd->h[5] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
    pwd->h[6] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
    pwd->h[7] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
#else
    pwd->h[0] = helper_ret_lduw_mmu(env, addr + (3 << DF_HALF), oi, GETPC());
    pwd->h[1] = helper_ret_lduw_mmu(env, addr + (2 << DF_HALF), oi, GETPC());
    pwd->h[2] = helper_ret_lduw_mmu(env, addr + (1 << DF_HALF), oi, GETPC());
    pwd->h[3] = helper_ret_lduw_mmu(env, addr + (0 << DF_HALF), oi, GETPC());
    pwd->h[4] = helper_ret_lduw_mmu(env, addr + (7 << DF_HALF), oi, GETPC());
    pwd->h[5] = helper_ret_lduw_mmu(env, addr + (6 << DF_HALF), oi, GETPC());
    pwd->h[6] = helper_ret_lduw_mmu(env, addr + (5 << DF_HALF), oi, GETPC());
    pwd->h[7] = helper_ret_lduw_mmu(env, addr + (4 << DF_HALF), oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
    pwd->h[0] = cpu_lduw_data(env, addr + (0 << DF_HALF));
    pwd->h[1] = cpu_lduw_data(env, addr + (1 << DF_HALF));
    pwd->h[2] = cpu_lduw_data(env, addr + (2 << DF_HALF));
    pwd->h[3] = cpu_lduw_data(env, addr + (3 << DF_HALF));
    pwd->h[4] = cpu_lduw_data(env, addr + (4 << DF_HALF));
    pwd->h[5] = cpu_lduw_data(env, addr + (5 << DF_HALF));
    pwd->h[6] = cpu_lduw_data(env, addr + (6 << DF_HALF));
    pwd->h[7] = cpu_lduw_data(env, addr + (7 << DF_HALF));
#else
    pwd->h[0] = cpu_lduw_data(env, addr + (3 << DF_HALF));
    pwd->h[1] = cpu_lduw_data(env, addr + (2 << DF_HALF));
    pwd->h[2] = cpu_lduw_data(env, addr + (1 << DF_HALF));
    pwd->h[3] = cpu_lduw_data(env, addr + (0 << DF_HALF));
    pwd->h[4] = cpu_lduw_data(env, addr + (7 << DF_HALF));
    pwd->h[5] = cpu_lduw_data(env, addr + (6 << DF_HALF));
    pwd->h[6] = cpu_lduw_data(env, addr + (5 << DF_HALF));
    pwd->h[7] = cpu_lduw_data(env, addr + (4 << DF_HALF));
#endif
#endif
}

/* MSA LD.W: load 4 words; same host-endian lane handling as LD.B. */
void helper_msa_ld_w(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    MEMOP_IDX(DF_WORD)
#if !defined(CONFIG_USER_ONLY)
#if !defined(HOST_WORDS_BIGENDIAN)
    pwd->w[0] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
    pwd->w[1] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
    pwd->w[2] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
    pwd->w[3] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
#else
    pwd->w[0] = helper_ret_ldul_mmu(env, addr + (1 << DF_WORD), oi, GETPC());
    pwd->w[1] = helper_ret_ldul_mmu(env, addr + (0 << DF_WORD), oi, GETPC());
    pwd->w[2] = helper_ret_ldul_mmu(env, addr + (3 << DF_WORD), oi, GETPC());
    pwd->w[3] = helper_ret_ldul_mmu(env, addr + (2 << DF_WORD), oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
    pwd->w[0] = cpu_ldl_data(env, addr + (0 << DF_WORD));
    pwd->w[1] = cpu_ldl_data(env, addr + (1 << DF_WORD));
    pwd->w[2] = cpu_ldl_data(env, addr + (2 << DF_WORD));
    pwd->w[3] = cpu_ldl_data(env, addr + (3 << DF_WORD));
#else
    pwd->w[0] = cpu_ldl_data(env, addr + (1 << DF_WORD));
    pwd->w[1] = cpu_ldl_data(env, addr + (0 << DF_WORD));
    pwd->w[2] = cpu_ldl_data(env, addr + (3 << DF_WORD));
    pwd->w[3] = cpu_ldl_data(env, addr + (2 << DF_WORD));
#endif
#endif
}

/* MSA LD.D: load 2 doublewords; no lane swap needed at 64-bit width. */
void helper_msa_ld_d(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    MEMOP_IDX(DF_DOUBLE)
#if !defined(CONFIG_USER_ONLY)
    pwd->d[0] = helper_ret_ldq_mmu(env, addr + (0 << DF_DOUBLE), oi, GETPC());
    pwd->d[1] = helper_ret_ldq_mmu(env, addr + (1 << DF_DOUBLE), oi, GETPC());
#else
    pwd->d[0] = cpu_ldq_data(env, addr + (0 << DF_DOUBLE));
    pwd->d[1] = cpu_ldq_data(env, addr + (1 << DF_DOUBLE));
#endif
}

/* True if a full-width (MSA_WRLEN/8 byte) access at x crosses a page. */
#define MSA_PAGESPAN(x) \
        ((((x) & ~TARGET_PAGE_MASK) + MSA_WRLEN / 8 - 1) >= TARGET_PAGE_SIZE)

static inline void
ensure_writable_pages(CPUMIPSState *env,
                      target_ulong addr,
                      int mmu_idx,
                      uintptr_t retaddr)
{
    /*
     * A vector store that spans a page boundary must not fault halfway
     * through, so probe both pages for writability up front.
     */
    /* FIXME: Probe the actual accesses (pass and use a size) */
    if (unlikely(MSA_PAGESPAN(addr))) {
        /* first page */
        probe_write(env, addr, 0, mmu_idx, retaddr);
        /* second page */
        addr = (addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
        probe_write(env, addr, 0, mmu_idx, retaddr);
    }
}

/*
 * MSA ST.B: store the 16 bytes of vector register wd to (possibly
 * unaligned) addr.  Mirror image of helper_msa_ld_b, including the
 * lane-index swap on big-endian hosts.
 */
void helper_msa_st_b(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    int mmu_idx = cpu_mmu_index(env, false);

    MEMOP_IDX(DF_BYTE)
    ensure_writable_pages(env, addr, mmu_idx, GETPC());
#if !defined(CONFIG_USER_ONLY)
#if !defined(HOST_WORDS_BIGENDIAN)
    helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[0], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[1], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[2], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[3], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[4], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[5], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[6], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[7], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[8], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[9], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[10], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[11], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[12], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[13], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[14], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[15], oi, GETPC());
#else
    helper_ret_stb_mmu(env, addr + (7 << DF_BYTE), pwd->b[0], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (6 << DF_BYTE), pwd->b[1], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (5 << DF_BYTE), pwd->b[2], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (4 << DF_BYTE), pwd->b[3], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (3 << DF_BYTE), pwd->b[4], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (2 << DF_BYTE), pwd->b[5], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (1 << DF_BYTE), pwd->b[6], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (0 << DF_BYTE), pwd->b[7], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (15 << DF_BYTE), pwd->b[8], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (14 << DF_BYTE), pwd->b[9], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (13 << DF_BYTE), pwd->b[10], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (12 << DF_BYTE), pwd->b[11], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (11 << DF_BYTE), pwd->b[12], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (10 << DF_BYTE), pwd->b[13], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (9 << DF_BYTE), pwd->b[14], oi, GETPC());
    helper_ret_stb_mmu(env, addr + (8 << DF_BYTE), pwd->b[15], oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
    cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[0]);
    cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[1]);
    cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[2]);
    cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[3]);
    cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[4]);
    cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[5]);
    cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[6]);
    cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[7]);
    cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[8]);
    cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[9]);
    cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[10]);
    cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[11]);
    cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[12]);
    cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[13]);
    cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[14]);
    cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[15]);
#else
    cpu_stb_data(env, addr + (7 << DF_BYTE), pwd->b[0]);
    cpu_stb_data(env, addr + (6 << DF_BYTE), pwd->b[1]);
    cpu_stb_data(env, addr + (5 << DF_BYTE), pwd->b[2]);
    cpu_stb_data(env, addr + (4 << DF_BYTE), pwd->b[3]);
    cpu_stb_data(env, addr + (3 << DF_BYTE), pwd->b[4]);
    cpu_stb_data(env, addr + (2 << DF_BYTE), pwd->b[5]);
    cpu_stb_data(env, addr + (1 << DF_BYTE), pwd->b[6]);
    cpu_stb_data(env, addr + (0 << DF_BYTE), pwd->b[7]);
    cpu_stb_data(env, addr + (15 << DF_BYTE), pwd->b[8]);
    cpu_stb_data(env, addr + (14 << DF_BYTE), pwd->b[9]);
    cpu_stb_data(env, addr + (13 << DF_BYTE), pwd->b[10]);
    cpu_stb_data(env, addr + (12 << DF_BYTE), pwd->b[11]);
    cpu_stb_data(env, addr + (11 << DF_BYTE), pwd->b[12]);
    cpu_stb_data(env, addr + (10 << DF_BYTE), pwd->b[13]);
    cpu_stb_data(env, addr + (9 << DF_BYTE), pwd->b[14]);
    cpu_stb_data(env, addr + (8 << DF_BYTE), pwd->b[15]);
#endif
#endif
}

/* MSA ST.H: store 8 halfwords; mirror of helper_msa_ld_h. */
void helper_msa_st_h(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    int mmu_idx = cpu_mmu_index(env, false);

    MEMOP_IDX(DF_HALF)
    ensure_writable_pages(env, addr, mmu_idx, GETPC());
#if !defined(CONFIG_USER_ONLY)
#if !defined(HOST_WORDS_BIGENDIAN)
    helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[0], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[1], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[2], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[3], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[4], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[5], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[6], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[7], oi, GETPC());
#else
    helper_ret_stw_mmu(env, addr + (3 << DF_HALF), pwd->h[0], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (2 << DF_HALF), pwd->h[1], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (1 << DF_HALF), pwd->h[2], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (0 << DF_HALF), pwd->h[3], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (7 << DF_HALF), pwd->h[4], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (6 << DF_HALF), pwd->h[5], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (5 << DF_HALF), pwd->h[6], oi, GETPC());
    helper_ret_stw_mmu(env, addr + (4 << DF_HALF), pwd->h[7], oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
    cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[0]);
    cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[1]);
    cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[2]);
    cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[3]);
    cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[4]);
    cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[5]);
    cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[6]);
    cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[7]);
#else
    cpu_stw_data(env, addr + (3 << DF_HALF), pwd->h[0]);
    cpu_stw_data(env, addr + (2 << DF_HALF), pwd->h[1]);
    cpu_stw_data(env, addr + (1 << DF_HALF), pwd->h[2]);
    cpu_stw_data(env, addr + (0 << DF_HALF), pwd->h[3]);
    cpu_stw_data(env, addr + (7 << DF_HALF), pwd->h[4]);
    cpu_stw_data(env, addr + (6 << DF_HALF), pwd->h[5]);
    cpu_stw_data(env, addr + (5 << DF_HALF), pwd->h[6]);
    cpu_stw_data(env, addr + (4 << DF_HALF), pwd->h[7]);
#endif
#endif
}

/* MSA ST.W: store 4 words; mirror of helper_msa_ld_w. */
void helper_msa_st_w(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    int mmu_idx = cpu_mmu_index(env, false);

    MEMOP_IDX(DF_WORD)
    ensure_writable_pages(env, addr, mmu_idx, GETPC());
#if !defined(CONFIG_USER_ONLY)
#if !defined(HOST_WORDS_BIGENDIAN)
    helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[0], oi, GETPC());
    helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[1], oi, GETPC());
    helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[2], oi, GETPC());
    helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[3], oi, GETPC());
#else
    helper_ret_stl_mmu(env, addr + (1 << DF_WORD), pwd->w[0], oi, GETPC());
    helper_ret_stl_mmu(env, addr + (0 << DF_WORD), pwd->w[1], oi, GETPC());
    helper_ret_stl_mmu(env, addr + (3 << DF_WORD), pwd->w[2], oi, GETPC());
    helper_ret_stl_mmu(env, addr + (2 << DF_WORD), pwd->w[3], oi, GETPC());
#endif
#else
#if !defined(HOST_WORDS_BIGENDIAN)
    cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[0]);
    cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[1]);
    cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[2]);
    cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[3]);
#else
    cpu_stl_data(env, addr + (1 << DF_WORD), pwd->w[0]);
    cpu_stl_data(env, addr + (0 << DF_WORD), pwd->w[1]);
    cpu_stl_data(env, addr + (3 << DF_WORD), pwd->w[2]);
    cpu_stl_data(env, addr + (2 << DF_WORD), pwd->w[3]);
#endif
#endif
}

/* MSA ST.D: store 2 doublewords; no lane swap needed at 64-bit width. */
void helper_msa_st_d(CPUMIPSState *env, uint32_t wd,
                     target_ulong addr)
{
    wr_t *pwd = &(env->active_fpu.fpr[wd].wr);
    int mmu_idx = cpu_mmu_index(env, false);

    MEMOP_IDX(DF_DOUBLE)
    ensure_writable_pages(env, addr, mmu_idx, GETPC());
#if !defined(CONFIG_USER_ONLY)
    helper_ret_stq_mmu(env, addr + (0 << DF_DOUBLE), pwd->d[0], oi, GETPC());
    helper_ret_stq_mmu(env, addr + (1 << DF_DOUBLE), pwd->d[1], oi, GETPC());
#else
    cpu_stq_data(env, addr + (0 << DF_DOUBLE), pwd->d[0]);
    cpu_stq_data(env, addr + (1 << DF_DOUBLE), pwd->d[1]);
#endif
}

/*
 * CACHE instruction.  Only the ITC tag operations are modeled:
 * op 9 = Index Store Tag (write CP0_TagLo to the tag region),
 * op 5 = Index Load Tag (read the tag region into CP0_TagLo).
 * All other cache ops, and user-only builds, are no-ops.
 */
void helper_cache(CPUMIPSState *env, target_ulong addr, uint32_t op)
{
#ifndef CONFIG_USER_ONLY
    target_ulong index = addr & 0x1fffffff;
    if (op == 9) {
        /* Index Store Tag */
        memory_region_dispatch_write(env->itc_tag, index, env->CP0_TagLo,
                                     MO_64, MEMTXATTRS_UNSPECIFIED);
    } else if (op == 5) {
        /* Index Load Tag */
        memory_region_dispatch_read(env->itc_tag, index, &env->CP0_TagLo,
                                    MO_64, MEMTXATTRS_UNSPECIFIED);
    }
#endif
}