Lines Matching +full:d +full:- +full:tlb +full:- +full:sets (the full-text query tokens for the hyphen-split identifier "d-tlb-sets")

The excerpts below are the matching lines from the MIPS KVM instruction emulator, arch/mips/kvm/emulate.c in the Linux kernel. The number opening each line is its line number in that file, the trailing "in foo()" names the enclosing function, and non-matching lines are omitted, so some comments and statements appear truncated.
23 #include <asm/cpu-info.h>
46 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_compute_return_epc()
53 return -EINVAL; in kvm_compute_return_epc()
66 arch->gprs[insn.r_format.rd] = epc + 8; in kvm_compute_return_epc()
69 nextpc = arch->gprs[insn.r_format.rs]; in kvm_compute_return_epc()
72 return -EINVAL; in kvm_compute_return_epc()
85 if ((long)arch->gprs[insn.i_format.rs] < 0) in kvm_compute_return_epc()
94 if ((long)arch->gprs[insn.i_format.rs] >= 0) in kvm_compute_return_epc()
103 arch->gprs[31] = epc + 8; in kvm_compute_return_epc()
104 if ((long)arch->gprs[insn.i_format.rs] < 0) in kvm_compute_return_epc()
113 arch->gprs[31] = epc + 8; in kvm_compute_return_epc()
114 if ((long)arch->gprs[insn.i_format.rs] >= 0) in kvm_compute_return_epc()
124 return -EINVAL; in kvm_compute_return_epc()
136 return -EINVAL; in kvm_compute_return_epc()
142 arch->gprs[31] = instpc + 8; in kvm_compute_return_epc()
155 if (arch->gprs[insn.i_format.rs] == in kvm_compute_return_epc()
156 arch->gprs[insn.i_format.rt]) in kvm_compute_return_epc()
165 if (arch->gprs[insn.i_format.rs] != in kvm_compute_return_epc()
166 arch->gprs[insn.i_format.rt]) in kvm_compute_return_epc()
179 if ((long)arch->gprs[insn.i_format.rs] <= 0) in kvm_compute_return_epc()
192 if ((long)arch->gprs[insn.i_format.rs] > 0) in kvm_compute_return_epc()
202 return -EINVAL; in kvm_compute_return_epc()
211 return -EINVAL; in kvm_compute_return_epc()
217 return -EINVAL; in kvm_compute_return_epc()
223 return -EINVAL; in kvm_compute_return_epc()
234 /* Fall through - Compact branches not supported before R6 */ in kvm_compute_return_epc()
237 return -EINVAL; in kvm_compute_return_epc()
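The branch cases above all reduce to the same MIPS arithmetic: a taken conditional branch targets the delay-slot address plus a sign-extended 16-bit word offset, J/JAL splice a 26-bit instr_index into the current 256 MB region, and link forms write epc + 8, the instruction after the delay slot. A minimal userspace sketch of that arithmetic; branch_target() and jump_target() are illustrative names, not kernel helpers:

#include <assert.h>
#include <stdint.h>

/* Taken conditional branch: delay-slot PC plus a sign-extended
 * 16-bit offset scaled by 4. */
static uint64_t branch_target(uint64_t epc, int16_t offset)
{
        return epc + 4 + ((int64_t)offset << 2);
}

/* J/JAL: the 26-bit instr_index, scaled by 4, replaces the low
 * 28 bits of the delay-slot address. */
static uint64_t jump_target(uint64_t epc, uint32_t instr_index)
{
        return ((epc + 4) & ~0x0fffffffULL) | ((uint64_t)instr_index << 2);
}

int main(void)
{
        /* offset -1 branches back to the branch itself */
        assert(branch_target(0x80001000, -1) == 0x80001000);
        assert(jump_target(0x80001000, 0x40) == 0x80000100);
        return 0;
}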
249 err = kvm_compute_return_epc(vcpu, vcpu->arch.pc, in update_pc()
250 &vcpu->arch.pc); in update_pc()
254 vcpu->arch.pc += 4; in update_pc()
257 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); in update_pc()
263 * kvm_get_badinstr() - Get bad instruction encoding.
276 *out = vcpu->arch.host_cp0_badinstr; in kvm_get_badinstr()
284 * kvm_get_badinstrp() - Get bad prior instruction encoding.
297 *out = vcpu->arch.host_cp0_badinstrp; in kvm_get_badinstrp()
305 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
314 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_count_disabled()
316 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || in kvm_mips_count_disabled()
321 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
323 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
333 delta = now_ns + vcpu->arch.count_dyn_bias; in kvm_mips_ktime_to_count()
335 if (delta >= vcpu->arch.count_period) { in kvm_mips_ktime_to_count()
337 periods = div64_s64(now_ns, vcpu->arch.count_period); in kvm_mips_ktime_to_count()
338 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period; in kvm_mips_ktime_to_count()
340 delta = now_ns + vcpu->arch.count_dyn_bias; in kvm_mips_ktime_to_count()
353 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC); in kvm_mips_ktime_to_count()
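The scaling above is safe because the dynamic bias keeps delta below one full 32-bit count period, count_period = (NSEC_PER_SEC << 32) / count_hz, so delta * count_hz never overflows 64 bits. A self-contained sketch of the same computation (ktime_to_count() here is a hypothetical stand-in, not the kernel function):

#include <assert.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t ktime_to_count(uint64_t delta_ns, uint32_t count_hz)
{
        /* safe: delta_ns * count_hz <= NSEC_PER_SEC << 32 < 2^63 */
        return (uint32_t)(delta_ns * count_hz / NSEC_PER_SEC);
}

int main(void)
{
        uint32_t hz = 100000000;                        /* 100 MHz */
        uint64_t period_ns = (NSEC_PER_SEC << 32) / hz; /* ~42.95 s */

        assert(ktime_to_count(10, hz) == 1);            /* 10 ns = 1 tick */
        assert(ktime_to_count(NSEC_PER_SEC, hz) == hz); /* 1 s = 10^8 */
        assert(ktime_to_count(period_ns - 10, hz) == 0xffffffff);
        return 0;
}

period_ns here is exactly the count_period that kvm_mips_init_count() computes below.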
357 * kvm_mips_count_time() - Get effective current time.
368 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) in kvm_mips_count_time()
369 return vcpu->arch.count_resume; in kvm_mips_count_time()
375 * kvm_mips_read_count_running() - Read the current count value as if running.
386 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_read_count_running()
392 count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); in kvm_mips_read_count_running()
399 if ((s32)(count - compare) < 0) in kvm_mips_read_count_running()
408 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer); in kvm_mips_read_count_running()
409 threshold = ktime_add_ns(now, vcpu->arch.count_period / 4); in kvm_mips_read_count_running()
415 running = hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_mips_read_count_running()
418 kvm_mips_callbacks->queue_timer_int(vcpu); in kvm_mips_read_count_running()
426 vcpu->arch.count_period); in kvm_mips_read_count_running()
427 hrtimer_start(&vcpu->arch.comparecount_timer, expires, in kvm_mips_read_count_running()
436 * kvm_mips_read_count() - Read the current count value.
446 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_read_count()
456 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
476 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_mips_freeze_hrtimer()
486 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
504 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_resume_hrtimer()
511 delta = (u64)(u32)(compare - count - 1) + 1; in kvm_mips_resume_hrtimer()
512 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); in kvm_mips_resume_hrtimer()
516 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_mips_resume_hrtimer()
517 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS); in kvm_mips_resume_hrtimer()
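The expression (u64)(u32)(compare - count - 1) + 1 above is the wrap-safe number of ticks until CP0_Compare is reached: it can never be zero, so compare == count means a full 2^32-tick period rather than an immediate expiry. The result is then scaled to nanoseconds with delta * NSEC_PER_SEC / count_hz. A sketch with a few spot checks (ticks_to_expiry() is an illustrative name):

#include <assert.h>
#include <stdint.h>

static uint64_t ticks_to_expiry(uint32_t count, uint32_t compare)
{
        return (uint64_t)(uint32_t)(compare - count - 1) + 1;
}

int main(void)
{
        assert(ticks_to_expiry(10, 11) == 1);            /* next tick */
        assert(ticks_to_expiry(0xfffffffe, 1) == 3);     /* across wrap */
        assert(ticks_to_expiry(5, 5) == 0x100000000ULL); /* full period */
        return 0;
}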
521 * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
551 before_count = vcpu->arch.count_bias + in kvm_mips_restore_hrtimer()
561 drift = count - before_count; in kvm_mips_restore_hrtimer()
564 vcpu->arch.count_bias += drift; in kvm_mips_restore_hrtimer()
571 now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now); in kvm_mips_restore_hrtimer()
577 drift = count - now_count; in kvm_mips_restore_hrtimer()
580 vcpu->arch.count_bias += drift; in kvm_mips_restore_hrtimer()
586 delta = (u64)(u32)(now_count - count); in kvm_mips_restore_hrtimer()
587 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz); in kvm_mips_restore_hrtimer()
597 * kvm_mips_write_count() - Modify the count and update timer.
601 * Sets the CP0_Count value and updates the timer accordingly.
605 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_write_count()
610 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); in kvm_mips_write_count()
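Writing CP0_Count only records a bias against the free-running time base; a later read returns the written value plus the ticks elapsed since. A minimal model of that trick, with illustrative names and the time base reduced to a tick count:

#include <assert.h>
#include <stdint.h>

static uint32_t count_bias;

static void write_count(uint32_t now_ticks, uint32_t count)
{
        count_bias = count - now_ticks;   /* 32-bit wrap is intended */
}

static uint32_t read_count(uint32_t now_ticks)
{
        return count_bias + now_ticks;
}

int main(void)
{
        write_count(1000, 5);             /* guest writes CP0_Count = 5 */
        assert(read_count(1000) == 5);
        assert(read_count(1250) == 255);  /* 250 ticks later */
        return 0;
}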
621 * kvm_mips_init_count() - Initialise timer.
630 vcpu->arch.count_hz = count_hz; in kvm_mips_init_count()
631 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); in kvm_mips_init_count()
632 vcpu->arch.count_dyn_bias = 0; in kvm_mips_init_count()
639 * kvm_mips_set_count_hz() - Update the frequency of the timer.
646 * Returns: -EINVAL if @count_hz is out of range.
651 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_set_count_hz()
658 return -EINVAL; in kvm_mips_set_count_hz()
660 if (vcpu->arch.count_hz == count_hz) in kvm_mips_set_count_hz()
673 vcpu->arch.count_hz = count_hz; in kvm_mips_set_count_hz()
674 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz); in kvm_mips_set_count_hz()
675 vcpu->arch.count_dyn_bias = 0; in kvm_mips_set_count_hz()
678 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); in kvm_mips_set_count_hz()
687 * kvm_mips_write_compare() - Modify compare and update timer.
698 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_write_compare()
701 s32 delta = compare - old_compare; in kvm_mips_write_compare()
710 kvm_mips_callbacks->dequeue_timer_int(vcpu); in kvm_mips_write_compare()
726 write_c0_gtoffset(compare - read_c0_count()); in kvm_mips_write_compare()
736 kvm_mips_callbacks->dequeue_timer_int(vcpu); in kvm_mips_write_compare()
766 write_c0_gtoffset(compare - read_c0_count()); in kvm_mips_write_compare()
770 * kvm_mips_count_disable() - Disable count.
783 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_count_disable()
788 hrtimer_cancel(&vcpu->arch.comparecount_timer); in kvm_mips_count_disable()
799 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
810 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_count_disable_cause()
813 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) in kvm_mips_count_disable_cause()
818 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
830 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_count_enable_cause()
845 * kvm_mips_set_count_ctl() - Update the count control KVM register.
851 * Returns: -EINVAL if reserved bits are set.
856 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_set_count_ctl()
857 s64 changed = count_ctl ^ vcpu->arch.count_ctl; in kvm_mips_set_count_ctl()
864 return -EINVAL; in kvm_mips_set_count_ctl()
867 vcpu->arch.count_ctl = count_ctl; in kvm_mips_set_count_ctl()
875 vcpu->arch.count_resume = ktime_get(); in kvm_mips_set_count_ctl()
878 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); in kvm_mips_set_count_ctl()
886 delta = (u64)(u32)(compare - count - 1) + 1; in kvm_mips_set_count_ctl()
888 vcpu->arch.count_hz); in kvm_mips_set_count_ctl()
889 expire = ktime_add_ns(vcpu->arch.count_resume, delta); in kvm_mips_set_count_ctl()
895 kvm_mips_callbacks->queue_timer_int(vcpu); in kvm_mips_set_count_ctl()
907 * kvm_mips_set_count_resume() - Update the count resume KVM register.
913 * Returns: -EINVAL if out of valid range (0..now).
924 return -EINVAL; in kvm_mips_set_count_resume()
926 vcpu->arch.count_resume = ns_to_ktime(count_resume); in kvm_mips_set_count_resume()
931 * kvm_mips_count_timeout() - Push timer forward on timeout.
941 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, in kvm_mips_count_timeout()
942 vcpu->arch.count_period); in kvm_mips_count_timeout()
948 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emul_eret()
953 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); in kvm_mips_emul_eret()
955 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, in kvm_mips_emul_eret()
958 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); in kvm_mips_emul_eret()
962 vcpu->arch.pc); in kvm_mips_emul_eret()
971 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, in kvm_mips_emul_wait()
972 vcpu->arch.pending_exceptions); in kvm_mips_emul_wait()
974 ++vcpu->stat.wait_exits; in kvm_mips_emul_wait()
976 if (!vcpu->arch.pending_exceptions) { in kvm_mips_emul_wait()
978 vcpu->arch.wait = 1; in kvm_mips_emul_wait()
987 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in kvm_mips_emul_wait()
997 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_change_entryhi()
998 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; in kvm_mips_change_entryhi()
1008 * Guest user page table will get flushed lazily on re-entry to in kvm_mips_change_entryhi()
1011 kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN); in kvm_mips_change_entryhi()
1015 * The user MMU context will be regenerated lazily on re-entry in kvm_mips_change_entryhi()
1031 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emul_tlbr()
1032 struct kvm_mips_tlb *tlb; in kvm_mips_emul_tlbr() local
1033 unsigned long pc = vcpu->arch.pc; in kvm_mips_emul_tlbr()
1040 index &= KVM_MIPS_GUEST_TLB_SIZE - 1; in kvm_mips_emul_tlbr()
1043 tlb = &vcpu->arch.guest_tlb[index]; in kvm_mips_emul_tlbr()
1044 kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask); in kvm_mips_emul_tlbr()
1045 kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]); in kvm_mips_emul_tlbr()
1046 kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]); in kvm_mips_emul_tlbr()
1047 kvm_mips_change_entryhi(vcpu, tlb->tlb_hi); in kvm_mips_emul_tlbr()
1053 * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
1055 * @tlb: TLB entry being removed.
1058 * can arrange TLB flushes on this and other CPUs.
1061 struct kvm_mips_tlb *tlb) in kvm_mips_invalidate_guest_tlb() argument
1063 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; in kvm_mips_invalidate_guest_tlb()
1064 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; in kvm_mips_invalidate_guest_tlb()
1069 if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) in kvm_mips_invalidate_guest_tlb()
1071 /* Don't touch host kernel page tables or TLB mappings */ in kvm_mips_invalidate_guest_tlb()
1072 if ((unsigned long)tlb->tlb_hi > 0x7fffffff) in kvm_mips_invalidate_guest_tlb()
1075 user = tlb->tlb_hi < KVM_GUEST_KSEG0; in kvm_mips_invalidate_guest_tlb()
1080 kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user); in kvm_mips_invalidate_guest_tlb()
1083 * Probe the shadow host TLB for the entry being overwritten, if one in kvm_mips_invalidate_guest_tlb()
1086 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true); in kvm_mips_invalidate_guest_tlb()
1101 /* Write Guest TLB Entry @ Index */
1104 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emul_tlbwi()
1106 struct kvm_mips_tlb *tlb = NULL; in kvm_mips_emul_tlbwi() local
1107 unsigned long pc = vcpu->arch.pc; in kvm_mips_emul_tlbwi()
1110 kvm_debug("%s: illegal index: %d\n", __func__, index); in kvm_mips_emul_tlbwi()
1111 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", in kvm_mips_emul_tlbwi()
1119 tlb = &vcpu->arch.guest_tlb[index]; in kvm_mips_emul_tlbwi()
1121 kvm_mips_invalidate_guest_tlb(vcpu, tlb); in kvm_mips_emul_tlbwi()
1123 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); in kvm_mips_emul_tlbwi()
1124 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); in kvm_mips_emul_tlbwi()
1125 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); in kvm_mips_emul_tlbwi()
1126 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); in kvm_mips_emul_tlbwi()
1128 kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", in kvm_mips_emul_tlbwi()
1137 /* Write Guest TLB Entry @ Random Index */
1140 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emul_tlbwr()
1141 struct kvm_mips_tlb *tlb = NULL; in kvm_mips_emul_tlbwr() local
1142 unsigned long pc = vcpu->arch.pc; in kvm_mips_emul_tlbwr()
1146 tlb = &vcpu->arch.guest_tlb[index]; in kvm_mips_emul_tlbwr()
1148 kvm_mips_invalidate_guest_tlb(vcpu, tlb); in kvm_mips_emul_tlbwr()
1150 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); in kvm_mips_emul_tlbwr()
1151 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); in kvm_mips_emul_tlbwr()
1152 tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); in kvm_mips_emul_tlbwr()
1153 tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); in kvm_mips_emul_tlbwr()
1155 kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", in kvm_mips_emul_tlbwr()
1165 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emul_tlbp()
1167 unsigned long pc = vcpu->arch.pc; in kvm_mips_emul_tlbp()
1168 int index = -1; in kvm_mips_emul_tlbp()
1174 kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, in kvm_mips_emul_tlbp()
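TLBP searches the guest TLB for the entry matching CP0_EntryHi and writes its index to CP0_Index, or sets Index.P when nothing matches (the -1 above). A simplified probe under assumed constants, ignoring page masks and the global bit; the kernel's real lookup is kvm_mips_guest_tlb_lookup():

#include <assert.h>

#define GUEST_TLB_SIZE  4
#define VPN2_MASK       0xffffe000UL    /* illustrative: 4 KiB pages */
#define ASID_MASK       0xffUL

struct guest_tlb {
        unsigned long tlb_hi;
        unsigned long tlb_lo[2];
        unsigned long tlb_mask;
};

/* Return the matching index, or -1 (TLBP then sets Index.P). */
static int guest_tlb_probe(const struct guest_tlb *tlb, int size,
                           unsigned long entryhi)
{
        int i;

        for (i = 0; i < size; i++)
                if ((tlb[i].tlb_hi & VPN2_MASK) == (entryhi & VPN2_MASK) &&
                    (tlb[i].tlb_hi & ASID_MASK) == (entryhi & ASID_MASK))
                        return i;
        return -1;
}

int main(void)
{
        struct guest_tlb tlb[GUEST_TLB_SIZE] = {
                [2] = { .tlb_hi = 0x00400000UL | 0x17 },
        };

        assert(guest_tlb_probe(tlb, GUEST_TLB_SIZE, 0x00400000UL | 0x17) == 2);
        assert(guest_tlb_probe(tlb, GUEST_TLB_SIZE, 0x00400000UL | 0x18) == -1);
        return 0;
}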
1181 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
1185 * register, by userland (currently read-only to the guest).
1192 if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) in kvm_mips_config1_wrmask()
1199 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
1203 * register, by userland (currently read-only to the guest).
1211 if (kvm_mips_guest_can_have_msa(&vcpu->arch)) in kvm_mips_config3_wrmask()
1218 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
1222 * register, by userland (currently read-only to the guest).
1236 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
1247 if (kvm_mips_guest_has_msa(&vcpu->arch)) in kvm_mips_config5_wrmask()
1254 if (kvm_mips_guest_has_fpu(&vcpu->arch)) { in kvm_mips_config5_wrmask()
1267 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_CP0()
1276 curr_pc = vcpu->arch.pc; in kvm_mips_emulate_CP0()
1283 case tlbr_op: /* Read indexed TLB entry */ in kvm_mips_emulate_CP0()
1292 case tlbp_op: /* TLB Probe */ in kvm_mips_emulate_CP0()
1316 cop0->stat[rd][sel]++; in kvm_mips_emulate_CP0()
1320 vcpu->arch.gprs[rt] = in kvm_mips_emulate_CP0()
1323 vcpu->arch.gprs[rt] = 0x0; in kvm_mips_emulate_CP0()
1328 vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel]; in kvm_mips_emulate_CP0()
1337 vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1341 vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; in kvm_mips_emulate_CP0()
1345 vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1350 cop0->stat[rd][sel]++; in kvm_mips_emulate_CP0()
1354 vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1357 && (vcpu->arch.gprs[rt] >= in kvm_mips_emulate_CP0()
1359 kvm_err("Invalid TLB Index: %ld", in kvm_mips_emulate_CP0()
1360 vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1370 vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1373 vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1377 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1383 vcpu->arch.gprs[rt], in kvm_mips_emulate_CP0()
1389 val = vcpu->arch.gprs[rt]; in kvm_mips_emulate_CP0()
1400 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_emulate_CP0()
1431 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_mips_emulate_CP0()
1442 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) in kvm_mips_emulate_CP0()
1454 if (!kvm_mips_guest_has_fpu(&vcpu->arch)) in kvm_mips_emulate_CP0()
1461 val = vcpu->arch.gprs[rt]; in kvm_mips_emulate_CP0()
1477 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) in kvm_mips_emulate_CP0()
1487 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) in kvm_mips_emulate_CP0()
1498 new_cause = vcpu->arch.gprs[rt]; in kvm_mips_emulate_CP0()
1518 cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask; in kvm_mips_emulate_CP0()
1520 cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; in kvm_mips_emulate_CP0()
1528 kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", in kvm_mips_emulate_CP0()
1529 vcpu->arch.pc, rt, rd, sel); in kvm_mips_emulate_CP0()
1532 vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1538 cop0->stat[MIPS_CP0_STATUS][0]++; in kvm_mips_emulate_CP0()
1541 vcpu->arch.gprs[rt] = in kvm_mips_emulate_CP0()
1546 vcpu->arch.pc); in kvm_mips_emulate_CP0()
1550 vcpu->arch.pc); in kvm_mips_emulate_CP0()
1558 u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; in kvm_mips_emulate_CP0()
1560 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; in kvm_mips_emulate_CP0()
1562 * We don't support any shadow register sets, so in kvm_mips_emulate_CP0()
1569 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, in kvm_mips_emulate_CP0()
1570 vcpu->arch.gprs[rt]); in kvm_mips_emulate_CP0()
1571 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; in kvm_mips_emulate_CP0()
1576 vcpu->arch.pc, inst.c0r_format.rs); in kvm_mips_emulate_CP0()
1585 vcpu->arch.pc = curr_pc; in kvm_mips_emulate_CP0()
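The rt/rd/sel indexing above follows the COP0 move encoding: rs (bits 25:21) selects mfc0/dmfc0/mtc0/dmtc0, rt (bits 20:16) is the GPR, and rd (bits 15:11) plus sel (bits 2:0) name the coprocessor register. A standalone decoder matching that layout (decode_c0r() and struct c0r are illustrative names):

#include <assert.h>
#include <stdint.h>

struct c0r {
        unsigned int rs, rt, rd, sel;
};

static struct c0r decode_c0r(uint32_t inst)
{
        struct c0r f;

        f.rs  = (inst >> 21) & 0x1f;    /* 0x00 mfc0 .. 0x05 dmtc0 */
        f.rt  = (inst >> 16) & 0x1f;    /* general-purpose register */
        f.rd  = (inst >> 11) & 0x1f;    /* CP0 register number */
        f.sel = inst & 0x7;             /* CP0 register select */
        return f;
}

int main(void)
{
        /* mfc0 $2, $12, 0 (read Status into v0) == 0x40026000 */
        struct c0r f = decode_c0r(0x40026000);

        assert(f.rs == 0 && f.rt == 2 && f.rd == 12 && f.sel == 0);
        return 0;
}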
1604 struct kvm_run *run = vcpu->run; in kvm_mips_emulate_store()
1605 void *data = run->mmio.data; in kvm_mips_emulate_store()
1613 curr_pc = vcpu->arch.pc; in kvm_mips_emulate_store()
1620 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1621 vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_store()
1622 if (run->mmio.phys_addr == KVM_INVALID_ADDR) in kvm_mips_emulate_store()
1628 run->mmio.len = 8; in kvm_mips_emulate_store()
1629 *(u64 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1632 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1633 vcpu->arch.gprs[rt], *(u64 *)data); in kvm_mips_emulate_store()
1638 run->mmio.len = 4; in kvm_mips_emulate_store()
1639 *(u32 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1642 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1643 vcpu->arch.gprs[rt], *(u32 *)data); in kvm_mips_emulate_store()
1647 run->mmio.len = 2; in kvm_mips_emulate_store()
1648 *(u16 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1651 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1652 vcpu->arch.gprs[rt], *(u16 *)data); in kvm_mips_emulate_store()
1656 run->mmio.len = 1; in kvm_mips_emulate_store()
1657 *(u8 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1660 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1661 vcpu->arch.gprs[rt], *(u8 *)data); in kvm_mips_emulate_store()
1665 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1666 vcpu->arch.host_cp0_badvaddr) & (~0x3); in kvm_mips_emulate_store()
1667 run->mmio.len = 4; in kvm_mips_emulate_store()
1668 imme = vcpu->arch.host_cp0_badvaddr & 0x3; in kvm_mips_emulate_store()
1672 (vcpu->arch.gprs[rt] >> 24); in kvm_mips_emulate_store()
1676 (vcpu->arch.gprs[rt] >> 16); in kvm_mips_emulate_store()
1680 (vcpu->arch.gprs[rt] >> 8); in kvm_mips_emulate_store()
1683 *(u32 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1690 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1691 vcpu->arch.gprs[rt], *(u32 *)data); in kvm_mips_emulate_store()
1695 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1696 vcpu->arch.host_cp0_badvaddr) & (~0x3); in kvm_mips_emulate_store()
1697 run->mmio.len = 4; in kvm_mips_emulate_store()
1698 imme = vcpu->arch.host_cp0_badvaddr & 0x3; in kvm_mips_emulate_store()
1701 *(u32 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1705 (vcpu->arch.gprs[rt] << 8); in kvm_mips_emulate_store()
1709 (vcpu->arch.gprs[rt] << 16); in kvm_mips_emulate_store()
1713 (vcpu->arch.gprs[rt] << 24); in kvm_mips_emulate_store()
1720 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1721 vcpu->arch.gprs[rt], *(u32 *)data); in kvm_mips_emulate_store()
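swl/swr split an unaligned word store into two partial stores; the shifts above pick out the register's high-order (swl) or low-order (swr) bytes for the lanes the access covers. A userspace rendering of the swl merge, assuming the data buffer starts as the word at the aligned address; the keep[] masks fill in, consistently with the visible shift lines, the mask lines this listing omits:

#include <assert.h>
#include <stdint.h>

static uint32_t swl_merge(uint32_t data, uint32_t rt, unsigned int imme)
{
        /* bytes of the old word that survive, per offset in word */
        static const uint32_t keep[4] = {
                0xffffff00, 0xffff0000, 0xff000000, 0x00000000
        };

        return (data & keep[imme]) | (rt >> (24 - 8 * imme));
}

int main(void)
{
        /* imme == 1: top two register bytes land in the low lanes */
        assert(swl_merge(0xaabbccdd, 0x11223344, 1) == 0xaabb1122);
        /* imme == 3: the full register is stored */
        assert(swl_merge(0xaabbccdd, 0x11223344, 3) == 0x11223344);
        return 0;
}

The swr cases mirror this with left shifts, and sdl/sdr extend the same scheme to 8-byte words.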
1726 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1727 vcpu->arch.host_cp0_badvaddr) & (~0x7); in kvm_mips_emulate_store()
1729 run->mmio.len = 8; in kvm_mips_emulate_store()
1730 imme = vcpu->arch.host_cp0_badvaddr & 0x7; in kvm_mips_emulate_store()
1734 ((vcpu->arch.gprs[rt] >> 56) & 0xff); in kvm_mips_emulate_store()
1738 ((vcpu->arch.gprs[rt] >> 48) & 0xffff); in kvm_mips_emulate_store()
1742 ((vcpu->arch.gprs[rt] >> 40) & 0xffffff); in kvm_mips_emulate_store()
1746 ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff); in kvm_mips_emulate_store()
1750 ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff); in kvm_mips_emulate_store()
1754 ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff); in kvm_mips_emulate_store()
1758 ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff); in kvm_mips_emulate_store()
1761 *(u64 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1768 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1769 vcpu->arch.gprs[rt], *(u64 *)data); in kvm_mips_emulate_store()
1773 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_store()
1774 vcpu->arch.host_cp0_badvaddr) & (~0x7); in kvm_mips_emulate_store()
1776 run->mmio.len = 8; in kvm_mips_emulate_store()
1777 imme = vcpu->arch.host_cp0_badvaddr & 0x7; in kvm_mips_emulate_store()
1780 *(u64 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1784 (vcpu->arch.gprs[rt] << 8); in kvm_mips_emulate_store()
1788 (vcpu->arch.gprs[rt] << 16); in kvm_mips_emulate_store()
1792 (vcpu->arch.gprs[rt] << 24); in kvm_mips_emulate_store()
1796 (vcpu->arch.gprs[rt] << 32); in kvm_mips_emulate_store()
1800 (vcpu->arch.gprs[rt] << 40); in kvm_mips_emulate_store()
1804 (vcpu->arch.gprs[rt] << 48); in kvm_mips_emulate_store()
1808 (vcpu->arch.gprs[rt] << 56); in kvm_mips_emulate_store()
1815 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1816 vcpu->arch.gprs[rt], *(u64 *)data); in kvm_mips_emulate_store()
1825 * Loongson-3 overridden sdc2 instructions. in kvm_mips_emulate_store()
1833 run->mmio.len = 1; in kvm_mips_emulate_store()
1834 *(u8 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1837 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1838 vcpu->arch.gprs[rt], *(u8 *)data); in kvm_mips_emulate_store()
1841 run->mmio.len = 2; in kvm_mips_emulate_store()
1842 *(u16 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1845 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1846 vcpu->arch.gprs[rt], *(u16 *)data); in kvm_mips_emulate_store()
1849 run->mmio.len = 4; in kvm_mips_emulate_store()
1850 *(u32 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1853 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1854 vcpu->arch.gprs[rt], *(u32 *)data); in kvm_mips_emulate_store()
1857 run->mmio.len = 8; in kvm_mips_emulate_store()
1858 *(u64 *)data = vcpu->arch.gprs[rt]; in kvm_mips_emulate_store()
1861 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, in kvm_mips_emulate_store()
1862 vcpu->arch.gprs[rt], *(u64 *)data); in kvm_mips_emulate_store()
1865 kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n", in kvm_mips_emulate_store()
1877 vcpu->mmio_needed = 1; in kvm_mips_emulate_store()
1878 run->mmio.is_write = 1; in kvm_mips_emulate_store()
1879 vcpu->mmio_is_write = 1; in kvm_mips_emulate_store()
1882 run->mmio.phys_addr, run->mmio.len, data); in kvm_mips_emulate_store()
1885 vcpu->mmio_needed = 0; in kvm_mips_emulate_store()
1893 vcpu->arch.pc = curr_pc; in kvm_mips_emulate_store()
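Both this store path and the load path below stop once run->mmio is filled in: the vCPU exits to userspace with KVM_EXIT_MMIO, the VMM performs the access, and re-entry completes a pending load. A sketch of the userspace half, using a simplified stand-in for the kvm_run MMIO block (the real layout lives in <linux/kvm.h>; the device model here is hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for the MMIO block the emulator fills in. */
struct mmio_exit {
        uint64_t phys_addr;
        uint8_t  data[8];
        uint32_t len;
        uint8_t  is_write;
};

static uint32_t dev_reg;        /* hypothetical one-register device */

static void handle_mmio_exit(struct mmio_exit *mmio)
{
        if (mmio->is_write)
                memcpy(&dev_reg, mmio->data, mmio->len);
        else
                memcpy(mmio->data, &dev_reg, mmio->len);
        /* re-entering the vCPU then finishes a pending load via
         * kvm_mips_complete_mmio_load() */
}

int main(void)
{
        struct mmio_exit m = { .phys_addr = 0x1f000000, .len = 4,
                               .is_write = 1 };
        uint32_t val = 0xdeadbeef;

        memcpy(m.data, &val, sizeof(val));
        handle_mmio_exit(&m);
        printf("device register: %#x\n", dev_reg);
        return 0;
}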
1900 struct kvm_run *run = vcpu->run; in kvm_mips_emulate_load()
1915 curr_pc = vcpu->arch.pc; in kvm_mips_emulate_load()
1919 vcpu->arch.io_pc = vcpu->arch.pc; in kvm_mips_emulate_load()
1920 vcpu->arch.pc = curr_pc; in kvm_mips_emulate_load()
1922 vcpu->arch.io_gpr = rt; in kvm_mips_emulate_load()
1924 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
1925 vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_load()
1926 if (run->mmio.phys_addr == KVM_INVALID_ADDR) in kvm_mips_emulate_load()
1929 vcpu->mmio_needed = 2; /* signed */ in kvm_mips_emulate_load()
1933 run->mmio.len = 8; in kvm_mips_emulate_load()
1937 vcpu->mmio_needed = 1; /* unsigned */ in kvm_mips_emulate_load()
1941 run->mmio.len = 4; in kvm_mips_emulate_load()
1945 vcpu->mmio_needed = 1; /* unsigned */ in kvm_mips_emulate_load()
1948 run->mmio.len = 2; in kvm_mips_emulate_load()
1952 vcpu->mmio_needed = 1; /* unsigned */ in kvm_mips_emulate_load()
1955 run->mmio.len = 1; in kvm_mips_emulate_load()
1959 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
1960 vcpu->arch.host_cp0_badvaddr) & (~0x3); in kvm_mips_emulate_load()
1962 run->mmio.len = 4; in kvm_mips_emulate_load()
1963 imme = vcpu->arch.host_cp0_badvaddr & 0x3; in kvm_mips_emulate_load()
1966 vcpu->mmio_needed = 3; /* 1 byte */ in kvm_mips_emulate_load()
1969 vcpu->mmio_needed = 4; /* 2 bytes */ in kvm_mips_emulate_load()
1972 vcpu->mmio_needed = 5; /* 3 bytes */ in kvm_mips_emulate_load()
1975 vcpu->mmio_needed = 6; /* 4 bytes */ in kvm_mips_emulate_load()
1983 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
1984 vcpu->arch.host_cp0_badvaddr) & (~0x3); in kvm_mips_emulate_load()
1986 run->mmio.len = 4; in kvm_mips_emulate_load()
1987 imme = vcpu->arch.host_cp0_badvaddr & 0x3; in kvm_mips_emulate_load()
1990 vcpu->mmio_needed = 7; /* 4 bytes */ in kvm_mips_emulate_load()
1993 vcpu->mmio_needed = 8; /* 3 bytes */ in kvm_mips_emulate_load()
1996 vcpu->mmio_needed = 9; /* 2 bytes */ in kvm_mips_emulate_load()
1999 vcpu->mmio_needed = 10; /* 1 byte */ in kvm_mips_emulate_load()
2008 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
2009 vcpu->arch.host_cp0_badvaddr) & (~0x7); in kvm_mips_emulate_load()
2011 run->mmio.len = 8; in kvm_mips_emulate_load()
2012 imme = vcpu->arch.host_cp0_badvaddr & 0x7; in kvm_mips_emulate_load()
2015 vcpu->mmio_needed = 11; /* 1 byte */ in kvm_mips_emulate_load()
2018 vcpu->mmio_needed = 12; /* 2 bytes */ in kvm_mips_emulate_load()
2021 vcpu->mmio_needed = 13; /* 3 bytes */ in kvm_mips_emulate_load()
2024 vcpu->mmio_needed = 14; /* 4 bytes */ in kvm_mips_emulate_load()
2027 vcpu->mmio_needed = 15; /* 5 bytes */ in kvm_mips_emulate_load()
2030 vcpu->mmio_needed = 16; /* 6 bytes */ in kvm_mips_emulate_load()
2033 vcpu->mmio_needed = 17; /* 7 bytes */ in kvm_mips_emulate_load()
2036 vcpu->mmio_needed = 18; /* 8 bytes */ in kvm_mips_emulate_load()
2044 run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( in kvm_mips_emulate_load()
2045 vcpu->arch.host_cp0_badvaddr) & (~0x7); in kvm_mips_emulate_load()
2047 run->mmio.len = 8; in kvm_mips_emulate_load()
2048 imme = vcpu->arch.host_cp0_badvaddr & 0x7; in kvm_mips_emulate_load()
2051 vcpu->mmio_needed = 19; /* 8 bytes */ in kvm_mips_emulate_load()
2054 vcpu->mmio_needed = 20; /* 7 bytes */ in kvm_mips_emulate_load()
2057 vcpu->mmio_needed = 21; /* 6 bytes */ in kvm_mips_emulate_load()
2060 vcpu->mmio_needed = 22; /* 5 bytes */ in kvm_mips_emulate_load()
2063 vcpu->mmio_needed = 23; /* 4 bytes */ in kvm_mips_emulate_load()
2066 vcpu->mmio_needed = 24; /* 3 bytes */ in kvm_mips_emulate_load()
2069 vcpu->mmio_needed = 25; /* 2 bytes */ in kvm_mips_emulate_load()
2072 vcpu->mmio_needed = 26; /* 1 byte */ in kvm_mips_emulate_load()
2085 * Loongson-3 overridden ldc2 instructions. in kvm_mips_emulate_load()
2093 run->mmio.len = 1; in kvm_mips_emulate_load()
2094 vcpu->mmio_needed = 27; /* signed */ in kvm_mips_emulate_load()
2097 run->mmio.len = 2; in kvm_mips_emulate_load()
2098 vcpu->mmio_needed = 28; /* signed */ in kvm_mips_emulate_load()
2101 run->mmio.len = 4; in kvm_mips_emulate_load()
2102 vcpu->mmio_needed = 29; /* signed */ in kvm_mips_emulate_load()
2105 run->mmio.len = 8; in kvm_mips_emulate_load()
2106 vcpu->mmio_needed = 30; /* signed */ in kvm_mips_emulate_load()
2109 kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n", in kvm_mips_emulate_load()
2119 vcpu->mmio_needed = 0; in kvm_mips_emulate_load()
2123 run->mmio.is_write = 0; in kvm_mips_emulate_load()
2124 vcpu->mmio_is_write = 0; in kvm_mips_emulate_load()
2127 run->mmio.phys_addr, run->mmio.len, run->mmio.data); in kvm_mips_emulate_load()
2131 vcpu->mmio_needed = 0; in kvm_mips_emulate_load()
2166 /* no matching guest TLB */ in kvm_mips_guest_cache_op()
2167 vcpu->arch.host_cp0_badvaddr = addr; in kvm_mips_guest_cache_op()
2168 vcpu->arch.pc = curr_pc; in kvm_mips_guest_cache_op()
2172 /* invalid matching guest TLB */ in kvm_mips_guest_cache_op()
2173 vcpu->arch.host_cp0_badvaddr = addr; in kvm_mips_guest_cache_op()
2174 vcpu->arch.pc = curr_pc; in kvm_mips_guest_cache_op()
2190 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_cache()
2198 curr_pc = vcpu->arch.pc; in kvm_mips_emulate_cache()
2212 va = arch->gprs[base] + offset; in kvm_mips_emulate_cache()
2214 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", in kvm_mips_emulate_cache()
2215 cache, op, base, arch->gprs[base], offset); in kvm_mips_emulate_cache()
2223 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", in kvm_mips_emulate_cache()
2224 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, in kvm_mips_emulate_cache()
2225 arch->gprs[base], offset); in kvm_mips_emulate_cache()
2300 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", in kvm_mips_emulate_cache()
2301 cache, op, base, arch->gprs[base], offset); in kvm_mips_emulate_cache()
2308 vcpu->arch.pc = curr_pc; in kvm_mips_emulate_cache()
2337 ++vcpu->stat.cache_exits; in kvm_mips_emulate_inst()
2345 ++vcpu->stat.cache_exits; in kvm_mips_emulate_inst()
2370 * kvm_mips_guest_exception_base() - Find guest exception vector base address.
2377 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_guest_exception_base()
2389 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_syscall()
2390 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_syscall()
2395 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_syscall()
2403 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); in kvm_mips_emulate_syscall()
2409 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_syscall()
2423 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_tlbmiss_ld()
2424 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_tlbmiss_ld()
2425 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | in kvm_mips_emulate_tlbmiss_ld()
2430 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_tlbmiss_ld()
2438 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", in kvm_mips_emulate_tlbmiss_ld()
2439 arch->pc); in kvm_mips_emulate_tlbmiss_ld()
2442 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; in kvm_mips_emulate_tlbmiss_ld()
2445 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", in kvm_mips_emulate_tlbmiss_ld()
2446 arch->pc); in kvm_mips_emulate_tlbmiss_ld()
2448 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_tlbmiss_ld()
2455 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_tlbmiss_ld()
2466 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_tlbinv_ld()
2467 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_tlbinv_ld()
2469 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | in kvm_mips_emulate_tlbinv_ld()
2474 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_tlbinv_ld()
2482 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", in kvm_mips_emulate_tlbinv_ld()
2483 arch->pc); in kvm_mips_emulate_tlbinv_ld()
2485 kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n", in kvm_mips_emulate_tlbinv_ld()
2486 arch->pc); in kvm_mips_emulate_tlbinv_ld()
2490 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_tlbinv_ld()
2496 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_tlbinv_ld()
2507 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_tlbmiss_st()
2508 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_tlbmiss_st()
2509 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | in kvm_mips_emulate_tlbmiss_st()
2514 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_tlbmiss_st()
2522 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", in kvm_mips_emulate_tlbmiss_st()
2523 arch->pc); in kvm_mips_emulate_tlbmiss_st()
2526 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; in kvm_mips_emulate_tlbmiss_st()
2528 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", in kvm_mips_emulate_tlbmiss_st()
2529 arch->pc); in kvm_mips_emulate_tlbmiss_st()
2530 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_tlbmiss_st()
2537 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_tlbmiss_st()
2548 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_tlbinv_st()
2549 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_tlbinv_st()
2550 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | in kvm_mips_emulate_tlbinv_st()
2555 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_tlbinv_st()
2563 kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n", in kvm_mips_emulate_tlbinv_st()
2564 arch->pc); in kvm_mips_emulate_tlbinv_st()
2566 kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n", in kvm_mips_emulate_tlbinv_st()
2567 arch->pc); in kvm_mips_emulate_tlbinv_st()
2571 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_tlbinv_st()
2577 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_tlbinv_st()
2588 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_tlbmod()
2589 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | in kvm_mips_emulate_tlbmod()
2591 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_tlbmod()
2595 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_tlbmod()
2603 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", in kvm_mips_emulate_tlbmod()
2604 arch->pc); in kvm_mips_emulate_tlbmod()
2606 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", in kvm_mips_emulate_tlbmod()
2607 arch->pc); in kvm_mips_emulate_tlbmod()
2610 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_tlbmod()
2616 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_tlbmod()
2627 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_fpu_exc()
2628 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_fpu_exc()
2632 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_fpu_exc()
2642 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_fpu_exc()
2655 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_ri_exc()
2656 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_ri_exc()
2661 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_ri_exc()
2669 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); in kvm_mips_emulate_ri_exc()
2675 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_ri_exc()
2689 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_bp_exc()
2690 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_bp_exc()
2695 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_bp_exc()
2703 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); in kvm_mips_emulate_bp_exc()
2709 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_bp_exc()
2723 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_trap_exc()
2724 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_trap_exc()
2729 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_trap_exc()
2737 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); in kvm_mips_emulate_trap_exc()
2743 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_trap_exc()
2757 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_msafpe_exc()
2758 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_msafpe_exc()
2763 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_msafpe_exc()
2771 kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); in kvm_mips_emulate_msafpe_exc()
2777 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_msafpe_exc()
2791 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_fpe_exc()
2792 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_fpe_exc()
2797 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_fpe_exc()
2805 kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); in kvm_mips_emulate_fpe_exc()
2811 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_fpe_exc()
2825 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_msadis_exc()
2826 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_msadis_exc()
2831 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_msadis_exc()
2839 kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); in kvm_mips_emulate_msadis_exc()
2845 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_msadis_exc()
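The exception injectors above all share one shape: save EPC and set Status.EXL unless the guest is already at exception level, record the exception code in Cause, and vector to the guest exception base + 0x180. A condensed model; branch-delay (Cause.BD) handling is omitted, and the struct and helper names are illustrative, though ST0_EXL, CAUSEF_EXCCODE and exception code 8 (syscall) are the architectural values:

#include <stdint.h>
#include <stdio.h>

#define ST0_EXL         0x00000002      /* Status.EXL */
#define CAUSEF_EXCCODE  0x0000007c      /* Cause[6:2] */

struct guest_c0 {
        uint32_t status, cause;
        unsigned long epc, pc;
};

static void deliver(struct guest_c0 *c0, unsigned long vec_base,
                    unsigned int exccode)
{
        if (!(c0->status & ST0_EXL)) {
                c0->epc = c0->pc;       /* resume point */
                c0->status |= ST0_EXL;
        }
        c0->cause = (c0->cause & ~CAUSEF_EXCCODE) | (exccode << 2);
        c0->pc = vec_base + 0x180;      /* general exception vector */
}

int main(void)
{
        struct guest_c0 c0 = { .pc = 0x400100 };

        deliver(&c0, 0x80000000UL, 8 /* syscall */);
        printf("EPC %#lx -> PC %#lx\n", c0.epc, c0.pc);
        return 0;
}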
2858 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_handle_ri()
2859 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_handle_ri()
2869 curr_pc = vcpu->arch.pc; in kvm_mips_handle_ri()
2879 kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); in kvm_mips_handle_ri()
2900 arch->gprs[rt] = vcpu->vcpu_id; in kvm_mips_handle_ri()
2903 arch->gprs[rt] = min(current_cpu_data.dcache.linesz, in kvm_mips_handle_ri()
2907 arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu); in kvm_mips_handle_ri()
2913 arch->gprs[rt] = 1; in kvm_mips_handle_ri()
2916 arch->gprs[rt] = 2; in kvm_mips_handle_ri()
2920 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); in kvm_mips_handle_ri()
2929 vcpu->arch.gprs[rt]); in kvm_mips_handle_ri()
2943 vcpu->arch.pc = curr_pc; in kvm_mips_handle_ri()
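The RDHWR cases above correspond to the architectural hardware register numbers, which the kernel names with MIPS_HWR_* constants in <asm/mipsregs.h>; the enum names below are illustrative shorthand:

/* Hardware register selectors decoded by the RDHWR emulation above. */
enum mips_hwr {
        HWR_CPUNUM    = 0,      /* CPU number: emulated as vcpu_id */
        HWR_SYNCISTEP = 1,      /* SYNCI step: dcache line size */
        HWR_CC        = 2,      /* cycle counter: kvm_mips_read_count() */
        HWR_CCRES     = 3,      /* CC resolution: 1 or 2 cycles per tick */
        HWR_ULR       = 29,     /* UserLocal: TLS pointer for the guest */
};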
2949 struct kvm_run *run = vcpu->run; in kvm_mips_complete_mmio_load()
2950 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; in kvm_mips_complete_mmio_load()
2953 if (run->mmio.len > sizeof(*gpr)) { in kvm_mips_complete_mmio_load()
2954 kvm_err("Bad MMIO length: %d", run->mmio.len); in kvm_mips_complete_mmio_load()
2960 vcpu->arch.pc = vcpu->arch.io_pc; in kvm_mips_complete_mmio_load()
2962 switch (run->mmio.len) { in kvm_mips_complete_mmio_load()
2964 switch (vcpu->mmio_needed) { in kvm_mips_complete_mmio_load()
2966 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) | in kvm_mips_complete_mmio_load()
2967 (((*(s64 *)run->mmio.data) & 0xff) << 56); in kvm_mips_complete_mmio_load()
2970 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) | in kvm_mips_complete_mmio_load()
2971 (((*(s64 *)run->mmio.data) & 0xffff) << 48); in kvm_mips_complete_mmio_load()
2974 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) | in kvm_mips_complete_mmio_load()
2975 (((*(s64 *)run->mmio.data) & 0xffffff) << 40); in kvm_mips_complete_mmio_load()
2978 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) | in kvm_mips_complete_mmio_load()
2979 (((*(s64 *)run->mmio.data) & 0xffffffff) << 32); in kvm_mips_complete_mmio_load()
2982 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | in kvm_mips_complete_mmio_load()
2983 (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24); in kvm_mips_complete_mmio_load()
2986 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | in kvm_mips_complete_mmio_load()
2987 (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16); in kvm_mips_complete_mmio_load()
2990 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | in kvm_mips_complete_mmio_load()
2991 (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8); in kvm_mips_complete_mmio_load()
2995 *gpr = *(s64 *)run->mmio.data; in kvm_mips_complete_mmio_load()
2998 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) | in kvm_mips_complete_mmio_load()
2999 ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff); in kvm_mips_complete_mmio_load()
3002 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) | in kvm_mips_complete_mmio_load()
3003 ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff); in kvm_mips_complete_mmio_load()
3006 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) | in kvm_mips_complete_mmio_load()
3007 ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff); in kvm_mips_complete_mmio_load()
3010 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) | in kvm_mips_complete_mmio_load()
3011 ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff); in kvm_mips_complete_mmio_load()
3014 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) | in kvm_mips_complete_mmio_load()
3015 ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff); in kvm_mips_complete_mmio_load()
3018 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) | in kvm_mips_complete_mmio_load()
3019 ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff); in kvm_mips_complete_mmio_load()
3022 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) | in kvm_mips_complete_mmio_load()
3023 ((((*(s64 *)run->mmio.data)) >> 56) & 0xff); in kvm_mips_complete_mmio_load()
3026 *gpr = *(s64 *)run->mmio.data; in kvm_mips_complete_mmio_load()
3031 switch (vcpu->mmio_needed) { in kvm_mips_complete_mmio_load()
3033 *gpr = *(u32 *)run->mmio.data; in kvm_mips_complete_mmio_load()
3036 *gpr = *(s32 *)run->mmio.data; in kvm_mips_complete_mmio_load()
3039 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | in kvm_mips_complete_mmio_load()
3040 (((*(s32 *)run->mmio.data) & 0xff) << 24); in kvm_mips_complete_mmio_load()
3043 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | in kvm_mips_complete_mmio_load()
3044 (((*(s32 *)run->mmio.data) & 0xffff) << 16); in kvm_mips_complete_mmio_load()
3047 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | in kvm_mips_complete_mmio_load()
3048 (((*(s32 *)run->mmio.data) & 0xffffff) << 8); in kvm_mips_complete_mmio_load()
3052 *gpr = *(s32 *)run->mmio.data; in kvm_mips_complete_mmio_load()
3055 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) | in kvm_mips_complete_mmio_load()
3056 ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff); in kvm_mips_complete_mmio_load()
3059 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) | in kvm_mips_complete_mmio_load()
3060 ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff); in kvm_mips_complete_mmio_load()
3063 *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) | in kvm_mips_complete_mmio_load()
3064 ((((*(s32 *)run->mmio.data)) >> 24) & 0xff); in kvm_mips_complete_mmio_load()
3067 *gpr = *(s32 *)run->mmio.data; in kvm_mips_complete_mmio_load()
3072 if (vcpu->mmio_needed == 1) in kvm_mips_complete_mmio_load()
3073 *gpr = *(u16 *)run->mmio.data; in kvm_mips_complete_mmio_load()
3075 *gpr = *(s16 *)run->mmio.data; in kvm_mips_complete_mmio_load()
3079 if (vcpu->mmio_needed == 1) in kvm_mips_complete_mmio_load()
3080 *gpr = *(u8 *)run->mmio.data; in kvm_mips_complete_mmio_load()
3082 *gpr = *(s8 *)run->mmio.data; in kvm_mips_complete_mmio_load()
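Each mmio_needed code above selects how much of the loaded data replaces the destination register and from which end. For example the len == 4, "1 byte" LWL case merges only the loaded low byte into the register's top byte lane. A spot check of that merge (lwl_merge_1byte() is an illustrative name; the int64_t cast avoids the shift into the sign bit that the raw s32 form risks):

#include <assert.h>
#include <stdint.h>

/* len == 4, mmio_needed == 3 ("1 byte"): only the loaded low byte
 * lands in the register's top byte lane. */
static uint64_t lwl_merge_1byte(uint64_t gpr, int32_t data)
{
        return (gpr & 0xffffff) | (((int64_t)data & 0xff) << 24);
}

int main(void)
{
        assert(lwl_merge_1byte(0x11223344, 0xaa) == 0xaa223344);
        return 0;
}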
3095 struct mips_coproc *cop0 = vcpu->arch.cop0; in kvm_mips_emulate_exc()
3096 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_mips_emulate_exc()
3101 kvm_write_c0_guest_epc(cop0, arch->pc); in kvm_mips_emulate_exc()
3113 arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; in kvm_mips_emulate_exc()
3114 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); in kvm_mips_emulate_exc()
3116 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", in kvm_mips_emulate_exc()
3133 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; in kvm_mips_check_privilege()
3217 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
3219 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
3220 * case we inject the TLB from the Guest TLB into the shadow host TLB
3229 unsigned long va = vcpu->arch.host_cp0_badvaddr; in kvm_mips_handle_tlbmiss()
3233 vcpu->arch.host_cp0_badvaddr); in kvm_mips_handle_tlbmiss()
3237 * shadow host TLB. Check the Guest TLB, if the entry is not there then in kvm_mips_handle_tlbmiss()
3239 * an entry into the guest TLB. in kvm_mips_handle_tlbmiss()
3243 (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & in kvm_mips_handle_tlbmiss()
3251 kvm_err("%s: invalid exc code: %d\n", __func__, in kvm_mips_handle_tlbmiss()
3256 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; in kvm_mips_handle_tlbmiss() local
3259 * Check if the entry is valid, if not then setup a TLB invalid in kvm_mips_handle_tlbmiss()
3262 if (!TLB_IS_VALID(*tlb, va)) { in kvm_mips_handle_tlbmiss()
3270 kvm_err("%s: invalid exc code: %d\n", __func__, in kvm_mips_handle_tlbmiss()
3275 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", in kvm_mips_handle_tlbmiss()
3276 tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]); in kvm_mips_handle_tlbmiss()
3278 * OK we have a Guest TLB entry, now inject it into the in kvm_mips_handle_tlbmiss()
3279 * shadow host TLB in kvm_mips_handle_tlbmiss()
3281 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, in kvm_mips_handle_tlbmiss()
3283 kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", in kvm_mips_handle_tlbmiss()
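Taken together, the handler implements the cases in the comment above plus an invalid-entry middle case: no guest mapping delivers a refill exception to the guest, a present-but-invalid mapping delivers a TLB invalid exception, and a valid mapping is injected into the shadow host TLB. The decision logic, condensed into a runnable stub with illustrative names:

#include <stdbool.h>
#include <stdio.h>

enum tlb_action { DELIVER_TLB_MISS, DELIVER_TLB_INV, INJECT_SHADOW };

struct entry { bool in_guest_tlb, valid; };

static enum tlb_action classify(const struct entry *e)
{
        if (!e->in_guest_tlb)
                return DELIVER_TLB_MISS;  /* guest must refill */
        if (!e->valid)
                return DELIVER_TLB_INV;   /* guest marked it invalid */
        return INJECT_SHADOW;             /* hypervisor fixes it up */
}

int main(void)
{
        struct entry hit = { true, true }, inv = { true, false };

        printf("%d %d\n", classify(&hit), classify(&inv));
        return 0;
}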