Lines Matching full:lpcr

510 	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n",  in kvmppc_dump_regs()
511 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
2154 * Enforce limits on guest LPCR values based on hardware availability,
2158 unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr) in kvmppc_filter_lpcr_hv() argument
2162 lpcr &= ~LPCR_TC; in kvmppc_filter_lpcr_hv()
2166 lpcr &= ~LPCR_AIL; in kvmppc_filter_lpcr_hv()
2167 if ((lpcr & LPCR_AIL) != LPCR_AIL_3) in kvmppc_filter_lpcr_hv()
2168 lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ in kvmppc_filter_lpcr_hv()
2176 lpcr &= ~LPCR_AIL; in kvmppc_filter_lpcr_hv()
2183 lpcr &= ~LPCR_LD; in kvmppc_filter_lpcr_hv()
2185 return lpcr; in kvmppc_filter_lpcr_hv()
2188 static void verify_lpcr(struct kvm *kvm, unsigned long lpcr) in verify_lpcr() argument
2190 if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) { in verify_lpcr()
2191 WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n", in verify_lpcr()
2192 lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr)); in verify_lpcr()
2214 /* Broken 32-bit version of LPCR must not clear top bits */ in kvmppc_set_lpcr()
2219 (vc->lpcr & ~mask) | (new_lpcr & mask)); in kvmppc_set_lpcr()
2225 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { in kvmppc_set_lpcr()
2239 vc->lpcr = new_lpcr; in kvmppc_set_lpcr()
2799 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
3284 if (kvm->arch.lpcr & LPCR_GTSE) in do_migrate_away_vcpu()
4111 unsigned long lpcr, u64 *tb) in kvmhv_vcpu_entry_nestedv2() argument
4129 kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr); in kvmhv_vcpu_entry_nestedv2()
4162 static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u6… in kvmhv_vcpu_entry_p9_nested() argument
4194 hvregs.lpcr = lpcr; in kvmhv_vcpu_entry_p9_nested()
4214 * irq_work_raise could check a flag (or possibly LPCR[HDICE] in kvmhv_vcpu_entry_p9_nested()
4240 if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ in kvmhv_vcpu_entry_p9_nested()
4258 unsigned long lpcr, u64 *tb) in kvmhv_p9_guest_entry() argument
4279 trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4281 trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4293 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4300 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4712 unsigned long lpcr) in kvmhv_run_single_vcpu() argument
4795 lpcr |= LPCR_MER; in kvmhv_run_single_vcpu()
4822 trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb); in kvmhv_run_single_vcpu()
4995 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
5241 * Update LPCR values in kvm->arch and in vcores.
5243 * of kvm->arch.lpcr update).
5245 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) in kvmppc_update_lpcr() argument
5250 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
5253 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5261 vc->lpcr = (vc->lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5262 verify_lpcr(kvm, vc->lpcr); in kvmppc_update_lpcr()
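
The kvmppc_update_lpcr() matches show a masked read-modify-write: only the bits covered by mask are replaced, first in kvm->arch.lpcr and then in each virtual core's copy, with verify_lpcr() re-checking each result. A condensed sketch of that pattern follows; the iteration over kvm->arch.vcores and the per-vcore locking are assumptions filled in around the matched lines.

	void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
	{
		int i;

		/* Nothing to do if the masked bits already have the requested value. */
		if ((kvm->arch.lpcr & mask) == lpcr)
			return;

		kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;

		for (i = 0; i < KVM_MAX_VCORES; ++i) {	/* assumption: walk allocated vcores */
			struct kvmppc_vcore *vc = kvm->arch.vcores[i];

			if (!vc)
				continue;
			spin_lock(&vc->lock);		/* assumption: vcore lock protects vc->lpcr */
			vc->lpcr = (vc->lpcr & ~mask) | lpcr;
			verify_lpcr(kvm, vc->lpcr);
			spin_unlock(&vc->lock);
		}
	}
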
5309 unsigned long lpcr = 0, senc; in kvmppc_hv_setup_htab_rma() local
5368 /* Update VRMASD field in the LPCR */ in kvmppc_hv_setup_htab_rma()
5371 lpcr = senc << (LPCR_VRMASD_SH - 4); in kvmppc_hv_setup_htab_rma()
5372 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); in kvmppc_hv_setup_htab_rma()
5375 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
5394 unsigned long lpcr, lpcr_mask; in kvmppc_switch_mmu_to_hpt() local
5406 lpcr = LPCR_VPM1; in kvmppc_switch_mmu_to_hpt()
5410 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); in kvmppc_switch_mmu_to_hpt()
5421 unsigned long lpcr, lpcr_mask; in kvmppc_switch_mmu_to_radix() local
5434 lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR; in kvmppc_switch_mmu_to_radix()
5440 lpcr |= LPCR_HAIL; in kvmppc_switch_mmu_to_radix()
5442 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); in kvmppc_switch_mmu_to_radix()
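
Taken together, the kvmppc_switch_mmu_to_hpt() and kvmppc_switch_mmu_to_radix() matches show the two MMU modes toggling complementary LPCR bits through kvmppc_update_lpcr(). A sketch of the two updates follows; the mask value and the HAIL feature test are assumptions, since only the lpcr assignments appear in the matched lines, and example_switch_mmu() is a hypothetical wrapper.

	/* Hypothetical wrapper showing the HPT/radix LPCR switch. */
	static void example_switch_mmu(struct kvm *kvm, bool radix)
	{
		unsigned long lpcr, lpcr_mask;

		/* Assumption: mask covers the bits that differ between the two modes. */
		lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
		if (radix) {
			/* Radix guest: host radix mode, guest tlbie, VPM1 off. */
			lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR;
			if (cpu_has_feature(CPU_FTR_ARCH_31)) {	/* assumption: HAIL gating */
				lpcr |= LPCR_HAIL;
				lpcr_mask |= LPCR_HAIL;
			}
		} else {
			/* HPT guest: virtual partition memory, radix bits cleared by the mask. */
			lpcr = LPCR_VPM1;
		}
		kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
	}
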
5532 unsigned long lpcr, lpid; in kvmppc_core_init_vm_hv() local
5595 /* Init LPCR for virtual RMA mode */ in kvmppc_core_init_vm_hv()
5598 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
5599 lpcr &= LPCR_PECE | LPCR_LPES; in kvmppc_core_init_vm_hv()
5605 lpcr = 0; in kvmppc_core_init_vm_hv()
5607 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | in kvmppc_core_init_vm_hv()
5613 lpcr |= LPCR_ONL; in kvmppc_core_init_vm_hv()
5619 * EE in HV mode with this LPCR still set) in kvmppc_core_init_vm_hv()
5622 lpcr &= ~LPCR_VPM0; in kvmppc_core_init_vm_hv()
5623 lpcr |= LPCR_HVICE | LPCR_HEIC; in kvmppc_core_init_vm_hv()
5630 lpcr |= LPCR_LPES; in kvmppc_core_init_vm_hv()
5639 lpcr &= ~LPCR_VPM1; in kvmppc_core_init_vm_hv()
5640 lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; in kvmppc_core_init_vm_hv()
5644 lpcr |= LPCR_HAIL; in kvmppc_core_init_vm_hv()
5656 verify_lpcr(kvm, lpcr); in kvmppc_core_init_vm_hv()
5657 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
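
The kvmppc_core_init_vm_hv() matches show the per-VM default LPCR being assembled: start from the host LPCR (HV mode) or zero, keep only the PECE/LPES settings, add DPFD/HDICE, then adjust for newer CPUs and for radix before verifying and storing. A condensed sketch follows; the feature tests and the VPM bits are reconstructed as assumptions, since the listing only shows the lines that mention lpcr.

	lpcr = mfspr(SPRN_LPCR);			/* HV mode: start from the host LPCR */
	lpcr &= LPCR_PECE | LPCR_LPES;			/* keep only wakeup/interrupt routing bits */
	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |	/* default prefetch depth, HV decrementer */
		LPCR_VPM0 | LPCR_VPM1;			/* assumption: VPM bits set here */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))		/* assumption: POWER8 and later */
		lpcr |= LPCR_ONL;
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {	/* assumption: POWER9 and later */
		lpcr &= ~LPCR_VPM0;
		lpcr |= LPCR_HVICE | LPCR_HEIC;		/* HV virtualization irqs; keep OS irqs out of HV */
		lpcr |= LPCR_LPES;			/* assumption: only when 0x500 goes straight to the guest */
	}
	if (radix_enabled()) {				/* assumption: radix host implies radix guests */
		lpcr &= ~LPCR_VPM1;
		lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
		lpcr |= LPCR_HAIL;			/* assumption: only where the host supports HAIL */
	}
	verify_lpcr(kvm, lpcr);
	kvm->arch.lpcr = lpcr;
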
6092 unsigned long lpcr; in kvmhv_configure_mmu() local
6144 lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; in kvmhv_configure_mmu()
6145 kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE); in kvmhv_configure_mmu()