Lines Matching refs:kvm

31 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
49 * 1. kvm->srcu - Protects KVM memslots
50 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
51 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
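
The comment block above (file lines 49-51) documents the locking hierarchy: kvm->srcu is taken first, then kvm->mm->mmap_lock, then kvm->arch.uvmem_lock. Below is a minimal user-space model of that nesting; the pthread primitives and the *_model names are illustrative stand-ins, not the kernel's locks.

    /* Illustrative model of the documented lock order; not kernel code. */
    #include <pthread.h>

    static pthread_rwlock_t srcu_model       = PTHREAD_RWLOCK_INITIALIZER; /* level 1 */
    static pthread_rwlock_t mmap_lock_model  = PTHREAD_RWLOCK_INITIALIZER; /* level 2 */
    static pthread_mutex_t  uvmem_lock_model = PTHREAD_MUTEX_INITIALIZER;  /* level 3 */

    static void page_transition_model(void)
    {
        /* Take the levels strictly in order 1 -> 2 -> 3 ... */
        pthread_rwlock_rdlock(&srcu_model);
        pthread_rwlock_rdlock(&mmap_lock_model);
        pthread_mutex_lock(&uvmem_lock_model);

        /* ... memslot, VMA and per-frame state are consulted here ... */

        /* ... and release in the reverse order. */
        pthread_mutex_unlock(&uvmem_lock_model);
        pthread_rwlock_unlock(&mmap_lock_model);
        pthread_rwlock_unlock(&srcu_model);
    }

    int main(void)
    {
        page_transition_model();
        return 0;
    }
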
233 struct kvm *kvm;
248 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
263 mutex_lock(&kvm->arch.uvmem_lock);
264 list_add(&p->list, &kvm->arch.uvmem_pfns);
265 mutex_unlock(&kvm->arch.uvmem_lock);
273 void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
277 mutex_lock(&kvm->arch.uvmem_lock);
278 list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
286 mutex_unlock(&kvm->arch.uvmem_lock);
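it 
kvmppc_uvmem_slot_init()/kvmppc_uvmem_slot_free() (file lines 248-286) allocate and tear down a per-memslot tracking entry holding one state word per guest page frame, linked into kvm->arch.uvmem_pfns under uvmem_lock. A self-contained sketch of that bookkeeping follows, assuming a simplified structure; uvmem_slot_model and its fields are illustrative, not the kernel's kvmppc_uvmem_slot.

    #include <stdlib.h>

    struct uvmem_slot_model {
        struct uvmem_slot_model *next;
        unsigned long base_gfn;      /* first guest frame tracked by this entry */
        unsigned long nr_pfns;       /* number of frames in the memslot */
        unsigned long *pfns;         /* one state word per frame */
    };

    /* Add a tracking entry for a new memslot; the caller serializes list
     * access (in the kernel that is what uvmem_lock is for). */
    int slot_init_model(struct uvmem_slot_model **head,
                        unsigned long base_gfn, unsigned long npages)
    {
        struct uvmem_slot_model *p = calloc(1, sizeof(*p));

        if (!p)
            return -1;
        p->pfns = calloc(npages, sizeof(*p->pfns));
        if (!p->pfns) {
            free(p);
            return -1;
        }
        p->base_gfn = base_gfn;
        p->nr_pfns = npages;
        p->next = *head;
        *head = p;
        return 0;
    }

    /* Remove and free the entry covering base_gfn, mirroring slot_free. */
    void slot_free_model(struct uvmem_slot_model **head, unsigned long base_gfn)
    {
        for (struct uvmem_slot_model **pp = head; *pp; pp = &(*pp)->next) {
            if ((*pp)->base_gfn == base_gfn) {
                struct uvmem_slot_model *victim = *pp;

                *pp = victim->next;
                free(victim->pfns);
                free(victim);
                return;
            }
        }
    }
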
289 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
294 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
309 unsigned long uvmem_pfn, struct kvm *kvm)
311 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
315 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
317 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
321 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
323 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
327 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
329 kvmppc_mark_gfn(gfn, kvm, 0, 0);
333 static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
338 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
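
kvmppc_mark_gfn() and the helpers built on it (file lines 289-338) record, per guest frame, whether the page is backed by a uvmem device PFN, is secure in normal memory, or is shared, and kvmppc_gfn_is_uvmem_pfn() reads that state back. A sketch of one possible encoding of that state word follows; the GFN_STATE_* values are placeholders, not the kernel's KVMPPC_GFN_* definitions.

    /* Placeholder encoding for the per-frame state word. */
    #define GFN_STATE_UVMEM_PFN (1UL << 63)  /* backed by a uvmem (device) PFN */
    #define GFN_STATE_MEM_PFN   (1UL << 62)  /* secure, backed by normal memory */
    #define GFN_STATE_SHARED    (1UL << 61)  /* shared between HV and SVM */
    #define GFN_STATE_FLAG_MASK (GFN_STATE_UVMEM_PFN | GFN_STATE_MEM_PFN | GFN_STATE_SHARED)
    #define GFN_STATE_PFN_MASK  (~GFN_STATE_FLAG_MASK)

    /* Record a state for one frame; for uvmem-backed frames the backing
     * device PFN is kept in the low bits of the same word. Marking with
     * flag 0 clears the entry, as the remove helper does. */
    void mark_gfn_model(unsigned long *pfns, unsigned long idx,
                        unsigned long flag, unsigned long uvmem_pfn)
    {
        pfns[idx] = flag;
        if (flag == GFN_STATE_UVMEM_PFN)
            pfns[idx] |= uvmem_pfn & GFN_STATE_PFN_MASK;
    }

    /* Query: is this frame currently backed by a uvmem PFN, and which one? */
    int gfn_is_uvmem_pfn_model(const unsigned long *pfns, unsigned long idx,
                               unsigned long *uvmem_pfn)
    {
        if (!(pfns[idx] & GFN_STATE_UVMEM_PFN))
            return 0;
        if (uvmem_pfn)
            *uvmem_pfn = pfns[idx] & GFN_STATE_PFN_MASK;
        return 1;
    }
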
359 * Must be called with kvm->arch.uvmem_lock held.
362 struct kvm *kvm, unsigned long *gfn)
368 list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
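
kvmppc_next_nontransitioned_gfn() (file lines 359-368) scans the same per-frame state words for the next frame that has not yet been handed to the ultravisor. A sketch of that scan, reusing the placeholder flag bits from the previous sketch:

    /* Find the next frame whose state word carries no flag bits. The mask
     * value is illustrative, not the kernel's KVMPPC_GFN_FLAG_MASK. */
    #define GFN_FLAG_MASK (7UL << 61)

    int next_nontransitioned_gfn_model(const unsigned long *pfns,
                                       unsigned long nr_pfns,
                                       unsigned long *idx)
    {
        for (unsigned long i = *idx; i < nr_pfns; i++) {
            if (!(pfns[i] & GFN_FLAG_MASK)) {
                *idx = i;           /* this frame still needs to be migrated */
                return 1;
            }
        }
        return 0;                   /* every frame has already transitioned */
    }
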
391 static int kvmppc_memslot_page_merge(struct kvm *kvm,
395 unsigned long end, start = gfn_to_hva(kvm, gfn);
406 mmap_write_lock(kvm->mm);
408 vma = find_vma_intersection(kvm->mm, start, end);
426 mmap_write_unlock(kvm->mm);
430 static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
433 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
434 kvmppc_uvmem_slot_free(kvm, memslot);
435 kvmppc_memslot_page_merge(kvm, memslot, true);
438 static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
443 if (kvmppc_memslot_page_merge(kvm, memslot, false))
446 if (kvmppc_uvmem_slot_init(kvm, memslot))
449 ret = uv_register_mem_slot(kvm->arch.lpid,
459 kvmppc_uvmem_slot_free(kvm, memslot);
461 kvmppc_memslot_page_merge(kvm, memslot, true);
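
__kvmppc_uvmem_memslot_create() (file lines 438-461) performs three steps in order: disable page merging for the memslot, set up the per-memslot tracking slot, then register the slot with the ultravisor; on failure it unwinds the completed steps in reverse, which is also the order __kvmppc_uvmem_memslot_delete() (file lines 430-435) uses. A sketch of that unwind pattern with stubbed steps; the step_*/undo_* names are illustrative.

    /* Stubs standing in for "disable page merging", "init the tracking
     * slot" and "register the slot with the ultravisor". */
    int step_disable_merge(void)  { return 0; }
    int step_slot_init(void)      { return 0; }
    int step_uv_register(void)    { return 0; }
    void undo_slot_init(void)     { }
    void undo_enable_merge(void)  { }

    /* Sketch of the create path's ordering and reverse unwinding. */
    int memslot_create_model(void)
    {
        int ret = -1;

        if (step_disable_merge())
            return ret;             /* nothing to undo yet */
        if (step_slot_init())
            goto out_enable_merge;
        if (step_uv_register())
            goto out_free_slot;
        return 0;                   /* all three steps succeeded */

    out_free_slot:
        undo_slot_init();
    out_enable_merge:
        undo_enable_merge();        /* re-enable page merging */
        return ret;
    }
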
465 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
472 kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;
478 if (!kvm_is_radix(kvm))
482 if (!kvm->arch.svm_enabled)
485 srcu_idx = srcu_read_lock(&kvm->srcu);
488 slots = kvm_memslots(kvm);
490 ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
496 slots = kvm_memslots(kvm);
500 __kvmppc_uvmem_memslot_delete(kvm, memslot);
504 srcu_read_unlock(&kvm->srcu, srcu_idx);
511 * Caller must hold kvm->arch.uvmem_lock.
516 struct kvm *kvm, unsigned long gpa, struct page *fault_page)
536 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
568 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
589 struct kvm *kvm, unsigned long gpa,
594 mutex_lock(&kvm->arch.uvmem_lock);
595 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
597 mutex_unlock(&kvm->arch.uvmem_lock);
611 struct kvm *kvm, bool skip_page_out)
620 mmap_read_lock(kvm->mm);
629 vma = vma_lookup(kvm->mm, addr);
636 mutex_lock(&kvm->arch.uvmem_lock);
638 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
645 PAGE_SHIFT, kvm, pvt->gpa, NULL))
650 kvmppc_gfn_remove(gfn, kvm);
653 mutex_unlock(&kvm->arch.uvmem_lock);
656 mmap_read_unlock(kvm->mm);
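
__kvmppc_svm_page_out() and kvmppc_uvmem_drop_pages() (file lines 511-656) pull secure pages back out of the ultravisor: the drop path walks every frame of a memslot under mmap_lock, checks its state under uvmem_lock, pages secure frames out unless told to skip that, and clears the per-frame marking. The sketch below captures only the shape of that walk; the helpers are stubs, and locking, the shared-page case and error reporting are omitted.

    #include <stdbool.h>

    /* Stubs standing in for VMA lookup, per-frame state checks and the UV
     * page-out call; names and behaviour are illustrative only. */
    bool vma_present(unsigned long gfn)   { (void)gfn; return true; }
    bool gfn_is_secure(unsigned long gfn) { (void)gfn; return false; }
    int  page_out_one(unsigned long gfn)  { (void)gfn; return 0; }
    void gfn_unmark(unsigned long gfn)    { (void)gfn; }

    /* Visit every frame of the memslot, page secure frames back out of
     * the ultravisor unless asked to skip, and clear the per-frame mark. */
    void drop_pages_model(unsigned long base_gfn, unsigned long npages,
                          bool skip_page_out)
    {
        for (unsigned long gfn = base_gfn; gfn < base_gfn + npages; gfn++) {
            if (!vma_present(gfn))
                continue;           /* no host mapping for this frame */
            if (gfn_is_secure(gfn) && !skip_page_out)
                page_out_one(gfn);
            gfn_unmark(gfn);
        }
    }
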
659 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
668 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
671 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
674 srcu_idx = srcu_read_lock(&kvm->srcu);
676 kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
677 kvmppc_uvmem_drop_pages(memslot, kvm, false);
679 srcu_read_unlock(&kvm->srcu, srcu_idx);
681 kvm->arch.secure_guest = 0;
682 uv_svm_terminate(kvm->arch.lpid);
693 * Called with kvm->arch.uvmem_lock held
695 static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
719 kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);
722 pvt->kvm = kvm;
742 unsigned long end, unsigned long gpa, struct kvm *kvm,
770 dpage = kvmppc_uvmem_get_page(gpa, kvm);
780 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
794 static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
802 mmap_read_lock(kvm->mm);
803 mutex_lock(&kvm->arch.uvmem_lock);
804 while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
806 start = gfn_to_hva(kvm, gfn);
811 vma = find_vma_intersection(kvm->mm, start, end);
816 (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
825 mutex_unlock(&kvm->arch.uvmem_lock);
826 mmap_read_unlock(kvm->mm);
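
kvmppc_uv_migrate_mem_slot() (file lines 794-826) is the inverse walk: under mmap_lock and uvmem_lock it repeatedly asks for the next frame that has not transitioned, finds its VMA, and pages it into secure memory, stopping on the first failure. A sketch of that loop's shape with stubbed helpers; the names are illustrative only.

    #include <stdbool.h>

    /* Stubs standing in for "find the next frame still to migrate", the
     * VMA lookup and the page-in call. */
    bool next_pending_gfn(unsigned long *gfn) { (void)gfn; return false; }
    bool hva_has_vma(unsigned long gfn)       { (void)gfn; return true; }
    int  page_in_one(unsigned long gfn)       { (void)gfn; return 0; }

    /* Keep migrating frames into secure memory until none are pending or
     * a step fails; locking (mmap_lock, uvmem_lock) is omitted here. */
    int migrate_memslot_model(unsigned long first_gfn)
    {
        unsigned long gfn = first_gfn;
        int ret = 0;

        while (next_pending_gfn(&gfn)) {
            ret = -1;
            if (!hva_has_vma(gfn))
                break;              /* no host mapping: stop with an error */
            if (page_in_one(gfn))
                break;              /* migration of this frame failed */
            ret = 0;
            gfn++;                  /* move past the migrated frame */
        }
        return ret;
    }
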
830 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
837 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
841 srcu_idx = srcu_read_lock(&kvm->srcu);
842 slots = kvm_memslots(kvm);
844 ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
860 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
861 pr_info("LPID %lld went secure\n", kvm->arch.lpid);
864 srcu_read_unlock(&kvm->srcu, srcu_idx);
877 static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
888 srcu_idx = srcu_read_lock(&kvm->srcu);
889 mutex_lock(&kvm->arch.uvmem_lock);
890 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
902 mutex_unlock(&kvm->arch.uvmem_lock);
903 page = gfn_to_page(kvm, gfn);
907 mutex_lock(&kvm->arch.uvmem_lock);
908 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
917 if (!uv_page_in(kvm->arch.lpid, page_to_pfn(page) << page_shift, gpa, 0,
919 kvmppc_gfn_shared(gfn, kvm);
923 mutex_unlock(&kvm->arch.uvmem_lock);
925 srcu_read_unlock(&kvm->srcu, srcu_idx);
935 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
945 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
955 return kvmppc_share_page(kvm, gpa, page_shift);
958 srcu_idx = srcu_read_lock(&kvm->srcu);
959 mmap_read_lock(kvm->mm);
961 start = gfn_to_hva(kvm, gfn);
965 mutex_lock(&kvm->arch.uvmem_lock);
967 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
971 vma = find_vma_intersection(kvm->mm, start, end);
975 if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
982 mutex_unlock(&kvm->arch.uvmem_lock);
984 mmap_read_unlock(kvm->mm);
985 srcu_read_unlock(&kvm->srcu, srcu_idx);
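
kvmppc_h_svm_page_in() (file lines 935-985) first validates the request and then branches: a request flagged as shared is handled by kvmppc_share_page(), so the page stays accessible to the hypervisor, while anything else is migrated into secure (uvmem) memory. A sketch of that top-level decision; the flag value, the 4K-only check and the return codes are illustrative stand-ins, not the hcall ABI.

    #define PAGE_IN_SHARED_MODEL 0x1UL  /* illustrative flag value */
    #define PAGE_SHIFT_4K        12

    /* Stubs for the two outcomes: sharing keeps the page HV-accessible,
     * migration moves it into secure (uvmem) memory. */
    unsigned long share_page_model(unsigned long gpa)   { (void)gpa; return 0; }
    unsigned long migrate_page_model(unsigned long gpa) { (void)gpa; return 0; }

    unsigned long h_svm_page_in_model(unsigned long gpa, unsigned long flags,
                                      unsigned long page_shift)
    {
        if (page_shift != PAGE_SHIFT_4K)
            return ~0UL;                       /* unsupported page size */
        if (flags & ~PAGE_IN_SHARED_MODEL)
            return ~0UL;                       /* unknown flag bits */
        if (flags & PAGE_IN_SHARED_MODEL)
            return share_page_model(gpa);
        return migrate_page_model(gpa);
    }
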
1004 pvt->kvm, pvt->gpa, vmf->page))
1015 * Gets called with kvm->arch.uvmem_lock held.
1030 kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
1032 kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
1045 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
1054 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
1064 srcu_idx = srcu_read_lock(&kvm->srcu);
1065 mmap_read_lock(kvm->mm);
1066 start = gfn_to_hva(kvm, gfn);
1071 vma = find_vma_intersection(kvm->mm, start, end);
1075 if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL))
1078 mmap_read_unlock(kvm->mm);
1079 srcu_read_unlock(&kvm->srcu, srcu_idx);
1083 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
1088 page = gfn_to_page(kvm, gfn);
1092 mutex_lock(&kvm->arch.uvmem_lock);
1093 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
1096 ret = uv_page_in(kvm->arch.lpid, page_to_pfn(page) << PAGE_SHIFT,
1100 mutex_unlock(&kvm->arch.uvmem_lock);
1104 int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
1106 int ret = __kvmppc_uvmem_memslot_create(kvm, new);
1109 ret = kvmppc_uv_migrate_mem_slot(kvm, new);
1114 void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
1116 __kvmppc_uvmem_memslot_delete(kvm, old);
1168 * Don't fail the initialization of kvm-hv module if