// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 */

/*
 * A pseries guest can be run as a secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver manages the movement
 * of guest pages between the normal memory managed by the hypervisor
 * (HV) and the secure memory managed by the Ultravisor (UV).
 *
 * Page-in and page-out requests from UV arrive at HV as hcalls, and
 * HV calls back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc.) are
 * shared between UV and HV. Such pages are not represented by device
 * private memory, and mappings to shared memory exist in both UV and
 * HV page tables.
 */

/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * the migrate_vma routines and the page-in/out routines.
 *
 * The per-guest mutex comes with a cost though. Mainly, it serializes
 * the fault path, as page-out can occur when HV faults on accessing
 * secure guest pages. Currently UV issues page-in requests for all the
 * guest PFNs one at a time during early boot (UV_ESM uvcall), so this is
 * not a cause for concern. Also, the number of page-outs caused by HV
 * touching secure pages is currently very low. If and when UV supports
 * overcommitting, we might see concurrent guest-driven page-outs.
 *
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_sem - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots, thus acting
 *			     as sync-points for page-in/out
 */
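
/*
 * For illustration, the lock ordering above as it appears in the
 * page-out path of this file (a sketch, not a callable helper):
 *
 *	srcu_idx = srcu_read_lock(&kvm->srcu);	<- 1. kvm->srcu
 *	down_read(&kvm->mm->mmap_sem);		<- 2. mmap_sem
 *	mutex_lock(&kvm->arch.uvmem_lock);	<- 3. uvmem_lock
 *	... find_vma(), migrate_vma_*(), uv_page_{in,out}() ...
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 *	up_read(&kvm->mm->mmap_sem);
 *	srcu_read_unlock(&kvm->srcu, srcu_idx);
 */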

/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls at PAGE_SIZE (64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are likewise
 * issued one 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever the guest shares a secure page, UV will split
 * and remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K
 * page size.
 *
 * HV invalidating a page: When a regular page belonging to a secure
 * guest gets unmapped, HV informs UV with a UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles a page fault of a page belonging
 * to a secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too, as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page sizes, this assumption will need to be revisited.
 */
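
/*
 * A worked example of the sizes above: with PAGE_SHIFT == 16 (64K), one
 * 2MB UV-internal mapping spans 2MB / 64K = 32 of the 64K GPAs that HV
 * tracks, so a single UV-side 2MB page can correspond to up to 32
 * device PFNs on the HV side, each paged in or out individually.
 */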

#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

#define KVMPPC_UVMEM_PFN	(1UL << 63)
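
/*
 * Layout of a pfns[] entry in kvmppc_uvmem_slot below: bit 63
 * (KVMPPC_UVMEM_PFN) marks the entry valid and the remaining bits hold
 * the device PFN, i.e. an entry is stored as:
 *
 *	p->pfns[gfn - p->base_pfn] = uvmem_pfn | KVMPPC_UVMEM_PFN;
 *
 * A zero entry means the GFN has no secure (device) page at present.
 */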

struct kvmppc_uvmem_slot {
	struct list_head list;
	unsigned long nr_pfns;
	unsigned long base_pfn;
	unsigned long *pfns;
};

struct kvmppc_uvmem_page_pvt {
	struct kvm *kvm;
	unsigned long gpa;
	bool skip_page_out;
};

bool kvmppc_uvmem_available(void)
{
	/*
	 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
	 * and our data structures have been initialized successfully.
	 */
	return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
	if (!p->pfns) {
		kfree(p);
		return -ENOMEM;
	}
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p, *next;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
			kfree(p);
			break;
		}
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
}

/*
 * Mark the GFN as having a device PFN (i.e. the page is now secure).
 * Called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_pfn_insert(unsigned long gfn, unsigned long uvmem_pfn,
				    struct kvm *kvm)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			p->pfns[index] = uvmem_pfn | KVMPPC_UVMEM_PFN;
			return;
		}
	}
}

/*
 * Drop the device PFN tracking for the GFN.
 * Called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_pfn_remove(unsigned long gfn, struct kvm *kvm)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			p->pfns[gfn - p->base_pfn] = 0;
			return;
		}
	}
}

/*
 * Return true if the GFN is currently backed by a device PFN, and
 * optionally return that PFN via @uvmem_pfn.
 * Called with kvm->arch.uvmem_lock held.
 */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
				    unsigned long *uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_UVMEM_PFN) {
				if (uvmem_pfn)
					*uvmem_pfn = p->pfns[index] &
						     ~KVMPPC_UVMEM_PFN;
				return true;
			} else
				return false;
		}
	}
	return false;
}

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int ret = H_SUCCESS;
	int srcu_idx;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

	/* Only radix guests can be secure guests */
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	/* NAK the transition to secure if not enabled */
	if (!kvm->arch.svm_enabled)
		return H_AUTHORITY;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		if (kvmppc_uvmem_slot_init(kvm, memslot)) {
			ret = H_PARAMETER;
			goto out;
		}
		ret = uv_register_mem_slot(kvm->arch.lpid,
					   memslot->base_gfn << PAGE_SHIFT,
					   memslot->npages * PAGE_SIZE,
					   0, memslot->id);
		if (ret < 0) {
			kvmppc_uvmem_slot_free(kvm, memslot);
			ret = H_PARAMETER;
			goto out;
		}
	}
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %d went secure\n", kvm->arch.lpid);
	return H_SUCCESS;
}
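
/*
 * For reference, the expected hcall sequence when a guest transitions
 * to secure mode (a sketch assembled from the comments above, not a
 * normative protocol description):
 *
 *	guest:	ucall(UV_ESM)
 *	UV:	H_SVM_INIT_START		-> kvmppc_h_svm_init_start()
 *	UV:	H_SVM_PAGE_IN, one per GFN	-> kvmppc_h_svm_page_in()
 *	UV:	H_SVM_INIT_DONE			-> kvmppc_h_svm_init_done()
 *
 * kvmppc_h_svm_init_abort() below covers the failure path between
 * INIT_START and INIT_DONE.
 */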

/*
 * Drop device pages that we maintain for the secure guest
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is an HV-side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do a fault-time migration that replaces the device
 * PTEs in the QEMU page table with normal PTEs from newly allocated pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
			     struct kvm *kvm, bool skip_page_out)
{
	int i;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn, uvmem_pfn;
	unsigned long gfn = free->base_gfn;

	for (i = free->npages; i; --i, ++gfn) {
		struct page *uvmem_page;

		mutex_lock(&kvm->arch.uvmem_lock);
		if (!kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
			mutex_unlock(&kvm->arch.uvmem_lock);
			continue;
		}

		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = skip_page_out;
		mutex_unlock(&kvm->arch.uvmem_lock);

		pfn = gfn_to_pfn(kvm, gfn);
		if (is_error_noslot_pfn(pfn))
			continue;
		kvm_release_pfn_clean(pfn);
	}
}

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
	int srcu_idx;
	struct kvm_memory_slot *memslot;

	/*
	 * Expect to be called only after INIT_START and before INIT_DONE.
	 * If INIT_DONE was completed, use normal VM termination sequence.
	 */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return H_STATE;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmppc_uvmem_drop_pages(memslot, kvm, false);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	kvm->arch.secure_guest = 0;
	uv_svm_terminate(kvm->arch.lpid);

	return H_PARAMETER;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device
 * PFN will be used to keep track of the secure page on HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	struct page *dpage = NULL;
	unsigned long bit, uvmem_pfn;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn_last, pfn_first;

	pfn_first = kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT;
	pfn_last = pfn_first +
		   (resource_size(&kvmppc_uvmem_pgmap.res) >> PAGE_SHIFT);

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))
		goto out;
	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
	if (!pvt)
		goto out_clear;

	uvmem_pfn = bit + pfn_first;
	kvmppc_uvmem_pfn_insert(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

	dpage = pfn_to_page(uvmem_pfn);
	dpage->zone_device_data = pvt;
	get_page(dpage);
	lock_page(dpage);
	return dpage;
out_clear:
	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
	spin_unlock(&kvmppc_uvmem_bitmap_lock);
	return NULL;
}

/*
 * Allocate a PFN from the private device memory pool and copy the page
 * from normal memory to secure memory using the UV_PAGE_IN uvcall.
 */
static int
kvmppc_svm_page_in(struct vm_area_struct *vma, unsigned long start,
		   unsigned long end, unsigned long gpa, struct kvm *kvm,
		   unsigned long page_shift, bool *downgrade)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *spage;
	unsigned long pfn;
	struct page *dpage;
	int ret = 0;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;

	/*
	 * We come here with mmap_sem write lock held just for
	 * ksm_madvise(), otherwise we only need read mmap_sem.
	 * Hence downgrade to read lock once ksm_madvise() is done.
	 */
	ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
			  MADV_UNMERGEABLE, &vma->vm_flags);
	downgrade_write(&kvm->mm->mmap_sem);
	*downgrade = true;
	if (ret)
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
		ret = -1;
		goto out_finalize;
	}

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	pfn = *mig.src >> MIGRATE_PFN_SHIFT;
	spage = migrate_pfn_to_page(*mig.src);
	if (spage)
		uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
			   page_shift);

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}
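
/*
 * For reference, both kvmppc_svm_page_in() above and
 * kvmppc_svm_page_out() below follow the standard three-phase
 * migrate_vma protocol (a sketch; see include/linux/migrate.h for the
 * authoritative API):
 *
 *	migrate_vma_setup(&mig);	collect and unmap the source page
 *	... allocate the destination page and copy the contents
 *	    (the uv_page_in()/uv_page_out() uvcalls happen here) ...
 *	migrate_vma_pages(&mig);	install the destination page
 *	migrate_vma_finalize(&mig);	restore PTEs on failure, drop refs
 */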

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, the dev_pagemap_ops.migrate_to_ram handler is used
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long
kvmppc_share_page(struct kvm *kvm, unsigned long gpa, unsigned long page_shift)
{
	int ret = H_PARAMETER;
	struct page *uvmem_page;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	unsigned long gfn = gpa >> page_shift;
	int srcu_idx;
	unsigned long uvmem_pfn;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
	}

retry:
	mutex_unlock(&kvm->arch.uvmem_lock);
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0, page_shift))
		ret = H_SUCCESS;
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * The H_PAGE_IN_SHARED flag makes the page shared, which means that
 * the same memory is visible from both UV and HV.
 */
unsigned long
kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
		     unsigned long flags, unsigned long page_shift)
{
	bool downgrade = false;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	unsigned long gfn = gpa >> page_shift;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, page_shift);

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	down_write(&kvm->mm->mmap_sem);

	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out_unlock;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

	if (!kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
				&downgrade))
		ret = H_SUCCESS;
out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	if (downgrade)
		up_read(&kvm->mm->mmap_sem);
	else
		up_write(&kvm->mm->mmap_sem);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Provision a new page on HV side and copy over the contents
 * from secure memory using UV_PAGE_OUT uvcall.
 */
static int
kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
		    unsigned long end, unsigned long page_shift,
		    struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		goto out;

	ret = migrate_vma_setup(&mig);
	if (ret)
		goto out;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
	else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
out:
	mutex_unlock(&kvm->arch.uvmem_lock);
	return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing a UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of the device PFN, and the newly
 * provisioned page/PFN gets populated in QEMU page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa))
		return VM_FAULT_SIGBUS;
	else
		return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when a secure page becomes a normal page during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
	unsigned long pfn = page_to_pfn(page) -
			(kvmppc_uvmem_pgmap.res.start >> PAGE_SHIFT);
	struct kvmppc_uvmem_page_pvt *pvt;

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	kvmppc_uvmem_pfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
	.page_free = kvmppc_uvmem_page_free,
	.migrate_to_ram	= kvmppc_uvmem_migrate_to_ram,
};

/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
		      unsigned long flags, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags)
		return H_P2;

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	down_read(&kvm->mm->mmap_sem);
	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
		ret = H_SUCCESS;
out:
	up_read(&kvm->mm->mmap_sem);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * When HV handles a page fault for a page belonging to a secure guest,
 * send the page to UV with a UV_PAGE_IN request, unless the GFN is
 * already backed by a secure (device) page.
 */
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
	unsigned long pfn;
	int ret = U_SUCCESS;

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out;

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
			 0, PAGE_SHIFT);
out:
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

/*
 * Return the total amount of secure memory advertised by the
 * "ibm,uv-firmware" device tree node, or 0 if the platform has none.
 */
static u64 kvmppc_get_secmem_size(void)
{
	struct device_node *np;
	int i, len;
	const __be32 *prop;
	u64 size = 0;

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;

	prop = of_get_property(np, "secure-memory-ranges", &len);
	if (!prop)
		goto out_put;

	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
	of_node_put(np);
out:
	return size;
}
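
/*
 * An illustrative (made-up) device tree node that the function above
 * would parse; secure-memory-ranges is a list of (addr, size) pairs,
 * each value two cells wide, and only the size cells are summed:
 *
 *	ibm,uv-firmware {
 *		compatible = "ibm,uv-firmware";
 *		secure-memory-ranges = <0x0 0x0 0x0 0x40000000>;
 *	};
 *
 * For this node, kvmppc_get_secmem_size() returns 0x40000000 (1GB).
 */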

int kvmppc_uvmem_init(void)
{
	int ret = 0;
	unsigned long size;
	struct resource *res;
	void *addr;
	unsigned long pfn_last, pfn_first;

	size = kvmppc_get_secmem_size();
	if (!size) {
		/*
		 * Don't fail the initialization of the kvm-hv module if
		 * the platform doesn't export the ibm,uv-firmware node.
		 * Let normal guests run on such PEF-disabled platforms.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
		goto out;
	}

	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out;
	}

	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	kvmppc_uvmem_pgmap.res = *res;
	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto out_free_region;
	}

	pfn_first = res->start >> PAGE_SHIFT;
	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
				      sizeof(unsigned long), GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
	return ret;
out_unmap:
	memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
	release_mem_region(res->start, size);
out:
	return ret;
}

void kvmppc_uvmem_free(void)
{
	if (!kvmppc_uvmem_bitmap)
		return;

	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.res.start,
			   resource_size(&kvmppc_uvmem_pgmap.res));
	kfree(kvmppc_uvmem_bitmap);
}