1 /******************************************************************************
2 * grant_table.h
3 *
4 * Two sets of functionality:
5 * 1. Granting foreign access to our memory reservation.
6 * 2. Accessing others' memory reservations via grant references.
7 * (i.e., mechanisms for both sender and recipient of grant references)
8 *
9 * Copyright (c) 2004-2005, K A Fraser
10 * Copyright (c) 2005, Christopher Clark
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version 2
14 * as published by the Free Software Foundation; or, when distributed
15 * separately from the Linux kernel or incorporated into other
16 * software packages, subject to the following license:
17 *
18 * Permission is hereby granted, free of charge, to any person obtaining a copy
19 * of this source file (the "Software"), to deal in the Software without
20 * restriction, including without limitation the rights to use, copy, modify,
21 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22 * and to permit persons to whom the Software is furnished to do so, subject to
23 * the following conditions:
24 *
25 * The above copyright notice and this permission notice shall be included in
26 * all copies or substantial portions of the Software.
27 *
28 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34 * IN THE SOFTWARE.
35 */
36
37 #ifndef __ASM_GNTTAB_H__
38 #define __ASM_GNTTAB_H__
39
40 #include <asm/page.h>
41
42 #include <xen/interface/xen.h>
43 #include <xen/interface/grant_table.h>
44
45 #include <asm/xen/hypervisor.h>
46
47 #include <xen/features.h>
48 #include <xen/page.h>
49 #include <linux/mm_types.h>
50 #include <linux/page-flags.h>
51 #include <linux/kernel.h>
52
53 /*
54 * Technically there's no reliably invalid grant reference or grant handle,
55 * so pick the value that is the most unlikely one to be observed valid.
56 */
57 #define INVALID_GRANT_REF ((grant_ref_t)-1)
58 #define INVALID_GRANT_HANDLE ((grant_handle_t)-1)
59
60 /* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
61 #define NR_GRANT_FRAMES 4
62
/*
 * Node in a waiters list for free grant references.  Registered with
 * gnttab_request_free_callback(); presumably fn(arg) is invoked once
 * @count grant references become available — see that function.
 */
struct gnttab_free_callback {
	struct gnttab_free_callback *next;	/* next waiter in the list */
	void (*fn)(void *);			/* callback to invoke */
	void *arg;				/* opaque argument for @fn */
	u16 count;				/* number of refs requested */
};
69
70 struct gntab_unmap_queue_data;
71
72 typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);
73
/*
 * Describes one batch of grant unmap operations handled asynchronously
 * by gnttab_unmap_refs_async(); @done(result, this) reports completion.
 */
struct gntab_unmap_queue_data
{
	struct delayed_work	gnttab_work;	/* deferred processing/retry work */
	void *data;				/* opaque caller context */
	gnttab_unmap_refs_done	done;		/* completion callback */
	struct gnttab_unmap_grant_ref *unmap_ops;	/* host unmap ops */
	struct gnttab_unmap_grant_ref *kunmap_ops;	/* kernel unmap ops, may be NULL */
	struct page **pages;			/* pages backing the mappings */
	unsigned int count;			/* entries in the arrays above */
	unsigned int age;			/* internal bookkeeping — TODO confirm use */
};
85
86 int gnttab_init(void);
87 #ifdef CONFIG_HIBERNATE_CALLBACKS
88 int gnttab_suspend(void);
89 int gnttab_resume(void);
90 #else
/* No-op stub when hibernation callbacks are not configured. */
static inline int gnttab_suspend(void)
{
	return 0;
}
95
/* No-op stub when hibernation callbacks are not configured. */
static inline int gnttab_resume(void)
{
	return 0;
}
100 #endif
101
102 int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
103 int readonly);
104
105 /*
106 * End access through the given grant reference, iff the grant entry is no
107 * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
108 * use.
109 */
110 int gnttab_end_foreign_access_ref(grant_ref_t ref);
111
112 /*
113 * Eventually end access through the given grant reference, and once that
114 * access has been ended, free the given page too. Access will be ended
115 * immediately iff the grant entry is not in use, otherwise it will happen
116 * some time later. page may be NULL, in which case no freeing will occur.
117 * Note that the granted page might still be accessed (read or write) by the
118 * other side after gnttab_end_foreign_access() returns, so even if page was
119 * specified as NULL it is not allowed to just reuse the page for other
120 * purposes immediately. gnttab_end_foreign_access() will take an additional
121 * reference to the granted page in this case, which is dropped only after
122 * the grant is no longer in use.
123 * This requires that multi page allocations for areas subject to
124 * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
125 * via free_pages_exact()) in order to avoid high order pages.
126 */
127 void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
128
129 /*
130 * End access through the given grant reference, iff the grant entry is
131 * no longer in use. In case of success ending foreign access, the
132 * grant reference is deallocated.
133 * Return 1 if the grant entry was freed, 0 if it is still in use.
134 */
135 int gnttab_try_end_foreign_access(grant_ref_t ref);
136
137 /*
138 * operations on reserved batches of grant references
139 */
140 int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
141
142 int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first);
143
144 void gnttab_free_grant_reference(grant_ref_t ref);
145
146 void gnttab_free_grant_references(grant_ref_t head);
147
148 void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count);
149
150 int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
151
152 int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
153
154 void gnttab_release_grant_reference(grant_ref_t *private_head,
155 grant_ref_t release);
156
157 void gnttab_request_free_callback(struct gnttab_free_callback *callback,
158 void (*fn)(void *), void *arg, u16 count);
159 void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
160
161 void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
162 unsigned long frame, int readonly);
163
164 /* Give access to the first 4K of the page */
/*
 * Grant @domid access (read-only if @readonly) to the first grant-sized
 * chunk of @page via the pre-allocated grant reference @ref.
 */
static inline void gnttab_page_grant_foreign_access_ref_one(
	grant_ref_t ref, domid_t domid,
	struct page *page, int readonly)
{
	unsigned long gfn = xen_page_to_gfn(page);

	gnttab_grant_foreign_access_ref(ref, domid, gfn, readonly);
}
172
173 static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref * map,phys_addr_t addr,uint32_t flags,grant_ref_t ref,domid_t domid)174 gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
175 uint32_t flags, grant_ref_t ref, domid_t domid)
176 {
177 if (flags & GNTMAP_contains_pte)
178 map->host_addr = addr;
179 else if (!xen_pv_domain())
180 map->host_addr = __pa(addr);
181 else
182 map->host_addr = addr;
183
184 map->flags = flags;
185 map->ref = ref;
186 map->dom = domid;
187 map->status = 1; /* arbitrary positive value */
188 }
189
190 static inline void
gnttab_set_unmap_op(struct gnttab_unmap_grant_ref * unmap,phys_addr_t addr,uint32_t flags,grant_handle_t handle)191 gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
192 uint32_t flags, grant_handle_t handle)
193 {
194 if (flags & GNTMAP_contains_pte)
195 unmap->host_addr = addr;
196 else if (!xen_pv_domain())
197 unmap->host_addr = __pa(addr);
198 else
199 unmap->host_addr = addr;
200
201 unmap->handle = handle;
202 unmap->dev_bus_addr = 0;
203 }
204
205 int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
206 int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
207 unsigned long max_nr_gframes,
208 void **__shared);
209 int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
210 unsigned long max_nr_gframes,
211 grant_status_t **__shared);
212 void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);
213
/*
 * Grant-table frames used by auto-translated guests
 * (see gnttab_setup_auto_xlat_frames()).
 */
struct grant_frames {
	xen_pfn_t *pfn;		/* frame numbers; presumably @count entries */
	unsigned int count;	/* number of frames */
	void *vaddr;		/* virtual mapping of the frames */
};
219 extern struct grant_frames xen_auto_xlat_grant_frames;
220 unsigned int gnttab_max_grant_frames(void);
221 int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
222 void gnttab_free_auto_xlat_frames(void);
223
224 #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
225
226 int gnttab_alloc_pages(int nr_pages, struct page **pages);
227 void gnttab_free_pages(int nr_pages, struct page **pages);
228
/* Cache of free pages that can be reused for grant mappings. */
struct gnttab_page_cache {
	spinlock_t lock;	/* protects @pages and @num_pages */
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
	struct page *pages;	/* cached pages; linkage internal to gnttab */
#else
	struct list_head pages;	/* list of cached pages */
#endif
	unsigned int num_pages;	/* current number of cached pages */
};
238
239 void gnttab_page_cache_init(struct gnttab_page_cache *cache);
240 int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
241 void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
242 unsigned int num);
243 void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
244 unsigned int num);
245
246 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
	/* Device for which DMA memory will be/was allocated. */
	struct device *dev;
	/* If set then DMA buffer is coherent and write-combine otherwise. */
	bool coherent;

	int nr_pages;		/* number of pages to allocate/free */
	struct page **pages;	/* caller-provided array, @nr_pages entries */
	xen_pfn_t *frames;	/* frame numbers of the allocated pages */
	void *vaddr;		/* virtual address of the DMA buffer */
	dma_addr_t dev_bus_addr;	/* bus address of the DMA buffer */
};
259
260 int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
261 int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
262 #endif
263
264 int gnttab_pages_set_private(int nr_pages, struct page **pages);
265 void gnttab_pages_clear_private(int nr_pages, struct page **pages);
266
267 int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
268 struct gnttab_map_grant_ref *kmap_ops,
269 struct page **pages, unsigned int count);
270 int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
271 struct gnttab_unmap_grant_ref *kunmap_ops,
272 struct page **pages, unsigned int count);
273 void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
274 int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
275
276
277 /* Perform a batch of grant map/copy operations. Retry every batch slot
278 * for which the hypervisor returns GNTST_eagain. This is typically due
279 * to paged out target frames.
280 *
281 * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
282 *
 * Return value in each and every status field of the batch guaranteed
284 * to not be GNTST_eagain.
285 */
286 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
287 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
288
289
/*
 * Identity of the foreign grant backing a PageForeign() page:
 * the granting domain and the grant reference within it.
 * Retrieved with xen_page_foreign().
 */
struct xen_page_foreign {
	domid_t domid;		/* domain that granted the page */
	grant_ref_t gref;	/* grant reference in that domain */
};
294
xen_page_foreign(struct page * page)295 static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
296 {
297 if (!PageForeign(page))
298 return NULL;
299 #if BITS_PER_LONG < 64
300 return (struct xen_page_foreign *)page->private;
301 #else
302 BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
303 return (struct xen_page_foreign *)&page->private;
304 #endif
305 }
306
307 /* Split Linux page in chunk of the size of the grant and call fn
308 *
309 * Parameters of fn:
310 * gfn: guest frame number
311 * offset: offset in the grant
312 * len: length of the data in the grant.
313 * data: internal information
314 */
315 typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
316 unsigned int len, void *data);
317
318 void gnttab_foreach_grant_in_range(struct page *page,
319 unsigned int offset,
320 unsigned int len,
321 xen_grant_fn_t fn,
322 void *data);
323
324 /* Helper to get to call fn only on the first "grant chunk" */
/* Helper to get to call fn only on the first "grant chunk" */
static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
					unsigned len, xen_grant_fn_t fn,
					void *data)
{
	/* Bytes remaining in the grant-sized chunk containing @offset. */
	unsigned int remaining = XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK);

	/* The first request is limited to the size of one grant. */
	if (len > remaining)
		len = remaining;

	gnttab_foreach_grant_in_range(page, offset, len, fn, data);
}
335
336 /* Get @nr_grefs grants from an array of page and call fn for each grant */
337 void gnttab_foreach_grant(struct page **pages,
338 unsigned int nr_grefs,
339 xen_grant_fn_t fn,
340 void *data);
341
342 /* Get the number of grant in a specified region
343 *
344 * start: Offset from the beginning of the first page
345 * len: total length of data (can cross multiple page)
346 */
/*
 * Number of grant-sized frames needed to cover @len bytes starting at
 * offset @start (the region may span multiple Xen pages).
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
					      unsigned int len)
{
	/* Total span measured from the start of the first Xen page. */
	unsigned long span = xen_offset_in_page(start) + len;

	return XEN_PFN_UP(span);
}
352
353 #endif /* __ASM_GNTTAB_H__ */
354