Lines matching "page", "-" and "size":
// SPDX-License-Identifier: GPL-2.0-only
/*
 * MMU-based software IOTLB.
 *
 * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
 */
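/*
 * The IOVA space managed here is split in two: a bounce-buffer region
 * ([0, bounce_size)) backed by a per-page bounce_maps[] array and used for
 * streaming DMA, and a "consistent" region above it used for coherent
 * allocations.  Every range that userspace may fault in through the domain
 * file is also recorded in a vhost IOTLB.
 */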
/* vduse_iotlb_add_range() */
                return -ENOMEM;
        map_file->file = get_file(file);
        map_file->offset = offset;
        ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
                fput(map_file->file);
/* vduse_iotlb_del_range() */
        while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
                map_file = (struct vdpa_map_file *)map->opaque;
                fput(map_file->file);
                vhost_iotlb_map_free(domain->iotlb, map);
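/*
 * Each IOTLB entry carries a struct file plus offset in its opaque pointer;
 * the file reference is taken with get_file() when the range is added and
 * dropped with fput() when the range is deleted.
 */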
/* vduse_domain_set_map() */
        spin_lock(&domain->iotlb_lock);
                map_file = (struct vdpa_map_file *)map->opaque;
                ret = vduse_iotlb_add_range(domain, map->start, map->last,
                                            map->addr, map->perm,
                                            map_file->file,
                                            map_file->offset);
        spin_unlock(&domain->iotlb_lock);
        spin_unlock(&domain->iotlb_lock);
/* vduse_domain_clear_map() */
        spin_lock(&domain->iotlb_lock);
                vduse_iotlb_del_range(domain, map->start, map->last);
        spin_unlock(&domain->iotlb_lock);
/* vduse_domain_map_bounce_page() */
                                        u64 iova, u64 size, u64 paddr)
        u64 last = iova + size - 1;
                map = &domain->bounce_maps[iova >> PAGE_SHIFT];
                if (!map->bounce_page) {
                        map->bounce_page = alloc_page(GFP_ATOMIC);
                        if (!map->bounce_page)
                                return -ENOMEM;
                map->orig_phys = paddr;
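/*
 * bounce_maps[] has one slot per PAGE_SIZE chunk of the bounce region.
 * Bounce pages are allocated lazily with GFP_ATOMIC (the map path holds
 * bounce_lock, a spinning rwlock), and orig_phys records the physical
 * address of the original buffer so the data can be copied across later.
 */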
/* vduse_domain_unmap_bounce_page() */
                                          u64 iova, u64 size)
        u64 last = iova + size - 1;
                map = &domain->bounce_maps[iova >> PAGE_SHIFT];
                map->orig_phys = INVALID_PHYS_ADDR;
static void do_bounce(phys_addr_t orig, void *addr, size_t size,
        struct page *page;
        while (size) {
                sz = min_t(size_t, PAGE_SIZE - offset, size);
                page = pfn_to_page(pfn);
                        memcpy_from_page(addr, page, offset, sz);
                        memcpy_to_page(page, offset, addr, sz);
                size -= sz;
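/*
 * do_bounce() copies one page-sized chunk at a time between the page that
 * holds the original buffer (pfn_to_page() on its physical address) and the
 * caller-supplied virtual address, with the copy direction selected by the
 * DMA direction.
 */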
/* vduse_domain_bounce() */
                                dma_addr_t iova, size_t size,
        if (iova >= domain->bounce_size)
        while (size) {
                map = &domain->bounce_maps[iova >> PAGE_SHIFT];
                sz = min_t(size_t, PAGE_SIZE - offset, size);
                if (WARN_ON(!map->bounce_page ||
                            map->orig_phys == INVALID_PHYS_ADDR))
                addr = kmap_local_page(map->bounce_page);
                do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
                size -= sz;
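/*
 * vduse_domain_bounce() walks bounce_maps[] over an IOVA range and, for each
 * chunk that has both a bounce page and a recorded orig_phys, maps the
 * bounce page with kmap_local_page() and lets do_bounce() move the data in
 * the requested direction.
 */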
/* vduse_domain_get_coherent_page() */
static struct page *
        u64 last = start + PAGE_SIZE - 1;
        struct page *page = NULL;
        spin_lock(&domain->iotlb_lock);
        map = vhost_iotlb_itree_first(domain->iotlb, start, last);
        page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
        get_page(page);
        spin_unlock(&domain->iotlb_lock);
        return page;
/* vduse_domain_get_bounce_page() */
static struct page *
        struct page *page = NULL;
        read_lock(&domain->bounce_lock);
        map = &domain->bounce_maps[iova >> PAGE_SHIFT];
        if (domain->user_bounce_pages || !map->bounce_page)
        page = map->bounce_page;
        get_page(page);
        read_unlock(&domain->bounce_lock);
        return page;
/* vduse_domain_free_kernel_bounce_pages() */
        bounce_pfns = domain->bounce_size >> PAGE_SHIFT;
                map = &domain->bounce_maps[pfn];
                if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
                if (!map->bounce_page)
                __free_page(map->bounce_page);
                map->bounce_page = NULL;
/* vduse_domain_add_user_bounce_pages() */
                                        struct page **pages, int count)
        if (count != (domain->bounce_size >> PAGE_SHIFT))
                return -EINVAL;
        write_lock(&domain->bounce_lock);
        ret = -EEXIST;
        if (domain->user_bounce_pages)
                map = &domain->bounce_maps[i];
                if (map->bounce_page) {
                        /* Copy kernel page to user page if it's in use */
                        if (map->orig_phys != INVALID_PHYS_ADDR)
                                       page_address(map->bounce_page),
                        __free_page(map->bounce_page);
                map->bounce_page = pages[i];
        domain->user_bounce_pages = true;
        write_unlock(&domain->bounce_lock);
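/*
 * Replaces the kernel-allocated bounce pages with pages supplied by
 * userspace; count must cover the whole bounce region.  A kernel page that
 * is still backing an in-flight mapping is copied into the corresponding
 * user page before being freed.
 */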
/* vduse_domain_remove_user_bounce_pages() */
        write_lock(&domain->bounce_lock);
        if (!domain->user_bounce_pages)
        count = domain->bounce_size >> PAGE_SHIFT;
                struct page *page = NULL;
                map = &domain->bounce_maps[i];
                if (WARN_ON(!map->bounce_page))
                /* Copy user page to kernel page if it's in use */
                if (map->orig_phys != INVALID_PHYS_ADDR) {
                        page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
                        memcpy_from_page(page_address(page),
                                         map->bounce_page, 0, PAGE_SIZE);
                put_page(map->bounce_page);
                map->bounce_page = page;
        domain->user_bounce_pages = false;
        write_unlock(&domain->bounce_lock);
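/*
 * The reverse path: user bounce pages are dropped again, and any page that
 * still backs an in-flight mapping is first copied into a freshly allocated
 * kernel page (GFP_ATOMIC | __GFP_NOFAIL, since this runs under the
 * bounce_lock write lock).
 */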
/* vduse_domain_reset_bounce_map() */
        if (!domain->bounce_map)
        spin_lock(&domain->iotlb_lock);
        if (!domain->bounce_map)
        vduse_iotlb_del_range(domain, 0, domain->bounce_size - 1);
        domain->bounce_map = 0;
        spin_unlock(&domain->iotlb_lock);
/* vduse_domain_init_bounce_map() */
        if (domain->bounce_map)
        spin_lock(&domain->iotlb_lock);
        if (domain->bounce_map)
        ret = vduse_iotlb_add_range(domain, 0, domain->bounce_size - 1,
                                    0, VHOST_MAP_RW, domain->file, 0);
        domain->bounce_map = 1;
        spin_unlock(&domain->iotlb_lock);
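/*
 * The whole bounce region is published to the IOTLB as a single read/write
 * entry backed by the domain file at offset 0.  It is created lazily here
 * and torn down by vduse_domain_reset_bounce_map(); the repeated bounce_map
 * test is the usual check, lock, re-check pattern.
 */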
/* vduse_domain_alloc_iova() */
                                unsigned long size, unsigned long limit)
        unsigned long iova_len = iova_align(iovad, size) >> shift;
/* vduse_domain_free_iova() */
                              dma_addr_t iova, size_t size)
        unsigned long iova_len = iova_align(iovad, size) >> shift;
/* vduse_domain_map_page() */
                                 struct page *page, unsigned long offset,
                                 size_t size, enum dma_data_direction dir,
        struct iova_domain *iovad = &domain->stream_iovad;
        unsigned long limit = domain->bounce_size - 1;
        phys_addr_t pa = page_to_phys(page) + offset;
        dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
        read_lock(&domain->bounce_lock);
        if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
                vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
        read_unlock(&domain->bounce_lock);
        read_unlock(&domain->bounce_lock);
        vduse_domain_free_iova(iovad, iova, size);
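/*
 * Streaming map path: an IOVA is taken from stream_iovad (limited to the
 * bounce region), the original page is attached to the matching
 * bounce_maps[] slots, and for device-bound transfers the data is bounced
 * into the bounce pages up front.
 */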
/* vduse_domain_unmap_page() */
                             dma_addr_t dma_addr, size_t size,
        struct iova_domain *iovad = &domain->stream_iovad;
        read_lock(&domain->bounce_lock);
                vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
        vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
        read_unlock(&domain->bounce_lock);
        vduse_domain_free_iova(iovad, dma_addr, size);
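/*
 * Unmap mirrors the map path: data coming back from the device is bounced
 * out of the bounce pages, the bounce_maps[] slots are invalidated, and the
 * IOVA range is returned to stream_iovad.
 */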
/* vduse_domain_alloc_coherent() */
                                  size_t size, dma_addr_t *dma_addr,
        struct iova_domain *iovad = &domain->consistent_iovad;
        unsigned long limit = domain->iova_limit;
        dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
        void *orig = alloc_pages_exact(size, flag);
        spin_lock(&domain->iotlb_lock);
        if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
                                  domain->file, (u64)iova)) {
                spin_unlock(&domain->iotlb_lock);
        spin_unlock(&domain->iotlb_lock);
                free_pages_exact(orig, size);
        vduse_domain_free_iova(iovad, iova, size);
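/*
 * Coherent allocations come from alloc_pages_exact() and take their IOVA
 * from consistent_iovad (limited by iova_limit rather than bounce_size).
 * The range is inserted into the IOTLB backed by the domain file at an
 * offset equal to the IOVA, so userspace can mmap the same memory.
 */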
void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
        struct iova_domain *iovad = &domain->consistent_iovad;
        spin_lock(&domain->iotlb_lock);
        map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
                                      (u64)dma_addr + size - 1);
                spin_unlock(&domain->iotlb_lock);
        map_file = (struct vdpa_map_file *)map->opaque;
        fput(map_file->file);
        pa = map->addr;
        vhost_iotlb_map_free(domain->iotlb, map);
        spin_unlock(&domain->iotlb_lock);
        vduse_domain_free_iova(iovad, dma_addr, size);
        free_pages_exact(phys_to_virt(pa), size);
/* vduse_domain_mmap_fault() */
        struct vduse_iova_domain *domain = vmf->vma->vm_private_data;
        unsigned long iova = vmf->pgoff << PAGE_SHIFT;
        struct page *page;
        if (iova < domain->bounce_size)
                page = vduse_domain_get_bounce_page(domain, iova);
                page = vduse_domain_get_coherent_page(domain, iova);
        if (!page)
        vmf->page = page;
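/*
 * The mmap fault handler turns the file offset back into an IOVA and hands
 * out either a bounce page (IOVAs below bounce_size) or the page behind a
 * coherent allocation (IOVAs above it), with a reference taken by the
 * lookup helpers above.
 */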
/* vduse_domain_mmap() */
        struct vduse_iova_domain *domain = file->private_data;
        vma->vm_private_data = domain;
        vma->vm_ops = &vduse_domain_mmap_ops;
/* vduse_domain_release() */
        struct vduse_iova_domain *domain = file->private_data;
        spin_lock(&domain->iotlb_lock);
        spin_unlock(&domain->iotlb_lock);
        put_iova_domain(&domain->stream_iovad);
        put_iova_domain(&domain->consistent_iovad);
        vhost_iotlb_free(domain->iotlb);
        vfree(domain->bounce_maps);
/* vduse_domain_destroy() */
        fput(domain->file);
/* vduse_domain_create() */
        domain->iotlb = vhost_iotlb_alloc(0, 0);
        if (!domain->iotlb)
        domain->iova_limit = iova_limit;
        domain->bounce_size = PAGE_ALIGN(bounce_size);
        domain->bounce_maps = vzalloc(bounce_pfns *
        if (!domain->bounce_maps)
                map = &domain->bounce_maps[pfn];
                map->orig_phys = INVALID_PHYS_ADDR;
        file = anon_inode_getfile("[vduse-domain]", &vduse_domain_fops,
        domain->file = file;
        rwlock_init(&domain->bounce_lock);
        spin_lock_init(&domain->iotlb_lock);
        init_iova_domain(&domain->stream_iovad,
        ret = iova_domain_init_rcaches(&domain->stream_iovad);
        init_iova_domain(&domain->consistent_iovad,
        ret = iova_domain_init_rcaches(&domain->consistent_iovad);
        put_iova_domain(&domain->stream_iovad);
        vfree(domain->bounce_maps);
        vhost_iotlb_free(domain->iotlb);
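For orientation, here is a minimal caller sketch showing how a domain created by vduse_domain_create() might be used as a DMA backend. The prototypes of vduse_domain_create(), vduse_domain_map_page(), vduse_domain_unmap_page() and vduse_domain_destroy(), the DMA_MAPPING_ERROR return convention, and the local "iova_domain.h" header are inferred rather than shown in the matched lines above, so treat every detail as an assumption.

#include <linux/dma-mapping.h>  /* dma_addr_t, DMA_MAPPING_ERROR, enum dma_data_direction */
#include <linux/mm.h>
#include <linux/sizes.h>
#include "iova_domain.h"        /* assumed header declaring the vduse_domain_* helpers */

/* Hypothetical caller: map one page for a device-bound transfer, then undo it. */
static int example_map_one_page(struct page *buf_page)
{
        struct vduse_iova_domain *domain;
        dma_addr_t iova;

        /* Assumed sizes: 64 MB of IOVA space, 1 MB of it used for bouncing. */
        domain = vduse_domain_create(SZ_64M, SZ_1M);
        if (!domain)
                return -ENOMEM;

        /* DMA_TO_DEVICE: the page content is bounced into a bounce page now. */
        iova = vduse_domain_map_page(domain, buf_page, 0, PAGE_SIZE,
                                     DMA_TO_DEVICE, 0);
        if (iova == DMA_MAPPING_ERROR) {
                vduse_domain_destroy(domain);
                return -ENOMEM;
        }

        /* ... the IOVA would normally be handed to the userspace device ... */

        vduse_domain_unmap_page(domain, iova, PAGE_SIZE, DMA_TO_DEVICE, 0);
        vduse_domain_destroy(domain);
        return 0;
}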