Lines Matching +full:use +full:- +full:guard +full:- +full:pages

1 // SPDX-License-Identifier: GPL-2.0
17 static void *io_mem_alloc_compound(struct page **pages, int nr_pages, in io_mem_alloc_compound() argument
25 return ERR_PTR(-ENOMEM); in io_mem_alloc_compound()
31 return ERR_PTR(-ENOMEM); in io_mem_alloc_compound()
34 pages[i] = page + i; in io_mem_alloc_compound()
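
The matches above cover the compound-allocation path: one high-order allocation backs the whole region and every slot of the page array points into it, which is why a single reference is enough to free it later (IO_REGION_F_SINGLE_REF further down). A minimal kernel-style sketch of that pattern; the helper name and the order-limit constant are assumptions, not quotes from the file:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/err.h>

/* Sketch only: allocate one compound page and fan it out into 'pages'. */
static void *compound_backing_sketch(struct page **pages, int nr_pages,
				     size_t size, gfp_t gfp)
{
	unsigned int order = get_order(size);
	struct page *page;
	int i;

	if (order > MAX_PAGE_ORDER)	/* constant name varies by kernel version */
		return ERR_PTR(-ENOMEM);

	page = alloc_pages(gfp | __GFP_COMP, order);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* every array slot refers into the same compound allocation */
	for (i = 0; i < nr_pages; i++)
		pages[i] = page + i;

	return page_address(page);
}
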
42 struct page **pages; in io_pin_pages() local
46 return ERR_PTR(-EOVERFLOW); in io_pin_pages()
47 if (check_add_overflow(end, PAGE_SIZE - 1, &end)) in io_pin_pages()
48 return ERR_PTR(-EOVERFLOW); in io_pin_pages()
52 nr_pages = end - start; in io_pin_pages()
54 return ERR_PTR(-EINVAL); in io_pin_pages()
56 return ERR_PTR(-EOVERFLOW); in io_pin_pages()
58 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in io_pin_pages()
59 if (!pages) in io_pin_pages()
60 return ERR_PTR(-ENOMEM); in io_pin_pages()
63 pages); in io_pin_pages()
64 /* success, mapped all pages */ in io_pin_pages()
67 return pages; in io_pin_pages()
72 /* if we did partial map, release any pages we did get */ in io_pin_pages()
74 unpin_user_pages(pages, ret); in io_pin_pages()
75 ret = -EFAULT; in io_pin_pages()
77 kvfree(pages); in io_pin_pages()
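
Taken together, the io_pin_pages() matches describe a common long-term pinning pattern: overflow-checked page-range arithmetic, a kvmalloc'd page array, a single pin_user_pages_fast() call, and cleanup of any partial pin. A sketch of that flow; the FOLL_WRITE | FOLL_LONGTERM flags are the usual choice for such callers and are an assumption here:

#include <linux/mm.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/err.h>

static struct page **pin_user_range_sketch(unsigned long uaddr,
					   unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages;
	int ret;

	if (check_add_overflow(uaddr, len, &end) ||
	    check_add_overflow(end, PAGE_SIZE - 1, &end))
		return ERR_PTR(-EOVERFLOW);

	start = uaddr >> PAGE_SHIFT;
	end >>= PAGE_SHIFT;
	nr_pages = end - start;
	if (!nr_pages)
		return ERR_PTR(-EINVAL);
	if (nr_pages > INT_MAX)
		return ERR_PTR(-EOVERFLOW);

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	ret = pin_user_pages_fast(uaddr, nr_pages,
				  FOLL_WRITE | FOLL_LONGTERM, pages);
	if (ret == nr_pages) {
		*npages = nr_pages;
		return pages;		/* success, mapped all pages */
	}
	if (ret >= 0) {
		/* partial pin: drop whatever was pinned, report -EFAULT */
		if (ret)
			unpin_user_pages(pages, ret);
		ret = -EFAULT;
	}
	kvfree(pages);
	return ERR_PTR(ret);
}
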
92 if (mr->pages) { in io_free_region()
93 long nr_refs = mr->nr_pages; in io_free_region()
95 if (mr->flags & IO_REGION_F_SINGLE_REF) in io_free_region()
98 if (mr->flags & IO_REGION_F_USER_PROVIDED) in io_free_region()
99 unpin_user_pages(mr->pages, nr_refs); in io_free_region()
101 release_pages(mr->pages, nr_refs); in io_free_region()
103 kvfree(mr->pages); in io_free_region()
105 if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr) in io_free_region()
106 vunmap(mr->ptr); in io_free_region()
107 if (mr->nr_pages && ctx->user) in io_free_region()
108 __io_unaccount_mem(ctx->user, mr->nr_pages); in io_free_region()
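
The io_free_region() matches show a teardown driven entirely by region flags: user-pinned pages are unpinned, kernel-allocated pages are released (once per page, or once in total for a compound backing), and a vmap'd mapping is undone. A sketch of that logic; the struct layout, the flag values, and the omitted __io_unaccount_mem() accounting step are assumptions:

#include <linux/bits.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Flag names come from the matches; the values here are assumptions. */
#define IO_REGION_F_VMAP		BIT(0)
#define IO_REGION_F_USER_PROVIDED	BIT(1)
#define IO_REGION_F_SINGLE_REF		BIT(2)

struct region_sketch {
	struct page	**pages;
	void		*ptr;
	unsigned int	nr_pages;
	unsigned int	flags;
};

static void free_region_sketch(struct region_sketch *mr)
{
	if (mr->pages) {
		/* one ref for a compound backing, one per page otherwise */
		long nr_refs = (mr->flags & IO_REGION_F_SINGLE_REF) ?
				1 : mr->nr_pages;

		if (mr->flags & IO_REGION_F_USER_PROVIDED)
			unpin_user_pages(mr->pages, nr_refs);	/* pinned user memory */
		else
			release_pages(mr->pages, nr_refs);	/* kernel-allocated pages */
		kvfree(mr->pages);
	}
	if ((mr->flags & IO_REGION_F_VMAP) && mr->ptr)
		vunmap(mr->ptr);	/* drop the kernel-side mapping */
	memset(mr, 0, sizeof(*mr));
}
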
118 if (io_check_coalesce_buffer(mr->pages, mr->nr_pages, &ifd)) { in io_region_init_ptr()
119 if (ifd.nr_folios == 1 && !PageHighMem(mr->pages[0])) { in io_region_init_ptr()
120 mr->ptr = page_address(mr->pages[0]); in io_region_init_ptr()
124 ptr = vmap(mr->pages, mr->nr_pages, VM_MAP, PAGE_KERNEL); in io_region_init_ptr()
126 return -ENOMEM; in io_region_init_ptr()
128 mr->ptr = ptr; in io_region_init_ptr()
129 mr->flags |= IO_REGION_F_VMAP; in io_region_init_ptr()
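
The io_region_init_ptr() matches show the two ways the region gets a kernel address: pages that coalesce into a single non-highmem folio can be addressed directly with page_address(), everything else is vmap'd and flagged so the mapping is torn down later. A sketch of that decision; the 'contiguous' parameter stands in for the io_check_coalesce_buffer() test:

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

static int region_map_sketch(struct page **pages, unsigned int nr_pages,
			     bool contiguous, void **ptr, bool *vmapped)
{
	if (contiguous && !PageHighMem(pages[0])) {
		*ptr = page_address(pages[0]);	/* direct-mapped, nothing to undo */
		*vmapped = false;
		return 0;
	}

	*ptr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!*ptr)
		return -ENOMEM;
	*vmapped = true;	/* remember to vunmap() on free (IO_REGION_F_VMAP) */
	return 0;
}
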
137 unsigned long size = mr->nr_pages << PAGE_SHIFT; in io_region_pin_pages()
138 struct page **pages; in io_region_pin_pages() local
141 pages = io_pin_pages(reg->user_addr, size, &nr_pages); in io_region_pin_pages()
142 if (IS_ERR(pages)) in io_region_pin_pages()
143 return PTR_ERR(pages); in io_region_pin_pages()
144 if (WARN_ON_ONCE(nr_pages != mr->nr_pages)) in io_region_pin_pages()
145 return -EFAULT; in io_region_pin_pages()
147 mr->pages = pages; in io_region_pin_pages()
148 mr->flags |= IO_REGION_F_USER_PROVIDED; in io_region_pin_pages()
158 unsigned long size = mr->nr_pages << PAGE_SHIFT; in io_region_allocate_pages()
160 struct page **pages; in io_region_allocate_pages() local
163 pages = kvmalloc_array(mr->nr_pages, sizeof(*pages), gfp); in io_region_allocate_pages()
164 if (!pages) in io_region_allocate_pages()
165 return -ENOMEM; in io_region_allocate_pages()
167 p = io_mem_alloc_compound(pages, mr->nr_pages, size, gfp); in io_region_allocate_pages()
169 mr->flags |= IO_REGION_F_SINGLE_REF; in io_region_allocate_pages()
174 mr->nr_pages, pages); in io_region_allocate_pages()
175 if (nr_allocated != mr->nr_pages) { in io_region_allocate_pages()
177 release_pages(pages, nr_allocated); in io_region_allocate_pages()
178 kvfree(pages); in io_region_allocate_pages()
179 return -ENOMEM; in io_region_allocate_pages()
182 reg->mmap_offset = mmap_offset; in io_region_allocate_pages()
183 mr->pages = pages; in io_region_allocate_pages()
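
The io_region_allocate_pages() matches show a two-step strategy: try the compound allocation first (so freeing needs only a single reference), and fall back to filling the page array individually. The real fallback uses the bulk page allocator; the sketch below uses a plain loop for clarity and reuses the compound helper sketched earlier:

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

static int alloc_region_pages_sketch(struct page **pages, unsigned int nr_pages,
				     size_t size, gfp_t gfp, bool *single_ref)
{
	void *p = compound_backing_sketch(pages, nr_pages, size, gfp);
	unsigned int i;

	if (!IS_ERR(p)) {
		*single_ref = true;	/* corresponds to IO_REGION_F_SINGLE_REF */
		return 0;
	}

	*single_ref = false;
	for (i = 0; i < nr_pages; i++) {
		pages[i] = alloc_page(gfp);
		if (!pages[i]) {
			release_pages(pages, i);	/* free what was allocated */
			return -ENOMEM;
		}
	}
	return 0;
}
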
194 if (WARN_ON_ONCE(mr->pages || mr->ptr || mr->nr_pages)) in io_create_region()
195 return -EFAULT; in io_create_region()
196 if (memchr_inv(&reg->__resv, 0, sizeof(reg->__resv))) in io_create_region()
197 return -EINVAL; in io_create_region()
198 if (reg->flags & ~IORING_MEM_REGION_TYPE_USER) in io_create_region()
199 return -EINVAL; in io_create_region()
201 if ((reg->flags & IORING_MEM_REGION_TYPE_USER) != !!reg->user_addr) in io_create_region()
202 return -EFAULT; in io_create_region()
203 if (!reg->size || reg->mmap_offset || reg->id) in io_create_region()
204 return -EINVAL; in io_create_region()
205 if ((reg->size >> PAGE_SHIFT) > INT_MAX) in io_create_region()
206 return -E2BIG; in io_create_region()
207 if ((reg->user_addr | reg->size) & ~PAGE_MASK) in io_create_region()
208 return -EINVAL; in io_create_region()
209 if (check_add_overflow(reg->user_addr, reg->size, &end)) in io_create_region()
210 return -EOVERFLOW; in io_create_region()
212 nr_pages = reg->size >> PAGE_SHIFT; in io_create_region()
213 if (ctx->user) { in io_create_region()
214 ret = __io_account_mem(ctx->user, nr_pages); in io_create_region()
218 mr->nr_pages = nr_pages; in io_create_region()
220 if (reg->flags & IORING_MEM_REGION_TYPE_USER) in io_create_region()
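
From userspace, the io_create_region() checks above translate into simple rules for filling in struct io_uring_region_desc (from the io_uring uapi header): page-aligned user_addr and size, reserved fields, id and mmap_offset left zero, and the USER type flag set exactly when user_addr is provided. A hedged example of a descriptor that satisfies those checks; how the descriptor is then registered with the kernel is outside the matched lines:

#include <linux/io_uring.h>
#include <stdint.h>
#include <string.h>

static struct io_uring_region_desc make_user_region_desc(void *buf, size_t size)
{
	struct io_uring_region_desc rd;

	memset(&rd, 0, sizeof(rd));	/* keeps __resv, id and mmap_offset zero */
	rd.user_addr = (uint64_t)(uintptr_t)buf;	/* must be page aligned */
	rd.size = size;				/* must be a multiple of the page size */
	rd.flags = IORING_MEM_REGION_TYPE_USER;	/* required iff user_addr != 0 */
	return rd;
}
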
249 * Once published, mmap can find it while holding only the ->mmap_lock in io_create_region_mmap_safe()
250 * and not ->uring_lock. in io_create_region_mmap_safe()
252 guard(mutex)(&ctx->mmap_lock); in io_create_region_mmap_safe()
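
The guard(mutex) match and the comment above describe a publish-under-lock pattern: build the region in a private copy, then install it while holding ->mmap_lock so a racing mmap() sees either nothing or a fully initialised region. A sketch of that pattern; the helper name and the temporary-copy detail are assumptions, only io_create_region() and the scoped lock come from the matches:

#include <linux/cleanup.h>
#include <linux/mutex.h>

static int create_region_published_sketch(struct io_ring_ctx *ctx,
					  struct io_mapped_region *mr,
					  struct io_uring_region_desc *reg,
					  unsigned long mmap_offset)
{
	struct io_mapped_region tmp = {};
	int ret;

	ret = io_create_region(ctx, &tmp, reg, mmap_offset);
	if (ret)
		return ret;

	/* guard() releases ->mmap_lock automatically at function exit */
	guard(mutex)(&ctx->mmap_lock);
	*mr = tmp;
	return 0;
}
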
266 return &ctx->ring_region; in io_mmap_get_region()
268 return &ctx->sq_region; in io_mmap_get_region()
273 return &ctx->param_region; in io_mmap_get_region()
275 return &ctx->zcrx_region; in io_mmap_get_region()
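
On the userspace side of the dispatch above, the mmap() offset is what selects the region: the IORING_OFF_* constants in the io_uring uapi header name the well-known offsets for the SQ/CQ rings and the SQE array. A minimal example mapping two of them; the sizes would normally be computed from the io_uring_params returned by io_uring_setup():

#include <linux/io_uring.h>
#include <stddef.h>
#include <sys/mman.h>

static void *map_sq_ring(int ring_fd, size_t sq_ring_bytes)
{
	return mmap(NULL, sq_ring_bytes, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
}

static void *map_sqes(int ring_fd, size_t sqe_bytes)
{
	return mmap(NULL, sqe_bytes, PROT_READ | PROT_WRITE,
		    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
}
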
283 lockdep_assert_held(&ctx->mmap_lock); in io_region_validate_mmap()
286 return ERR_PTR(-EINVAL); in io_region_validate_mmap()
287 if (mr->flags & IO_REGION_F_USER_PROVIDED) in io_region_validate_mmap()
288 return ERR_PTR(-EINVAL); in io_region_validate_mmap()
296 struct io_ring_ctx *ctx = file->private_data; in io_uring_validate_mmap_request()
301 return ERR_PTR(-EINVAL); in io_uring_validate_mmap_request()
312 unsigned long nr_pages = min(mr->nr_pages, max_pages); in io_region_mmap()
315 return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages); in io_region_mmap()
320 struct io_ring_ctx *ctx = file->private_data; in io_uring_mmap()
321 size_t sz = vma->vm_end - vma->vm_start; in io_uring_mmap()
322 long offset = vma->vm_pgoff << PAGE_SHIFT; in io_uring_mmap()
327 guard(mutex)(&ctx->mmap_lock); in io_uring_mmap()
329 ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz); in io_uring_mmap()
336 page_limit = (sz + PAGE_SIZE - 1) >> PAGE_SHIFT; in io_uring_mmap()
340 region = io_mmap_get_region(ctx, vma->vm_pgoff); in io_uring_mmap()
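
The io_uring_mmap() matches show the MMU path end to end: everything runs under ->mmap_lock, the requested length is rounded up to a page count and clamped to the region, and the region's pages are inserted into the VMA in a single call. A condensed sketch of the per-region step; the VM_DONTEXPAND marking is the usual choice for such mappings and is an assumption here:

#include <linux/minmax.h>
#include <linux/mm.h>

static int mmap_region_sketch(struct io_mapped_region *mr,
			      struct vm_area_struct *vma)
{
	size_t sz = vma->vm_end - vma->vm_start;
	unsigned long max_pages = (sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long nr_pages = min_t(unsigned long, mr->nr_pages, max_pages);

	vm_flags_set(vma, VM_DONTEXPAND);	/* assumption: keep the mapping fixed-size */
	return vm_insert_pages(vma, vma->vm_start, mr->pages, &nr_pages);
}
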
348 struct io_ring_ctx *ctx = filp->private_data; in io_uring_get_unmapped_area()
352 * Do not allow mapping to a user-provided address, to avoid breaking the in io_uring_get_unmapped_area()
357 return -EINVAL; in io_uring_get_unmapped_area()
359 guard(mutex)(&ctx->mmap_lock); in io_uring_get_unmapped_area()
363 return -ENOMEM; in io_uring_get_unmapped_area()
369 * - use a NULL file pointer to reference physical memory, and in io_uring_get_unmapped_area()
370 * - use the kernel virtual address of the shared io_uring context in io_uring_get_unmapped_area()
371 * (instead of the userspace-provided address, which has to be 0UL in io_uring_get_unmapped_area()
373 * - use the same pgoff that get_unmapped_area() uses to in io_uring_get_unmapped_area()
387 return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); in io_uring_get_unmapped_area()
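
The comment and the final call above describe how the MMU variant picks a user address: the file pointer is dropped, MAP_SHARED is forced, and on architectures that define SHM_COLOUR the kernel address of the region drives the page colouring so the user and kernel mappings alias coherently. A sketch of that address-selection step; only mm_get_unmapped_area() and the bullet-point logic come from the matches:

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>

static unsigned long pick_mmap_addr_sketch(void *kaddr, unsigned long len,
					   unsigned long flags)
{
	unsigned long addr, pgoff;

	flags |= MAP_SHARED;
	pgoff = 0;			/* the offset was already resolved to kaddr */
#ifdef SHM_COLOUR
	addr = (uintptr_t)kaddr;	/* colour against the kernel mapping */
	pgoff = addr >> PAGE_SHIFT;
#else
	addr = 0UL;			/* let the architecture pick any address */
#endif
	return mm_get_unmapped_area(current->mm, NULL, addr, len, pgoff, flags);
}
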
394 return is_nommu_shared_mapping(vma->vm_flags) ? 0 : -EINVAL; in io_uring_mmap()
406 struct io_ring_ctx *ctx = file->private_data; in io_uring_get_unmapped_area()
409 guard(mutex)(&ctx->mmap_lock); in io_uring_get_unmapped_area()