Lines matching "use", "guard", "pages" (kernel/bpf/arena.c)
1 // SPDX-License-Identifier: GPL-2.0-only
16 * For example on x86-64 the values could be:
19 * For user space all pointers within the arena are normal 8-byte addresses.
22 * (u32)7f7d26200000 -> 26200000
28 * mov eax, eax // eax has lower 32-bit of user pointer
34 * User space can fault-in any address which will insert the page
38 * The later fault-in from user space will populate that page into user vma.
41 /* number of bytes addressable by LDX/STX insn with 16-bit 'off' field */
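The comment block above compresses the whole addressing trick: the BPF program adds only the lower 32 bits of a user pointer to kern_vm_start, and the guard zone absorbs the signed 16-bit 'off' field of LDX/STX. Below is a hypothetical user-space illustration of that arithmetic using the example values quoted above; it is not kernel code, and the guard size is recomputed inline from the 16-bit field width.

    /* Hypothetical illustration of the arena address math described above;
     * kern_vm_start and user_ptr are the example values from the comment.
     */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t kern_vm_start = 0xffffc90001e69000ULL; /* picked by get_vm_area() */
            uint64_t user_ptr      = 0x7f7d26200000ULL;     /* picked by mmap() */

            /* bpf prog/JIT view: keep only the lower 32 bits of the user pointer */
            uint64_t kern_addr = kern_vm_start + (uint32_t)user_ptr;

            /* signed 16-bit 'off' in LDX/STX reaches +-32K, hence a 64 KB guard,
             * half of it on each side of the 4 GB region
             */
            uint64_t guard_sz = 1ULL << 16;

            printf("kern addr  %#llx\n", (unsigned long long)kern_addr); /* 0xffffc90028069000 */
            printf("guard size %#llx bytes (%#llx per side)\n",
                   (unsigned long long)guard_sz, (unsigned long long)(guard_sz / 2));
            return 0;
    }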
57 return arena ? (u64) (long) arena->kern_vm->addr + GUARD_SZ / 2 : 0; in bpf_arena_get_kern_vm_start()
62 return arena ? arena->user_vm_start : 0; in bpf_arena_get_user_vm_start()
67 return -EOPNOTSUPP; in arena_map_peek_elem()
72 return -EOPNOTSUPP; in arena_map_push_elem()
77 return -EOPNOTSUPP; in arena_map_pop_elem()
82 return -EOPNOTSUPP; in arena_map_delete_elem()
87 return -EOPNOTSUPP; in arena_map_get_next_key()
92 return (u32)(uaddr - (u32)arena->user_vm_start) >> PAGE_SHIFT; in compute_pgoff()
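compute_pgoff() deliberately works only on the lower 32 bits, so it returns the same page index whether it is handed a full 64-bit user address or its lower-32-bit alias. A minimal stand-alone sketch of that behaviour, assuming the example user_vm_start above and 4 KB pages (not kernel code):

    /* Hypothetical stand-alone rendition of compute_pgoff(); PAGE_SHIFT is
     * assumed to be 12 (4 KB pages).
     */
    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    static long compute_pgoff(uint64_t user_vm_start, uint64_t uaddr)
    {
            return (uint32_t)(uaddr - (uint32_t)user_vm_start) >> PAGE_SHIFT;
    }

    int main(void)
    {
            uint64_t base = 0x7f7d26200000ULL;

            assert(compute_pgoff(base, base) == 0);                     /* first page */
            assert(compute_pgoff(base, base + 0x3000) == 3);            /* full 64-bit address */
            assert(compute_pgoff(base, (uint32_t)base + 0x3000) == 3);  /* lower-32-bit alias */
            return 0;
    }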
101 int err = -ENOMEM; in arena_map_alloc()
104 return ERR_PTR(-EOPNOTSUPP); in arena_map_alloc()
106 if (attr->key_size || attr->value_size || attr->max_entries == 0 || in arena_map_alloc()
108 !(attr->map_flags & BPF_F_MMAPABLE) || in arena_map_alloc()
110 (attr->map_flags & ~(BPF_F_SEGV_ON_FAULT | BPF_F_MMAPABLE | BPF_F_NO_USER_CONV))) in arena_map_alloc()
111 return ERR_PTR(-EINVAL); in arena_map_alloc()
113 if (attr->map_extra & ~PAGE_MASK) in arena_map_alloc()
114 /* If non-zero the map_extra is an expected user VMA start address */ in arena_map_alloc()
115 return ERR_PTR(-EINVAL); in arena_map_alloc()
117 vm_range = (u64)attr->max_entries * PAGE_SIZE; in arena_map_alloc()
119 return ERR_PTR(-E2BIG); in arena_map_alloc()
121 if ((attr->map_extra >> 32) != ((attr->map_extra + vm_range - 1) >> 32)) in arena_map_alloc()
122 /* user vma must not cross 32-bit boundary */ in arena_map_alloc()
123 return ERR_PTR(-ERANGE); in arena_map_alloc()
127 return ERR_PTR(-ENOMEM); in arena_map_alloc()
133 arena->kern_vm = kern_vm; in arena_map_alloc()
134 arena->user_vm_start = attr->map_extra; in arena_map_alloc()
135 if (arena->user_vm_start) in arena_map_alloc()
136 arena->user_vm_end = arena->user_vm_start + vm_range; in arena_map_alloc()
138 INIT_LIST_HEAD(&arena->vma_list); in arena_map_alloc()
139 bpf_map_init_from_attr(&arena->map, attr); in arena_map_alloc()
140 range_tree_init(&arena->rt); in arena_map_alloc()
141 err = range_tree_set(&arena->rt, 0, attr->max_entries); in arena_map_alloc()
146 mutex_init(&arena->lock); in arena_map_alloc()
148 return &arena->map; in arena_map_alloc()
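For context, the checks in arena_map_alloc() translate into the following user-space creation call: key_size and value_size must be 0, max_entries is the arena size in pages, BPF_F_MMAPABLE is mandatory, and map_extra optionally fixes the user VM start. A hedged sketch using libbpf; the map name "arena" and the 100-page size are arbitrary choices for this example.

    /* Sketch of creating an arena map from user space with libbpf. */
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    static int create_arena(void)
    {
            LIBBPF_OPTS(bpf_map_create_opts, opts,
                    .map_flags = BPF_F_MMAPABLE,   /* required by arena_map_alloc() */
                    .map_extra = 0,                /* or a page-aligned fixed user VMA start */
            );

            /* key_size == 0, value_size == 0, max_entries == number of pages */
            return bpf_map_create(BPF_MAP_TYPE_ARENA, "arena", 0, 0, 100, &opts);
    }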
179 * Check that user vma-s are not around when bpf map is freed. in arena_map_free()
182 * which would clear arena->vma_list. in arena_map_free()
184 if (WARN_ON_ONCE(!list_empty(&arena->vma_list))) in arena_map_free()
191 * free those pages. in arena_map_free()
194 KERN_VM_SZ - GUARD_SZ, existing_page_cb, NULL); in arena_map_free()
195 free_vm_area(arena->kern_vm); in arena_map_free()
196 range_tree_destroy(&arena->rt); in arena_map_free()
202 return ERR_PTR(-EINVAL); in arena_map_lookup_elem()
208 return -EOPNOTSUPP; in arena_map_update_elem()
234 return -ENOMEM; in remember_vma()
235 refcount_set(&vml->mmap_count, 1); in remember_vma()
236 vma->vm_private_data = vml; in remember_vma()
237 vml->vma = vma; in remember_vma()
238 list_add(&vml->head, &arena->vma_list); in remember_vma()
244 struct vma_list *vml = vma->vm_private_data; in arena_vm_open()
246 refcount_inc(&vml->mmap_count); in arena_vm_open()
251 struct bpf_map *map = vma->vm_file->private_data; in arena_vm_close()
253 struct vma_list *vml = vma->vm_private_data; in arena_vm_close()
255 if (!refcount_dec_and_test(&vml->mmap_count)) in arena_vm_close()
257 guard(mutex)(&arena->lock); in arena_vm_close()
259 list_del(&vml->head); in arena_vm_close()
260 vma->vm_private_data = NULL; in arena_vm_close()
266 struct bpf_map *map = vmf->vma->vm_file->private_data; in arena_vm_fault()
273 kaddr = kbase + (u32)(vmf->address); in arena_vm_fault()
275 guard(mutex)(&arena->lock); in arena_vm_fault()
278 /* already have a page vmap-ed */ in arena_vm_fault()
281 if (arena->map.map_flags & BPF_F_SEGV_ON_FAULT) in arena_vm_fault()
285 ret = range_tree_clear(&arena->rt, vmf->pgoff, 1); in arena_vm_fault()
292 range_tree_set(&arena->rt, vmf->pgoff, 1); in arena_vm_fault()
296 ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page); in arena_vm_fault()
298 range_tree_set(&arena->rt, vmf->pgoff, 1); in arena_vm_fault()
304 vmf->page = page; in arena_vm_fault()
318 struct bpf_map *map = filp->private_data; in arena_get_unmapped_area()
323 return -EINVAL; in arena_get_unmapped_area()
325 return -E2BIG; in arena_get_unmapped_area()
328 if (arena->user_vm_start) { in arena_get_unmapped_area()
329 if (len > arena->user_vm_end - arena->user_vm_start) in arena_get_unmapped_area()
330 return -E2BIG; in arena_get_unmapped_area()
331 if (len != arena->user_vm_end - arena->user_vm_start) in arena_get_unmapped_area()
332 return -EINVAL; in arena_get_unmapped_area()
333 if (addr != arena->user_vm_start) in arena_get_unmapped_area()
334 return -EINVAL; in arena_get_unmapped_area()
337 ret = mm_get_unmapped_area(current->mm, filp, addr, len * 2, 0, flags); in arena_get_unmapped_area()
340 if ((ret >> 32) == ((ret + len - 1) >> 32)) in arena_get_unmapped_area()
342 if (WARN_ON_ONCE(arena->user_vm_start)) in arena_get_unmapped_area()
344 return -EFAULT; in arena_get_unmapped_area()
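The len * 2 request above gives the fallback path room to work with: if the first candidate range straddles a 4 GB boundary, the start can be rounded up to the next 4 GB boundary and len bytes still fit inside the doubled reservation. A small, hypothetical illustration of that arithmetic (not kernel code):

    /* Hypothetical demo of why len * 2 is requested: a crossing candidate can
     * be rounded up to the next 4 GB boundary and still fit.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define SZ_4G (1ULL << 32)

    static uint64_t pick_start(uint64_t candidate, uint64_t len)
    {
            /* candidate points at a free region of len * 2 bytes */
            if ((candidate >> 32) == ((candidate + len - 1) >> 32))
                    return candidate;                        /* no 4 GB crossing */
            return (candidate + SZ_4G - 1) & ~(SZ_4G - 1);   /* round_up(candidate, SZ_4G) */
    }

    int main(void)
    {
            uint64_t len = 1ULL << 30; /* 1 GB arena */

            printf("%#llx\n", (unsigned long long)pick_start(0x7f7fc0000000ULL, len)); /* kept as-is */
            printf("%#llx\n", (unsigned long long)pick_start(0x7f7fd0000000ULL, len)); /* rounded up */
            return 0;
    }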
352 guard(mutex)(&arena->lock); in arena_map_mmap()
353 if (arena->user_vm_start && arena->user_vm_start != vma->vm_start) in arena_map_mmap()
360 * use the same addr later with mmap(addr, MAP_FIXED..); in arena_map_mmap()
362 return -EBUSY; in arena_map_mmap()
364 if (arena->user_vm_end && arena->user_vm_end != vma->vm_end) in arena_map_mmap()
365 /* all user processes must have the same size of mmap-ed region */ in arena_map_mmap()
366 return -EBUSY; in arena_map_mmap()
369 if (WARN_ON_ONCE(vma->vm_end - vma->vm_start > SZ_4G || vma->vm_pgoff)) in arena_map_mmap()
370 return -EFAULT; in arena_map_mmap()
373 return -ENOMEM; in arena_map_mmap()
375 arena->user_vm_start = vma->vm_start; in arena_map_mmap()
376 arena->user_vm_end = vma->vm_end; in arena_map_mmap()
383 vma->vm_ops = &arena_vm_ops; in arena_map_mmap()
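On the user side this mmap path is typically exercised as sketched below: the mapping is MAP_SHARED at pgoff 0, the usual length is max_entries * page_size, and every process mapping the same arena must use the same size (the first mmap, or map_extra at creation, fixes the range). map_fd and page_cnt are assumed to come from the creation sketch above.

    /* Sketch of mmap-ing the arena from user space. */
    #include <sys/mman.h>
    #include <unistd.h>

    static void *map_arena(int map_fd, unsigned int page_cnt)
    {
            size_t len = (size_t)page_cnt * (size_t)sysconf(_SC_PAGESIZE);

            /* offset must be 0; returns MAP_FAILED on error */
            return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
    }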
391 if ((u64)off > arena->user_vm_end - arena->user_vm_start) in arena_map_direct_value_addr()
392 return -ERANGE; in arena_map_direct_value_addr()
393 *imm = (unsigned long)arena->user_vm_start; in arena_map_direct_value_addr()
423 * Allocate pages and vmap them into kernel vmalloc area.
424 * Later the pages will be mmaped into user space vma.
429 long page_cnt_max = (arena->user_vm_end - arena->user_vm_start) >> PAGE_SHIFT; in arena_alloc_pages()
431 struct page **pages; in arena_alloc_pages()
443 if (pgoff > page_cnt_max - page_cnt) in arena_alloc_pages()
448 /* zeroing is needed, since alloc_pages_bulk() only fills in non-zero entries */ in arena_alloc_pages()
449 pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL); in arena_alloc_pages()
450 if (!pages) in arena_alloc_pages()
453 guard(mutex)(&arena->lock); in arena_alloc_pages()
456 ret = is_range_tree_set(&arena->rt, pgoff, page_cnt); in arena_alloc_pages()
459 ret = range_tree_clear(&arena->rt, pgoff, page_cnt); in arena_alloc_pages()
461 ret = pgoff = range_tree_find(&arena->rt, page_cnt); in arena_alloc_pages()
463 ret = range_tree_clear(&arena->rt, pgoff, page_cnt); in arena_alloc_pages()
468 ret = bpf_map_alloc_pages(&arena->map, node_id, page_cnt, pages); in arena_alloc_pages()
472 uaddr32 = (u32)(arena->user_vm_start + pgoff * PAGE_SIZE); in arena_alloc_pages()
473 /* Earlier checks made sure that uaddr32 + page_cnt * PAGE_SIZE - 1 in arena_alloc_pages()
474 * will not overflow 32-bit. Lower 32-bit need to represent in arena_alloc_pages()
476 * Map these pages at kern_vm_start base. in arena_alloc_pages()
477 * kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE - 1 can overflow in arena_alloc_pages()
478 * lower 32-bit and it's ok. in arena_alloc_pages()
480 ret = vm_area_map_pages(arena->kern_vm, kern_vm_start + uaddr32, in arena_alloc_pages()
481 kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages); in arena_alloc_pages()
484 __free_page(pages[i]); in arena_alloc_pages()
487 kvfree(pages); in arena_alloc_pages()
488 return clear_lo32(arena->user_vm_start) + uaddr32; in arena_alloc_pages()
490 range_tree_set(&arena->rt, pgoff, page_cnt); in arena_alloc_pages()
492 kvfree(pages); in arena_alloc_pages()
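The value returned by arena_alloc_pages() above stitches the caller-visible upper 32 bits of user_vm_start back onto the 32-bit arena offset, exactly as the overflow comment describes. A worked illustration of that arithmetic with the example addresses from the header comment; clear_lo32() is reproduced here only for the sketch.

    /* Hypothetical illustration of the pointer returned by arena_alloc_pages():
     * upper 32 bits come from user_vm_start, lower 32 bits from uaddr32.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    static uint64_t clear_lo32(uint64_t val) { return val & ~(uint64_t)~0U; }

    int main(void)
    {
            uint64_t user_vm_start = 0x7f7d26200000ULL;
            uint64_t pgoff = 3;                               /* page picked from the range tree */
            uint32_t uaddr32 = (uint32_t)(user_vm_start + pgoff * PAGE_SIZE);

            /* what bpf program and user space both see for that page: 0x7f7d26203000 */
            printf("%#llx\n", (unsigned long long)(clear_lo32(user_vm_start) + uaddr32));
            return 0;
    }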
498 * unmap it from all user space vma-s,
505 list_for_each_entry(vml, &arena->vma_list, head) in zap_pages()
506 zap_page_range_single(vml->vma, uaddr, in zap_pages()
516 /* only aligned lower 32-bit are relevant */ in arena_free_pages()
519 full_uaddr = clear_lo32(arena->user_vm_start) + uaddr; in arena_free_pages()
520 uaddr_end = min(arena->user_vm_end, full_uaddr + (page_cnt << PAGE_SHIFT)); in arena_free_pages()
524 page_cnt = (uaddr_end - full_uaddr) >> PAGE_SHIFT; in arena_free_pages()
526 guard(mutex)(&arena->lock); in arena_free_pages()
530 range_tree_set(&arena->rt, pgoff, page_cnt); in arena_free_pages()
533 /* bulk zap if multiple pages being freed */ in arena_free_pages()
548 vm_area_unmap_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE); in arena_free_pages()
561 if (map->map_type != BPF_MAP_TYPE_ARENA || flags || !page_cnt) in bpf_arena_alloc_pages()
572 if (map->map_type != BPF_MAP_TYPE_ARENA || !page_cnt || !ptr__ign) in bpf_arena_free_pages()
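From the BPF program side, the two kfuncs guarded by the checks above are used roughly as follows. The map definition follows the pattern in the kernel selftests, and the prototypes mirror the kernel signatures (the selftests additionally annotate arena pointers with __arena for direct dereference); treat the exact declarations as an assumption to check against your tree.

    /* Sketch of a bpf program using the arena kfuncs. */
    #include <vmlinux.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_ARENA);
            __uint(map_flags, BPF_F_MMAPABLE);
            __uint(max_entries, 100);          /* arena size in pages */
    } arena SEC(".maps");

    /* Prototypes mirroring the kernel kfuncs; adjust to your tree if needed. */
    void *bpf_arena_alloc_pages(void *map, void *addr__ign, __u32 page_cnt,
                                int node_id, __u64 flags) __ksym;
    void bpf_arena_free_pages(void *map, void *ptr__ign, __u32 page_cnt) __ksym;

    SEC("syscall")
    int alloc_and_free_one_page(void *ctx)
    {
            /* let the kernel pick the address (addr__ign == NULL), any NUMA node */
            void *page = bpf_arena_alloc_pages(&arena, NULL, 1, -1 /* NUMA_NO_NODE */, 0);

            if (!page)
                    return 1;
            bpf_arena_free_pages(&arena, page, 1);
            return 0;
    }

    char _license[] SEC("license") = "GPL";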