Lines Matching +full:page +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-only
6 * Copyright (C) 2007-2017 Google, Inc.
52 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
57 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
63 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
64 return alloc->vm_start + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
65 return binder_buffer_next(buffer)->user_data - buffer->user_data; in binder_alloc_buffer_size()
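
The three matches above are the entire size calculation: a binder_buffer carries no explicit size field, so its size is the gap up to the next buffer's user_data, or up to the end of the mapped region for the last buffer in the list. Reconstructed from those lines (the signature is assumed):

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	/* Last buffer: it runs to the end of the mapped region */
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->vm_start + alloc->buffer_size - buffer->user_data;
	/* Otherwise the size is implied by where the next buffer starts */
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}
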
71 struct rb_node **p = &alloc->free_buffers.rb_node; in binder_insert_free_buffer()
77 BUG_ON(!new_buffer->free); in binder_insert_free_buffer()
82 "%d: add free buffer, size %zd, at %pK\n", in binder_insert_free_buffer()
83 alloc->pid, new_buffer_size, new_buffer); in binder_insert_free_buffer()
88 BUG_ON(!buffer->free); in binder_insert_free_buffer()
93 p = &parent->rb_left; in binder_insert_free_buffer()
95 p = &parent->rb_right; in binder_insert_free_buffer()
97 rb_link_node(&new_buffer->rb_node, parent, p); in binder_insert_free_buffer()
98 rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); in binder_insert_free_buffer()
104 struct rb_node **p = &alloc->allocated_buffers.rb_node; in binder_insert_allocated_buffer_locked()
108 BUG_ON(new_buffer->free); in binder_insert_allocated_buffer_locked()
113 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
115 if (new_buffer->user_data < buffer->user_data) in binder_insert_allocated_buffer_locked()
116 p = &parent->rb_left; in binder_insert_allocated_buffer_locked()
117 else if (new_buffer->user_data > buffer->user_data) in binder_insert_allocated_buffer_locked()
118 p = &parent->rb_right; in binder_insert_allocated_buffer_locked()
122 rb_link_node(&new_buffer->rb_node, parent, p); in binder_insert_allocated_buffer_locked()
123 rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); in binder_insert_allocated_buffer_locked()
130 struct rb_node *n = alloc->allocated_buffers.rb_node; in binder_alloc_prepare_to_free_locked()
135 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
137 if (user_ptr < buffer->user_data) { in binder_alloc_prepare_to_free_locked()
138 n = n->rb_left; in binder_alloc_prepare_to_free_locked()
139 } else if (user_ptr > buffer->user_data) { in binder_alloc_prepare_to_free_locked()
140 n = n->rb_right; in binder_alloc_prepare_to_free_locked()
147 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
148 return ERR_PTR(-EPERM); in binder_alloc_prepare_to_free_locked()
149 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
157 * binder_alloc_prepare_to_free() - get buffer given user ptr
172 mutex_lock(&alloc->mutex); in binder_alloc_prepare_to_free()
174 mutex_unlock(&alloc->mutex); in binder_alloc_prepare_to_free()
181 struct page *page) in binder_set_installed_page() argument
184 smp_store_release(&alloc->pages[index], page); in binder_set_installed_page()
187 static inline struct page *
191 return smp_load_acquire(&alloc->pages[index]); in binder_get_installed_page()
198 struct page *page; in binder_lru_freelist_add() local
206 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_add()
207 page = binder_get_installed_page(alloc, index); in binder_lru_freelist_add()
208 if (!page) in binder_lru_freelist_add()
214 page_to_lru(page), in binder_lru_freelist_add()
215 page_to_nid(page), in binder_lru_freelist_add()
227 smp_store_release(&alloc->mapped, state); in binder_alloc_set_mapped()
233 return smp_load_acquire(&alloc->mapped); in binder_alloc_is_mapped()
236 static struct page *binder_page_lookup(struct binder_alloc *alloc, in binder_page_lookup()
239 struct mm_struct *mm = alloc->mm; in binder_page_lookup()
240 struct page *page; in binder_page_lookup() local
244 * Find an existing page in the remote mm. If missing, in binder_page_lookup()
245 * don't attempt to fault-in, just propagate an error. in binder_page_lookup()
250 &page, NULL); in binder_page_lookup()
253 return npages > 0 ? page : NULL; in binder_page_lookup()
258 struct page *page) in binder_page_insert() argument
260 struct mm_struct *mm = alloc->mm; in binder_page_insert()
262 int ret = -ESRCH; in binder_page_insert()
264 /* attempt per-vma lock first */ in binder_page_insert()
268 ret = vm_insert_page(vma, addr, page); in binder_page_insert()
277 ret = vm_insert_page(vma, addr, page); in binder_page_insert()
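
Only fragments of binder_page_insert() match, but they show the locking strategy: try the per-VMA lock first, and only fall back to mmap_lock if that fails. A rough sketch of that pattern, assuming the usual lock_vma_under_rcu()/vma_end_read() and vma_lookup() helpers (the file's exact error handling may differ):

static int binder_page_insert(struct binder_alloc *alloc,
			      unsigned long addr,
			      struct page *page)
{
	struct mm_struct *mm = alloc->mm;
	struct vm_area_struct *vma;
	int ret = -ESRCH;

	/* attempt per-vma lock first */
	vma = lock_vma_under_rcu(mm, addr);
	if (vma) {
		if (binder_alloc_is_mapped(alloc))
			ret = vm_insert_page(vma, addr, page);
		vma_end_read(vma);
		return ret;
	}

	/* fall back to mmap_lock and look the VMA up again */
	mmap_read_lock(mm);
	vma = vma_lookup(mm, addr);
	if (vma && binder_alloc_is_mapped(alloc))
		ret = vm_insert_page(vma, addr, page);
	mmap_read_unlock(mm);

	return ret;
}
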
283 static struct page *binder_page_alloc(struct binder_alloc *alloc, in binder_page_alloc()
287 struct page *page; in binder_page_alloc() local
289 page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); in binder_page_alloc()
290 if (!page) in binder_page_alloc()
293 /* allocate and install shrinker metadata under page->private */ in binder_page_alloc()
296 __free_page(page); in binder_page_alloc()
300 mdata->alloc = alloc; in binder_page_alloc()
301 mdata->page_index = index; in binder_page_alloc()
302 INIT_LIST_HEAD(&mdata->lru); in binder_page_alloc()
303 set_page_private(page, (unsigned long)mdata); in binder_page_alloc()
305 return page; in binder_page_alloc()
308 static void binder_free_page(struct page *page) in binder_free_page() argument
310 kfree((struct binder_shrinker_mdata *)page_private(page)); in binder_free_page()
311 __free_page(page); in binder_free_page()
318 struct page *page; in binder_install_single_page() local
321 if (!mmget_not_zero(alloc->mm)) in binder_install_single_page()
322 return -ESRCH; in binder_install_single_page()
324 page = binder_page_alloc(alloc, index); in binder_install_single_page()
325 if (!page) { in binder_install_single_page()
326 ret = -ENOMEM; in binder_install_single_page()
330 ret = binder_page_insert(alloc, addr, page); in binder_install_single_page()
332 case -EBUSY: in binder_install_single_page()
335 * alloc->pages[index] has not been updated yet. Discard in binder_install_single_page()
336 * our page and look up the one already installed. in binder_install_single_page()
339 binder_free_page(page); in binder_install_single_page()
340 page = binder_page_lookup(alloc, addr); in binder_install_single_page()
341 if (!page) { in binder_install_single_page()
342 pr_err("%d: failed to find page at offset %lx\n", in binder_install_single_page()
343 alloc->pid, addr - alloc->vm_start); in binder_install_single_page()
344 ret = -ESRCH; in binder_install_single_page()
349 /* Mark page installation complete and safe to use */ in binder_install_single_page()
350 binder_set_installed_page(alloc, index, page); in binder_install_single_page()
353 binder_free_page(page); in binder_install_single_page()
354 pr_err("%d: %s failed to insert page at offset %lx with %d\n", in binder_install_single_page()
355 alloc->pid, __func__, addr - alloc->vm_start, ret); in binder_install_single_page()
359 mmput_async(alloc->mm); in binder_install_single_page()
365 size_t size) in binder_install_buffer_pages() argument
370 start = buffer->user_data & PAGE_MASK; in binder_install_buffer_pages()
371 final = PAGE_ALIGN(buffer->user_data + size); in binder_install_buffer_pages()
377 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_install_buffer_pages()
398 struct page *page; in binder_lru_freelist_del() local
406 index = (page_addr - alloc->vm_start) / PAGE_SIZE; in binder_lru_freelist_del()
407 page = binder_get_installed_page(alloc, index); in binder_lru_freelist_del()
409 if (page) { in binder_lru_freelist_del()
413 page_to_lru(page), in binder_lru_freelist_del()
414 page_to_nid(page), in binder_lru_freelist_del()
422 if (index + 1 > alloc->pages_high) in binder_lru_freelist_del()
423 alloc->pages_high = index + 1; in binder_lru_freelist_del()
439 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
448 for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) { in debug_no_space_locked()
467 * Find the number and total size of buffers allocated by the current caller; in debug_low_async_space_locked()
475 int pid = current->tgid; in debug_low_async_space_locked()
481 * space left (which is less than 10% of total buffer size). in debug_low_async_space_locked()
483 if (alloc->free_async_space >= alloc->buffer_size / 10) { in debug_low_async_space_locked()
484 alloc->oneway_spam_detected = false; in debug_low_async_space_locked()
488 for (n = rb_first(&alloc->allocated_buffers); n != NULL; in debug_low_async_space_locked()
491 if (buffer->pid != pid) in debug_low_async_space_locked()
493 if (!buffer->async_transaction) in debug_low_async_space_locked()
501 * async space (which is 25% of total buffer size). Oneway spam is only in debug_low_async_space_locked()
504 if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { in debug_low_async_space_locked()
506 "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n", in debug_low_async_space_locked()
507 alloc->pid, pid, num_buffers, total_alloc_size); in debug_low_async_space_locked()
508 if (!alloc->oneway_spam_detected) { in debug_low_async_space_locked()
509 alloc->oneway_spam_detected = true; in debug_low_async_space_locked()
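
The matched comments spell out the oneway-spam heuristic: do nothing while at least 10% of the buffer is still free for async transactions; otherwise count this caller's outstanding async buffers and flag it once it holds more than 50 of them or more than 25% of the pool. A sketch of that flow pieced together from the lines above (the file's exact control flow may differ):

static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	int pid = current->tgid;
	size_t num_buffers = 0;
	struct rb_node *n;

	/* Plenty of room: at least 10% of the total buffer size is free */
	if (alloc->free_async_space >= alloc->buffer_size / 10) {
		alloc->oneway_spam_detected = false;
		return false;
	}

	/* Count this caller's outstanding async buffers */
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/* Report once per process: > 50 buffers or > 25% of the pool */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}
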
520 size_t size, in binder_alloc_new_buf_locked() argument
523 struct rb_node *n = alloc->free_buffers.rb_node; in binder_alloc_new_buf_locked()
530 if (is_async && alloc->free_async_space < size) { in binder_alloc_new_buf_locked()
532 "%d: binder_alloc_buf size %zd failed, no async space left\n", in binder_alloc_new_buf_locked()
533 alloc->pid, size); in binder_alloc_new_buf_locked()
534 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
540 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
543 if (size < buffer_size) { in binder_alloc_new_buf_locked()
545 n = n->rb_left; in binder_alloc_new_buf_locked()
546 } else if (size > buffer_size) { in binder_alloc_new_buf_locked()
547 n = n->rb_right; in binder_alloc_new_buf_locked()
556 "%d: binder_alloc_buf size %zd failed, no address space\n", in binder_alloc_new_buf_locked()
557 alloc->pid, size); in binder_alloc_new_buf_locked()
559 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
563 if (buffer_size != size) { in binder_alloc_new_buf_locked()
568 WARN_ON(n || buffer_size == size); in binder_alloc_new_buf_locked()
569 new_buffer->user_data = buffer->user_data + size; in binder_alloc_new_buf_locked()
570 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
571 new_buffer->free = 1; in binder_alloc_new_buf_locked()
577 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", in binder_alloc_new_buf_locked()
578 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
582 * with buffer_size determines if the last page is shared with an in binder_alloc_new_buf_locked()
583 * adjacent in-use buffer. In that case, the page has already been in binder_alloc_new_buf_locked()
586 next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; in binder_alloc_new_buf_locked()
587 curr_last_page = PAGE_ALIGN(buffer->user_data + size); in binder_alloc_new_buf_locked()
588 binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), in binder_alloc_new_buf_locked()
591 rb_erase(&buffer->rb_node, &alloc->free_buffers); in binder_alloc_new_buf_locked()
592 buffer->free = 0; in binder_alloc_new_buf_locked()
593 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
595 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
596 buffer->oneway_spam_suspect = false; in binder_alloc_new_buf_locked()
598 alloc->free_async_space -= size; in binder_alloc_new_buf_locked()
600 "%d: binder_alloc_buf size %zd async free %zd\n", in binder_alloc_new_buf_locked()
601 alloc->pid, size, alloc->free_async_space); in binder_alloc_new_buf_locked()
603 buffer->oneway_spam_suspect = true; in binder_alloc_new_buf_locked()
612 /* Calculate the sanitized total size; returns 0 for an invalid request */
619 /* Align to pointer size and check for overflows */ in sanitized_size()
628 /* Pad 0-sized buffers so they get a unique address */ in sanitized_size()
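
The three comment fragments above describe the whole of the request sanitization: each component is rounded up to pointer alignment, both additions are checked for overflow, and a zero-length request is padded to one pointer so every buffer gets a distinct address. A sketch under those assumptions:

static size_t sanitized_size(size_t data_size,
			     size_t offsets_size,
			     size_t extra_buffers_size)
{
	size_t total, tmp;

	/* Align to pointer size and check for overflows */
	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* Pad 0-sized buffers so they get a unique address */
	total = max(total, sizeof(void *));

	return total;
}
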
635 * binder_alloc_new_buf() - Allocate a new binder buffer
637 * @data_size: size of user data buffer
639 * @extra_buffers_size: size of extra space for meta-data (e.g., security context) in binder_alloc_new_buf()
643 * the kernel version of the buffer pointer. The size allocated
645 * pointer-sized boundary)
647 * Return: The allocated buffer or %ERR_PTR(-errno) if error
656 size_t size; in binder_alloc_new_buf() local
663 alloc->pid); in binder_alloc_new_buf()
664 return ERR_PTR(-ESRCH); in binder_alloc_new_buf()
667 size = sanitized_size(data_size, offsets_size, extra_buffers_size); in binder_alloc_new_buf()
668 if (unlikely(!size)) { in binder_alloc_new_buf()
670 "%d: got transaction with invalid size %zd-%zd-%zd\n", in binder_alloc_new_buf()
671 alloc->pid, data_size, offsets_size, in binder_alloc_new_buf()
673 return ERR_PTR(-EINVAL); in binder_alloc_new_buf()
679 return ERR_PTR(-ENOMEM); in binder_alloc_new_buf()
681 mutex_lock(&alloc->mutex); in binder_alloc_new_buf()
682 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); in binder_alloc_new_buf()
684 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
688 buffer->data_size = data_size; in binder_alloc_new_buf()
689 buffer->offsets_size = offsets_size; in binder_alloc_new_buf()
690 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf()
691 buffer->pid = current->tgid; in binder_alloc_new_buf()
692 mutex_unlock(&alloc->mutex); in binder_alloc_new_buf()
694 ret = binder_install_buffer_pages(alloc, buffer, size); in binder_alloc_new_buf()
705 return buffer->user_data & PAGE_MASK; in buffer_start_page()
710 return (buffer->user_data - 1) & PAGE_MASK; in prev_buffer_end_page()
718 if (PAGE_ALIGNED(buffer->user_data)) in binder_delete_free_buffer()
721 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
723 BUG_ON(!prev->free); in binder_delete_free_buffer()
727 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
736 list_del(&buffer->entry); in binder_delete_free_buffer()
743 size_t size, buffer_size; in binder_free_buf_locked() local
747 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
748 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
749 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
752 "%d: binder_free_buf %pK size %zd buffer_size %zd\n", in binder_free_buf_locked()
753 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
755 BUG_ON(buffer->free); in binder_free_buf_locked()
756 BUG_ON(size > buffer_size); in binder_free_buf_locked()
757 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
758 BUG_ON(buffer->user_data < alloc->vm_start); in binder_free_buf_locked()
759 BUG_ON(buffer->user_data > alloc->vm_start + alloc->buffer_size); in binder_free_buf_locked()
761 if (buffer->async_transaction) { in binder_free_buf_locked()
762 alloc->free_async_space += buffer_size; in binder_free_buf_locked()
764 "%d: binder_free_buf size %zd async free %zd\n", in binder_free_buf_locked()
765 alloc->pid, size, alloc->free_async_space); in binder_free_buf_locked()
768 binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), in binder_free_buf_locked()
769 (buffer->user_data + buffer_size) & PAGE_MASK); in binder_free_buf_locked()
771 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
772 buffer->free = 1; in binder_free_buf_locked()
773 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
776 if (next->free) { in binder_free_buf_locked()
777 rb_erase(&next->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
781 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
784 if (prev->free) { in binder_free_buf_locked()
786 rb_erase(&prev->rb_node, &alloc->free_buffers); in binder_free_buf_locked()
794 * binder_alloc_get_page() - get kernel pointer for given buffer offset
798 * @pgoffp: address to copy final page offset to
800 * Look up the struct page corresponding to the address in binder_alloc_get_page()
801 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
802 * NULL, the byte-offset into the page is written there.
807 * guaranteed that the corresponding elements of @alloc->pages[]
810 * Return: struct page
812 static struct page *binder_alloc_get_page(struct binder_alloc *alloc, in binder_alloc_get_page()
818 (buffer->user_data - alloc->vm_start); in binder_alloc_get_page()
824 return alloc->pages[index]; in binder_alloc_get_page()
828 * binder_alloc_clear_buf() - zero out buffer
841 unsigned long size; in binder_alloc_clear_buf() local
842 struct page *page; in binder_alloc_clear_buf() local
845 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
847 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_clear_buf()
848 memset_page(page, pgoff, 0, size); in binder_alloc_clear_buf()
849 bytes -= size; in binder_alloc_clear_buf()
850 buffer_offset += size; in binder_alloc_clear_buf()
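
The loop above is the page-at-a-time pattern shared by the clear and copy helpers in this file: binder_alloc_get_page() resolves the page and in-page offset for the current buffer offset, and each iteration touches at most the remainder of that page. Filled out as a complete function from the matched lines:

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		memset_page(page, pgoff, 0, size);
		bytes -= size;
		buffer_offset += size;
	}
}
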
855 * binder_alloc_free_buf() - free a binder buffer
872 if (buffer->clear_on_free) { in binder_alloc_free_buf()
874 buffer->clear_on_free = false; in binder_alloc_free_buf()
876 mutex_lock(&alloc->mutex); in binder_alloc_free_buf()
878 mutex_unlock(&alloc->mutex); in binder_alloc_free_buf()
882 * binder_alloc_mmap_handler() - map virtual address space for proc
891 * -EBUSY = address space already mapped
892 * -ENOMEM = failed to map memory to given address space
901 if (unlikely(vma->vm_mm != alloc->mm)) { in binder_alloc_mmap_handler()
902 ret = -EINVAL; in binder_alloc_mmap_handler()
903 failure_string = "invalid vma->vm_mm"; in binder_alloc_mmap_handler()
908 if (alloc->buffer_size) { in binder_alloc_mmap_handler()
909 ret = -EBUSY; in binder_alloc_mmap_handler()
913 alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, in binder_alloc_mmap_handler()
917 alloc->vm_start = vma->vm_start; in binder_alloc_mmap_handler()
919 alloc->pages = kvcalloc(alloc->buffer_size / PAGE_SIZE, in binder_alloc_mmap_handler()
920 sizeof(alloc->pages[0]), in binder_alloc_mmap_handler()
922 if (!alloc->pages) { in binder_alloc_mmap_handler()
923 ret = -ENOMEM; in binder_alloc_mmap_handler()
924 failure_string = "alloc page array"; in binder_alloc_mmap_handler()
930 ret = -ENOMEM; in binder_alloc_mmap_handler()
935 buffer->user_data = alloc->vm_start; in binder_alloc_mmap_handler()
936 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
937 buffer->free = 1; in binder_alloc_mmap_handler()
939 alloc->free_async_space = alloc->buffer_size / 2; in binder_alloc_mmap_handler()
947 kvfree(alloc->pages); in binder_alloc_mmap_handler()
948 alloc->pages = NULL; in binder_alloc_mmap_handler()
950 alloc->vm_start = 0; in binder_alloc_mmap_handler()
952 alloc->buffer_size = 0; in binder_alloc_mmap_handler()
957 "%s: %d %lx-%lx %s failed %d\n", __func__, in binder_alloc_mmap_handler()
958 alloc->pid, vma->vm_start, vma->vm_end, in binder_alloc_mmap_handler()
971 mutex_lock(&alloc->mutex); in binder_alloc_deferred_release()
972 BUG_ON(alloc->mapped); in binder_alloc_deferred_release()
974 while ((n = rb_first(&alloc->allocated_buffers))) { in binder_alloc_deferred_release()
978 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
980 if (buffer->clear_on_free) { in binder_alloc_deferred_release()
982 buffer->clear_on_free = false; in binder_alloc_deferred_release()
988 while (!list_empty(&alloc->buffers)) { in binder_alloc_deferred_release()
989 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
991 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
993 list_del(&buffer->entry); in binder_alloc_deferred_release()
994 WARN_ON_ONCE(!list_empty(&alloc->buffers)); in binder_alloc_deferred_release()
999 if (alloc->pages) { in binder_alloc_deferred_release()
1002 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_deferred_release()
1003 struct page *page; in binder_alloc_deferred_release() local
1006 page = binder_get_installed_page(alloc, i); in binder_alloc_deferred_release()
1007 if (!page) in binder_alloc_deferred_release()
1011 page_to_lru(page), in binder_alloc_deferred_release()
1012 page_to_nid(page), in binder_alloc_deferred_release()
1015 "%s: %d: page %d %s\n", in binder_alloc_deferred_release()
1016 __func__, alloc->pid, i, in binder_alloc_deferred_release()
1018 binder_free_page(page); in binder_alloc_deferred_release()
1022 mutex_unlock(&alloc->mutex); in binder_alloc_deferred_release()
1023 kvfree(alloc->pages); in binder_alloc_deferred_release()
1024 if (alloc->mm) in binder_alloc_deferred_release()
1025 mmdrop(alloc->mm); in binder_alloc_deferred_release()
1029 __func__, alloc->pid, buffers, page_count); in binder_alloc_deferred_release()
1033 * binder_alloc_print_allocated() - print buffer info
1046 mutex_lock(&alloc->mutex); in binder_alloc_print_allocated()
1047 for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { in binder_alloc_print_allocated()
1049 seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n", in binder_alloc_print_allocated()
1050 buffer->debug_id, in binder_alloc_print_allocated()
1051 buffer->user_data - alloc->vm_start, in binder_alloc_print_allocated()
1052 buffer->data_size, buffer->offsets_size, in binder_alloc_print_allocated()
1053 buffer->extra_buffers_size, in binder_alloc_print_allocated()
1054 buffer->transaction ? "active" : "delivered"); in binder_alloc_print_allocated()
1056 mutex_unlock(&alloc->mutex); in binder_alloc_print_allocated()
1060 * binder_alloc_print_pages() - print page usage
1067 struct page *page; in binder_alloc_print_pages() local
1073 mutex_lock(&alloc->mutex); in binder_alloc_print_pages()
1079 for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { in binder_alloc_print_pages()
1080 page = binder_get_installed_page(alloc, i); in binder_alloc_print_pages()
1081 if (!page) in binder_alloc_print_pages()
1083 else if (list_empty(page_to_lru(page))) in binder_alloc_print_pages()
1089 mutex_unlock(&alloc->mutex); in binder_alloc_print_pages()
1091 seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); in binder_alloc_print_pages()
1095 * binder_alloc_get_allocated_count() - return count of buffers
1105 mutex_lock(&alloc->mutex); in binder_alloc_get_allocated_count()
1106 for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) in binder_alloc_get_allocated_count()
1108 mutex_unlock(&alloc->mutex); in binder_alloc_get_allocated_count()
1114 * binder_alloc_vma_close() - invalidate address space
1118 * Clears alloc->mapped to prevent new incoming transactions from
1127 * binder_alloc_free_page() - shrinker callback to free pages
1138 __must_hold(&lru->lock) in binder_alloc_free_page()
1141 struct binder_alloc *alloc = mdata->alloc; in binder_alloc_free_page()
1142 struct mm_struct *mm = alloc->mm; in binder_alloc_free_page()
1144 struct page *page_to_free; in binder_alloc_free_page()
1152 index = mdata->page_index; in binder_alloc_free_page()
1153 page_addr = alloc->vm_start + index * PAGE_SIZE; in binder_alloc_free_page()
1155 /* attempt per-vma lock first */ in binder_alloc_free_page()
1165 if (!mutex_trylock(&alloc->mutex)) in binder_alloc_free_page()
1178 page_to_free = alloc->pages[index]; in binder_alloc_free_page()
1184 spin_unlock(&lru->lock); in binder_alloc_free_page()
1194 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1205 mutex_unlock(&alloc->mutex); in binder_alloc_free_page()
1227 NULL, sc->nr_to_scan); in binder_shrink_scan()
1233 * binder_alloc_init() - called by binder_open() for per-proc initialization
1241 alloc->pid = current->group_leader->pid; in binder_alloc_init()
1242 alloc->mm = current->mm; in binder_alloc_init()
1243 mmgrab(alloc->mm); in binder_alloc_init()
1244 mutex_init(&alloc->mutex); in binder_alloc_init()
1245 INIT_LIST_HEAD(&alloc->buffers); in binder_alloc_init()
1256 binder_shrinker = shrinker_alloc(0, "android-binder"); in binder_alloc_shrinker_init()
1259 return -ENOMEM; in binder_alloc_shrinker_init()
1262 binder_shrinker->count_objects = binder_shrink_count; in binder_alloc_shrinker_init()
1263 binder_shrinker->scan_objects = binder_shrink_scan; in binder_alloc_shrinker_init()
1277 * check_buffer() - verify that buffer/offset is safe to access
1283 * Check that the @offset/@bytes are within the size of the given
1289 * (buffer->free == 0 && buffer->allow_user_free == 0)
1291 * (buffer->free == 0 && buffer->transaction == NULL).
1302 offset <= buffer_size - bytes && in check_buffer()
1304 !buffer->free && in check_buffer()
1305 (!buffer->allow_user_free || !buffer->transaction); in check_buffer()
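
Pulled together, the kernel-doc fragments above reduce to a single boolean: the range must fit in the buffer, the buffer must not be free, and it must either still be being created (allow_user_free == 0) or already be stripped of its transaction. A sketch reconstructed from the matched lines, with the 32-bit offset alignment check assumed:

static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}
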
1309 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
1331 unsigned long size; in binder_alloc_copy_user_to_buffer() local
1333 struct page *page; in binder_alloc_copy_user_to_buffer() local
1337 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1339 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_copy_user_to_buffer()
1340 kptr = kmap_local_page(page) + pgoff; in binder_alloc_copy_user_to_buffer()
1341 ret = copy_from_user(kptr, from, size); in binder_alloc_copy_user_to_buffer()
1344 return bytes - size + ret; in binder_alloc_copy_user_to_buffer()
1345 bytes -= size; in binder_alloc_copy_user_to_buffer()
1346 from += size; in binder_alloc_copy_user_to_buffer()
1347 buffer_offset += size; in binder_alloc_copy_user_to_buffer()
1359 /* All copies must be 32-bit aligned and a multiple of 32 bits in size */ in binder_alloc_do_buffer_copy()
1361 return -EINVAL; in binder_alloc_do_buffer_copy()
1364 unsigned long size; in binder_alloc_do_buffer_copy() local
1365 struct page *page; in binder_alloc_do_buffer_copy() local
1368 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1370 size = min_t(size_t, bytes, PAGE_SIZE - pgoff); in binder_alloc_do_buffer_copy()
1372 memcpy_to_page(page, pgoff, ptr, size); in binder_alloc_do_buffer_copy()
1374 memcpy_from_page(ptr, page, pgoff, size); in binder_alloc_do_buffer_copy()
1375 bytes -= size; in binder_alloc_do_buffer_copy()
1377 ptr = ptr + size; in binder_alloc_do_buffer_copy()
1378 buffer_offset += size; in binder_alloc_do_buffer_copy()