Lines Matching full:buffer
50 static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) in binder_buffer_next() argument
52 return list_entry(buffer->entry.next, struct binder_buffer, entry); in binder_buffer_next()
55 static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) in binder_buffer_prev() argument
57 return list_entry(buffer->entry.prev, struct binder_buffer, entry); in binder_buffer_prev()
61 struct binder_buffer *buffer) in binder_alloc_buffer_size() argument
63 if (list_is_last(&buffer->entry, &alloc->buffers)) in binder_alloc_buffer_size()
64 return alloc->buffer + alloc->buffer_size - buffer->user_data; in binder_alloc_buffer_size()
65 return binder_buffer_next(buffer)->user_data - buffer->user_data; in binder_alloc_buffer_size()
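These first hits show the allocator's central trick: a binder_buffer carries no size field. A buffer's size is the distance from its user_data to the next buffer's user_data, or to the end of the mmap'd region for the last buffer in the address-ordered list. A minimal userspace sketch of that implicit-size scheme (the toy_* names, addresses, and region layout are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Implicit-size model: a buffer records only its start; its size is the
 * distance to the next buffer's start, or to the end of the region for
 * the last buffer. */
struct toy_buffer {
    unsigned long user_data;     /* start address within the region */
    struct toy_buffer *next;     /* next buffer by address, or NULL */
};

static size_t toy_buffer_size(unsigned long region_start, size_t region_size,
                              const struct toy_buffer *b)
{
    if (!b->next)
        return region_start + region_size - b->user_data;
    return b->next->user_data - b->user_data;
}

int main(void)
{
    struct toy_buffer b2 = { .user_data = 0x3000 };
    struct toy_buffer b1 = { .user_data = 0x1000, .next = &b2 };

    /* 16 KiB region at 0x1000: b1 spans 0x1000..0x3000, b2 the rest. */
    printf("b1: %zu bytes, b2: %zu bytes\n",
           toy_buffer_size(0x1000, 0x4000, &b1),
           toy_buffer_size(0x1000, 0x4000, &b2));
    return 0;
}

Because sizes are implicit, splitting and merging buffers never updates a size anywhere; only start addresses move, which is what the split and coalesce hits further down rely on.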
73 struct binder_buffer *buffer; in binder_insert_free_buffer() local
82 "%d: add free buffer, size %zd, at %pK\n", in binder_insert_free_buffer()
87 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
88 BUG_ON(!buffer->free); in binder_insert_free_buffer()
90 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_insert_free_buffer()
106 struct binder_buffer *buffer; in binder_insert_allocated_buffer_locked() local
112 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer_locked()
113 BUG_ON(buffer->free); in binder_insert_allocated_buffer_locked()
115 if (new_buffer->user_data < buffer->user_data) in binder_insert_allocated_buffer_locked()
117 else if (new_buffer->user_data > buffer->user_data) in binder_insert_allocated_buffer_locked()
131 struct binder_buffer *buffer; in binder_alloc_prepare_to_free_locked() local
134 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_prepare_to_free_locked()
135 BUG_ON(buffer->free); in binder_alloc_prepare_to_free_locked()
137 if (user_ptr < buffer->user_data) { in binder_alloc_prepare_to_free_locked()
139 } else if (user_ptr > buffer->user_data) { in binder_alloc_prepare_to_free_locked()
144 * free the buffer when in use by kernel or in binder_alloc_prepare_to_free_locked()
147 if (!buffer->allow_user_free) in binder_alloc_prepare_to_free_locked()
149 buffer->allow_user_free = 0; in binder_alloc_prepare_to_free_locked()
150 return buffer; in binder_alloc_prepare_to_free_locked()
157 * binder_alloc_prepare_to_free() - get buffer given user ptr
159 * @user_ptr: User pointer to buffer data
161 * Validate userspace pointer to buffer data and return the buffer corresponding to
162 * that user pointer. Search the rb tree for a buffer that matches the user data
165 * Return: Pointer to buffer or NULL
170 struct binder_buffer *buffer; in binder_alloc_prepare_to_free() local
173 buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); in binder_alloc_prepare_to_free()
175 return buffer; in binder_alloc_prepare_to_free()
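The prepare-to-free path resolves a raw user pointer by descending the rb-tree of allocated buffers, which is keyed by user_data, and refuses buffers whose allow_user_free flag is clear; the real function also clears the flag on success so a racing second free cannot return the same buffer. A toy sketch of the insert descent and the guarded lookup, using a plain unbalanced BST in place of the kernel's rb-tree (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Toy model: allocated buffers indexed by start address so a raw user
 * pointer can be validated by tree descent. No rebalancing here. */
struct tnode {
    unsigned long user_data;
    bool allow_user_free;
    struct tnode *left, *right;
};

static void insert_by_addr(struct tnode **root, struct tnode *nb)
{
    while (*root) {
        if (nb->user_data < (*root)->user_data)
            root = &(*root)->left;
        else
            root = &(*root)->right;
    }
    *root = nb;
}

/* Mirrors binder_alloc_prepare_to_free_locked(): exact-match lookup that
 * refuses buffers the kernel still owns, clearing the flag on success. */
static struct tnode *prepare_to_free(struct tnode *n, unsigned long user_ptr)
{
    while (n) {
        if (user_ptr < n->user_data) {
            n = n->left;
        } else if (user_ptr > n->user_data) {
            n = n->right;
        } else {
            if (!n->allow_user_free)
                return NULL;
            n->allow_user_free = false;   /* block a second free */
            return n;
        }
    }
    return NULL;
}

int main(void)
{
    struct tnode a = { .user_data = 0x1000, .allow_user_free = true };
    struct tnode b = { .user_data = 0x3000, .allow_user_free = false };
    struct tnode *root = NULL;

    insert_by_addr(&root, &a);
    insert_by_addr(&root, &b);
    printf("%p %p\n", (void *)prepare_to_free(root, 0x1000),   /* &a   */
           (void *)prepare_to_free(root, 0x3000));             /* NULL */
    return 0;
}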
205 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_lru_freelist_add()
254 alloc->pid, __func__, addr - alloc->buffer, ret); in binder_install_single_page()
269 struct binder_buffer *buffer, in binder_install_buffer_pages() argument
276 start = buffer->user_data & PAGE_MASK; in binder_install_buffer_pages()
277 final = PAGE_ALIGN(buffer->user_data + size); in binder_install_buffer_pages()
283 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_install_buffer_pages()
314 index = (page_addr - alloc->buffer) / PAGE_SIZE; in binder_lru_freelist_del()
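Three hits above compute index = (page_addr - alloc->buffer) / PAGE_SIZE: the per-page bookkeeping array is indexed by a page's position inside the mapping, so the translation is one subtraction and one divide. A hypothetical worked example, assuming 4 KiB pages:

#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL

int main(void)
{
    unsigned long region = 0x7f0000000000UL;  /* stands in for alloc->buffer */
    unsigned long page_addr = region + 3 * TOY_PAGE_SIZE;

    /* Mirrors: index = (page_addr - alloc->buffer) / PAGE_SIZE */
    printf("page index = %lu\n", (page_addr - region) / TOY_PAGE_SIZE);
    return 0;
}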
349 struct binder_buffer *buffer; in debug_no_space_locked() local
359 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_no_space_locked()
360 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
368 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_no_space_locked()
369 buffer_size = binder_alloc_buffer_size(alloc, buffer); in debug_no_space_locked()
392 struct binder_buffer *buffer; in debug_low_async_space_locked() local
400 * space left (which is less than 10% of total buffer size). in debug_low_async_space_locked()
409 buffer = rb_entry(n, struct binder_buffer, rb_node); in debug_low_async_space_locked()
410 if (buffer->pid != pid) in debug_low_async_space_locked()
412 if (!buffer->async_transaction) in debug_low_async_space_locked()
414 total_alloc_size += binder_alloc_buffer_size(alloc, buffer); in debug_low_async_space_locked()
420 * async space (which is 25% of total buffer size). Oneway spam is only in debug_low_async_space_locked()
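The two comments above pin down the oneway-spam heuristic's thresholds. Making them concrete, assuming the usual setup where half of the mapping is reserved as async space, a hypothetical 4 MiB mapping gives:

#include <stdio.h>

int main(void)
{
    /* Hypothetical 4 MiB binder mapping; half is reserved as async space. */
    unsigned long buffer_size = 4UL << 20;
    unsigned long async_space = buffer_size / 2;

    /* Detection arms below 20% of async space (10% of the mapping)... */
    unsigned long detect_below = async_space / 5;
    /* ...and a pid holding over 50% of async space (25% of the total)
     * becomes a oneway-spam suspect. */
    unsigned long spam_above = async_space / 2;

    printf("arm below %lu KiB free, flag above %lu KiB held\n",
           detect_below >> 10, spam_above >> 10);
    return 0;
}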
444 struct binder_buffer *buffer; in binder_alloc_new_buf_locked() local
453 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
458 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
459 BUG_ON(!buffer->free); in binder_alloc_new_buf_locked()
460 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
478 buffer = ERR_PTR(-ENOSPC); in binder_alloc_new_buf_locked()
483 /* Found an oversized buffer that needs to be split */ in binder_alloc_new_buf_locked()
484 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
485 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_new_buf_locked()
488 new_buffer->user_data = buffer->user_data + size; in binder_alloc_new_buf_locked()
489 list_add(&new_buffer->entry, &buffer->entry); in binder_alloc_new_buf_locked()
496 "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", in binder_alloc_new_buf_locked()
497 alloc->pid, size, buffer, buffer_size); in binder_alloc_new_buf_locked()
502 * adjacent in-use buffer. In that case, the page has already been in binder_alloc_new_buf_locked()
505 next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; in binder_alloc_new_buf_locked()
506 curr_last_page = PAGE_ALIGN(buffer->user_data + size); in binder_alloc_new_buf_locked()
507 binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), in binder_alloc_new_buf_locked()
510 rb_erase(&buffer->rb_node, &alloc->free_buffers); in binder_alloc_new_buf_locked()
511 buffer->free = 0; in binder_alloc_new_buf_locked()
512 buffer->allow_user_free = 0; in binder_alloc_new_buf_locked()
513 binder_insert_allocated_buffer_locked(alloc, buffer); in binder_alloc_new_buf_locked()
514 buffer->async_transaction = is_async; in binder_alloc_new_buf_locked()
515 buffer->oneway_spam_suspect = false; in binder_alloc_new_buf_locked()
522 buffer->oneway_spam_suspect = true; in binder_alloc_new_buf_locked()
528 return buffer; in binder_alloc_new_buf_locked()
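Taken together, the binder_alloc_new_buf_locked() hits show a best-fit search over the size-keyed free tree followed by an optional split: when the winner is oversized, the preallocated new_buffer takes over the tail at buffer->user_data + size and goes back into the free structures. A compact userspace model over a plain linked free list (chunk, alloc_best_fit, and the addresses are illustrative; the kernel uses an rb-tree and implicit sizes):

#include <stdio.h>
#include <stdlib.h>

struct chunk {
    unsigned long start;
    size_t size;
    struct chunk *next;
};

static struct chunk *alloc_best_fit(struct chunk **free_list, size_t size)
{
    struct chunk **pp, **best = NULL;

    /* best fit: the smallest free chunk that still satisfies the request */
    for (pp = free_list; *pp; pp = &(*pp)->next)
        if ((*pp)->size >= size &&
            (!best || (*pp)->size < (*best)->size))
            best = pp;
    if (!best)
        return NULL;                    /* ERR_PTR(-ENOSPC) analogue */

    struct chunk *c = *best;

    *best = c->next;                    /* unlink, like rb_erase() */
    if (c->size > size) {
        /* Oversized: the tail becomes a new free chunk, mirroring
         * new_buffer->user_data = buffer->user_data + size. */
        struct chunk *tail = malloc(sizeof(*tail));

        tail->start = c->start + size;
        tail->size = c->size - size;
        tail->next = *free_list;
        *free_list = tail;
        c->size = size;
    }
    return c;
}

int main(void)
{
    struct chunk big = { .start = 0x1000, .size = 8192 };
    struct chunk *free_list = &big;
    struct chunk *got = alloc_best_fit(&free_list, 4096);

    printf("got %#lx (%zu bytes); free tail %#lx (%zu bytes)\n",
           got->start, got->size, free_list->start, free_list->size);
    return 0;
}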
554 * binder_alloc_new_buf() - Allocate a new binder buffer
556 * @data_size: size of user data buffer
557 * @offsets_size: user-specified size of the offsets section
559 * @is_async: buffer for async transaction
561 * Allocate a new buffer given the requested sizes. Returns
562 * the kernel version of the buffer pointer. The size allocated
566 * Return: The allocated buffer or %ERR_PTR(-errno) if error
574 struct binder_buffer *buffer, *next; in binder_alloc_new_buf() local
595 /* Preallocate the next buffer */ in binder_alloc_new_buf()
601 buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); in binder_alloc_new_buf()
602 if (IS_ERR(buffer)) { in binder_alloc_new_buf()
607 buffer->data_size = data_size; in binder_alloc_new_buf()
608 buffer->offsets_size = offsets_size; in binder_alloc_new_buf()
609 buffer->extra_buffers_size = extra_buffers_size; in binder_alloc_new_buf()
610 buffer->pid = current->tgid; in binder_alloc_new_buf()
613 ret = binder_install_buffer_pages(alloc, buffer, size); in binder_alloc_new_buf()
615 binder_alloc_free_buf(alloc, buffer); in binder_alloc_new_buf()
616 buffer = ERR_PTR(ret); in binder_alloc_new_buf()
619 return buffer; in binder_alloc_new_buf()
622 static unsigned long buffer_start_page(struct binder_buffer *buffer) in buffer_start_page() argument
624 return buffer->user_data & PAGE_MASK; in buffer_start_page()
627 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer) in prev_buffer_end_page() argument
629 return (buffer->user_data - 1) & PAGE_MASK; in prev_buffer_end_page()
633 struct binder_buffer *buffer) in binder_delete_free_buffer() argument
637 if (PAGE_ALIGNED(buffer->user_data)) in binder_delete_free_buffer()
640 BUG_ON(alloc->buffers.next == &buffer->entry); in binder_delete_free_buffer()
641 prev = binder_buffer_prev(buffer); in binder_delete_free_buffer()
643 if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) in binder_delete_free_buffer()
646 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_delete_free_buffer()
647 next = binder_buffer_next(buffer); in binder_delete_free_buffer()
648 if (buffer_start_page(next) == buffer_start_page(buffer)) in binder_delete_free_buffer()
652 binder_lru_freelist_add(alloc, buffer_start_page(buffer), in binder_delete_free_buffer()
653 buffer_start_page(buffer) + PAGE_SIZE); in binder_delete_free_buffer()
655 list_del(&buffer->entry); in binder_delete_free_buffer()
656 kfree(buffer); in binder_delete_free_buffer()
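binder_delete_free_buffer() decides whether the merged-away buffer's start page can be handed to the LRU freelist. Note prev_buffer_end_page(): under the implicit-size rule a buffer's predecessor ends at this buffer's user_data, so (user_data - 1) & PAGE_MASK is the predecessor's last page. The start page is donated only when the buffer did not start page-aligned (that page was already covered when the buffer itself was freed), no data just before the free predecessor ends on it, and the next buffer does not start on it. A toy version of that predicate (hypothetical addresses, 4 KiB pages):

#include <stdbool.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL
#define TOY_PAGE_MASK (~(TOY_PAGE_SIZE - 1))

/* Can the dying free buffer's start page go to the freelist? Inputs are
 * the free predecessor's start, this buffer's start, and the next
 * buffer's start (0 if none). */
static bool start_page_reclaimable(unsigned long prev_start,
                                   unsigned long start,
                                   unsigned long next_start)
{
    unsigned long page = start & TOY_PAGE_MASK;

    if (!(start & (TOY_PAGE_SIZE - 1)))
        return false;   /* page-aligned: page was freed with the buffer */
    if (((prev_start - 1) & TOY_PAGE_MASK) == page)
        return false;   /* data before the free predecessor ends on it */
    if (next_start && (next_start & TOY_PAGE_MASK) == page)
        return false;   /* next buffer starts on it */
    return true;
}

int main(void)
{
    printf("%d\n", start_page_reclaimable(0x2100, 0x2300, 0x3000)); /* 0 */
    printf("%d\n", start_page_reclaimable(0x1800, 0x2300, 0x3000)); /* 1 */
    return 0;
}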
660 struct binder_buffer *buffer) in binder_free_buf_locked() argument
664 buffer_size = binder_alloc_buffer_size(alloc, buffer); in binder_free_buf_locked()
666 size = ALIGN(buffer->data_size, sizeof(void *)) + in binder_free_buf_locked()
667 ALIGN(buffer->offsets_size, sizeof(void *)) + in binder_free_buf_locked()
668 ALIGN(buffer->extra_buffers_size, sizeof(void *)); in binder_free_buf_locked()
672 alloc->pid, buffer, size, buffer_size); in binder_free_buf_locked()
674 BUG_ON(buffer->free); in binder_free_buf_locked()
676 BUG_ON(buffer->transaction != NULL); in binder_free_buf_locked()
677 BUG_ON(buffer->user_data < alloc->buffer); in binder_free_buf_locked()
678 BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); in binder_free_buf_locked()
680 if (buffer->async_transaction) { in binder_free_buf_locked()
687 binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), in binder_free_buf_locked()
688 (buffer->user_data + buffer_size) & PAGE_MASK); in binder_free_buf_locked()
690 rb_erase(&buffer->rb_node, &alloc->allocated_buffers); in binder_free_buf_locked()
691 buffer->free = 1; in binder_free_buf_locked()
692 if (!list_is_last(&buffer->entry, &alloc->buffers)) { in binder_free_buf_locked()
693 struct binder_buffer *next = binder_buffer_next(buffer); in binder_free_buf_locked()
700 if (alloc->buffers.next != &buffer->entry) { in binder_free_buf_locked()
701 struct binder_buffer *prev = binder_buffer_prev(buffer); in binder_free_buf_locked()
704 binder_delete_free_buffer(alloc, buffer); in binder_free_buf_locked()
706 buffer = prev; in binder_free_buf_locked()
709 binder_insert_free_buffer(alloc, buffer); in binder_free_buf_locked()
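The free path marks the buffer free, returns its interior pages to the freelist, and then coalesces: a free successor is deleted first, then a free predecessor absorbs the buffer itself (which is why buffer = prev before the final reinsertion into the free tree). A toy model of that merge order with an explicit free flag; in the kernel, "absorbing" a neighbor is just kfree()ing its binder_buffer, since sizes are implicit:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Buffers on an address-ordered, singly linked list; names are toys. */
struct tb {
    unsigned long start;
    bool free;
    struct tb *next;
};

static void free_and_coalesce(struct tb **head, struct tb *b)
{
    b->free = true;

    /* absorb a free successor: its start address simply disappears */
    if (b->next && b->next->free) {
        struct tb *n = b->next;

        b->next = n->next;
        free(n);
    }
    /* let a free predecessor absorb us, mirroring buffer = prev */
    for (struct tb *p = *head; p && p->next; p = p->next) {
        if (p->next == b) {
            if (p->free) {
                p->next = b->next;
                free(b);
            }
            break;
        }
    }
}

int main(void)
{
    struct tb *a = malloc(sizeof(*a));
    struct tb *b = malloc(sizeof(*b));
    struct tb *c = malloc(sizeof(*c));

    *a = (struct tb){ .start = 0x1000, .free = true, .next = b };
    *b = (struct tb){ .start = 0x2000, .free = false, .next = c };
    *c = (struct tb){ .start = 0x3000, .free = true, .next = NULL };

    struct tb *head = a;

    free_and_coalesce(&head, b);   /* b swallows c, then a swallows b */
    printf("one free run left: start %#lx, next %p\n",
           head->start, (void *)head->next);
    return 0;
}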
713 * binder_alloc_get_page() - get kernel pointer for given buffer offset
715 * @buffer: binder buffer to be accessed
716 * @buffer_offset: offset into @buffer data
720 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
724 * to a valid address within the @buffer and that @buffer is
732 struct binder_buffer *buffer, in binder_alloc_get_page() argument
737 (buffer->user_data - alloc->buffer); in binder_alloc_get_page()
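binder_alloc_get_page() rebases the caller's offset from buffer-relative to mapping-relative (buffer_offset + (buffer->user_data - alloc->buffer)) and then splits it into a page index and an offset within that page. A hypothetical worked example:

#include <stdio.h>

#define TOY_PAGE_SIZE  4096UL
#define TOY_PAGE_SHIFT 12

int main(void)
{
    /* Hypothetical numbers: the buffer starts 0x1800 into the mapping and
     * we want byte 0x900 of its payload. */
    unsigned long buffer_in_region = 0x1800;
    unsigned long buffer_offset = 0x900;

    unsigned long off = buffer_offset + buffer_in_region;   /* 0x2100 */

    printf("page index %lu, offset in page %#lx\n",
           off >> TOY_PAGE_SHIFT, off & (TOY_PAGE_SIZE - 1));
    return 0;
}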
748 * binder_alloc_clear_buf() - zero out buffer
750 * @buffer: binder buffer to be cleared
752 * memset the given buffer to 0
755 struct binder_buffer *buffer) in binder_alloc_clear_buf() argument
757 size_t bytes = binder_alloc_buffer_size(alloc, buffer); in binder_alloc_clear_buf()
765 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_clear_buf()
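The clear loop cannot memset the whole range in one call because each page of the mapping is reached through its own kernel mapping; every iteration therefore clears at most the remainder of the current page. A userspace sketch of the loop, with a flat array standing in for the per-page mappings (toy names):

#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096UL

static void clear_range(unsigned char *region, size_t start, size_t bytes)
{
    while (bytes) {
        size_t pgoff = start & (TOY_PAGE_SIZE - 1);
        size_t chunk = TOY_PAGE_SIZE - pgoff;

        if (chunk > bytes)
            chunk = bytes;
        memset(region + start, 0, chunk);   /* clear this page fragment */
        bytes -= chunk;
        start += chunk;
    }
}

int main(void)
{
    static unsigned char region[2 * TOY_PAGE_SIZE];

    memset(region, 0xaa, sizeof(region));
    clear_range(region, TOY_PAGE_SIZE - 8, 16);   /* straddles a page edge */
    printf("%x %x %x\n", region[TOY_PAGE_SIZE - 9],
           region[TOY_PAGE_SIZE - 8], region[TOY_PAGE_SIZE + 8]);
    /* prints: aa 0 aa */
    return 0;
}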
775 * binder_alloc_free_buf() - free a binder buffer
777 * @buffer: kernel pointer to buffer
779 * Free the buffer allocated via binder_alloc_new_buf()
782 struct binder_buffer *buffer) in binder_alloc_free_buf() argument
792 if (buffer->clear_on_free) { in binder_alloc_free_buf()
793 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_free_buf()
794 buffer->clear_on_free = false; in binder_alloc_free_buf()
797 binder_free_buf_locked(alloc, buffer); in binder_alloc_free_buf()
817 struct binder_buffer *buffer; in binder_alloc_mmap_handler() local
837 alloc->buffer = vma->vm_start; in binder_alloc_mmap_handler()
853 buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); in binder_alloc_mmap_handler()
854 if (!buffer) { in binder_alloc_mmap_handler()
856 failure_string = "alloc buffer struct"; in binder_alloc_mmap_handler()
860 buffer->user_data = alloc->buffer; in binder_alloc_mmap_handler()
861 list_add(&buffer->entry, &alloc->buffers); in binder_alloc_mmap_handler()
862 buffer->free = 1; in binder_alloc_mmap_handler()
863 binder_insert_free_buffer(alloc, buffer); in binder_alloc_mmap_handler()
875 alloc->buffer = 0; in binder_alloc_mmap_handler()
893 struct binder_buffer *buffer; in binder_alloc_deferred_release() local
900 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_deferred_release()
903 BUG_ON(buffer->transaction); in binder_alloc_deferred_release()
905 if (buffer->clear_on_free) { in binder_alloc_deferred_release()
906 binder_alloc_clear_buf(alloc, buffer); in binder_alloc_deferred_release()
907 buffer->clear_on_free = false; in binder_alloc_deferred_release()
909 binder_free_buf_locked(alloc, buffer); in binder_alloc_deferred_release()
914 buffer = list_first_entry(&alloc->buffers, in binder_alloc_deferred_release()
916 WARN_ON(!buffer->free); in binder_alloc_deferred_release()
918 list_del(&buffer->entry); in binder_alloc_deferred_release()
920 kfree(buffer); in binder_alloc_deferred_release()
936 page_addr = alloc->buffer + i * PAGE_SIZE; in binder_alloc_deferred_release()
956 * binder_alloc_print_allocated() - print buffer info
960 * Prints information about every buffer associated with
966 struct binder_buffer *buffer; in binder_alloc_print_allocated() local
971 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_print_allocated()
972 seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n", in binder_alloc_print_allocated()
973 buffer->debug_id, in binder_alloc_print_allocated()
974 buffer->user_data - alloc->buffer, in binder_alloc_print_allocated()
975 buffer->data_size, buffer->offsets_size, in binder_alloc_print_allocated()
976 buffer->extra_buffers_size, in binder_alloc_print_allocated()
977 buffer->transaction ? "active" : "delivered"); in binder_alloc_print_allocated()
1082 page_addr = alloc->buffer + index * PAGE_SIZE; in binder_alloc_free_page()
1185 * check_buffer() - verify that buffer/offset is safe to access
1187 * @buffer: binder buffer to be accessed
1188 * @offset: offset into @buffer data
1192 * @buffer and that the buffer is currently active and not freeable.
1194 * allowed to touch the buffer in two cases:
1196 * 1) when the buffer is being created:
1197 * (buffer->free == 0 && buffer->allow_user_free == 0)
1198 * 2) when the buffer is being torn down:
1199 * (buffer->free == 0 && buffer->transaction == NULL).
1201 * Return: true if the buffer is safe to access
1204 struct binder_buffer *buffer, in check_buffer() argument
1207 size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); in check_buffer()
1212 !buffer->free && in check_buffer()
1213 (!buffer->allow_user_free || !buffer->transaction); in check_buffer()
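Modeling the predicate in userspace: the access must lie within the buffer, and the buffer must be in one of the two touchable states the kernel-doc above lists. The real check_buffer() additionally requires @offset to be u32-aligned, which this sketch omits:

#include <stdbool.h>
#include <stdio.h>

static bool toy_check(size_t buffer_size, size_t offset, size_t bytes,
                      bool is_free, bool allow_user_free, bool has_txn)
{
    return offset <= buffer_size &&
           bytes <= buffer_size - offset &&   /* overflow-safe bounds check */
           !is_free &&
           (!allow_user_free || !has_txn);
}

int main(void)
{
    printf("%d\n", toy_check(128, 64, 64, false, false, true)); /* 1: creating */
    printf("%d\n", toy_check(128, 64, 65, false, false, true)); /* 0: overflow */
    printf("%d\n", toy_check(128, 0, 16, false, true, true));   /* 0: freeable */
    return 0;
}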
1219 * @buffer: binder buffer to be accessed
1220 * @buffer_offset: offset into @buffer data
1221 * @from: userspace pointer to source buffer
1224 * Copy bytes from source userspace to target buffer.
1230 struct binder_buffer *buffer, in binder_alloc_copy_user_to_buffer() argument
1235 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_copy_user_to_buffer()
1245 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_copy_user_to_buffer()
1262 struct binder_buffer *buffer, in binder_alloc_do_buffer_copy() argument
1268 if (!check_buffer(alloc, buffer, buffer_offset, bytes)) in binder_alloc_do_buffer_copy()
1276 page = binder_alloc_get_page(alloc, buffer, in binder_alloc_do_buffer_copy()
1292 struct binder_buffer *buffer, in binder_alloc_copy_to_buffer() argument
1297 return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, in binder_alloc_copy_to_buffer()
1303 struct binder_buffer *buffer, in binder_alloc_copy_from_buffer() argument
1307 return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, in binder_alloc_copy_from_buffer()
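Both copy directions funnel into binder_alloc_do_buffer_copy(), which walks the buffer with the same page-at-a-time pattern as the clear loop above, choosing the memcpy direction with a to_buffer flag (the user-facing variant uses copy_from_user per fragment instead). A runnable sketch against a flat array (toy names and sizes):

#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096UL

/* Direction-flagged page-at-a-time copy, one memcpy per page fragment. */
static void toy_copy(unsigned char *region, int to_buffer,
                     size_t buf_off, unsigned char *ptr, size_t bytes)
{
    while (bytes) {
        size_t pgoff = buf_off & (TOY_PAGE_SIZE - 1);
        size_t chunk = TOY_PAGE_SIZE - pgoff;

        if (chunk > bytes)
            chunk = bytes;
        if (to_buffer)
            memcpy(region + buf_off, ptr, chunk);
        else
            memcpy(ptr, region + buf_off, chunk);
        bytes -= chunk;
        buf_off += chunk;
        ptr += chunk;
    }
}

int main(void)
{
    static unsigned char region[2 * TOY_PAGE_SIZE];
    unsigned char msg[] = "hello world";
    unsigned char out[sizeof(msg)] = { 0 };

    /* write across the page boundary, then read it back */
    toy_copy(region, 1, TOY_PAGE_SIZE - 5, msg, sizeof(msg));
    toy_copy(region, 0, TOY_PAGE_SIZE - 5, out, sizeof(out));
    printf("%s\n", (char *)out);   /* hello world */
    return 0;
}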