Lines Matching refs:alloc (one hit per entry: the source line number, then the matching line of code; adjacent lines that do not contain "alloc" are omitted)

141 struct binder_alloc *alloc,
152 page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
153 if (!alloc->pages[page_index] ||
154 !list_empty(page_to_lru(alloc->pages[page_index]))) {
155 kunit_err(test, "expect alloc but is %s at page index %d\n",
156 alloc->pages[page_index] ?
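
The hits at 141-156 all fall inside one page-state check: every page spanned by a freshly allocated buffer must be present and must be off the LRU freelist. A minimal reconstruction of that check follows, assuming the file's usual includes (<kunit/test.h> and the binder_alloc header), the function name check_buffer_pages_allocated (taken from the hit at 175), the buffer->user_data field, the surrounding loop, and the ternary's string operands, none of which appear above:

static bool check_buffer_pages_allocated(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_buffer *buffer,
					 size_t size)
{
	unsigned long page_addr;
	unsigned long end;
	int page_index;

	end = PAGE_ALIGN(buffer->user_data + size);
	for (page_addr = buffer->user_data; page_addr < end;
	     page_addr += PAGE_SIZE) {
		page_index = (page_addr - alloc->vm_start) / PAGE_SIZE;
		/* The backing page must exist and must not sit on the freelist. */
		if (!alloc->pages[page_index] ||
		    !list_empty(page_to_lru(alloc->pages[page_index]))) {
			kunit_err(test, "expect alloc but is %s at page index %d\n",
				  alloc->pages[page_index] ? "lru" : "free",
				  page_index);
			return false;
		}
	}
	return true;
}
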
165 struct binder_alloc *alloc,
173 buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
175 !check_buffer_pages_allocated(test, alloc, buffers[i], sizes[i]))
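
From the hits at 165-175, the allocation helper drives binder_alloc_new_buf() once per test buffer and validates the backing pages. A sketch, with BUFFER_NUM, the failure counting, and the IS_ERR() check assumed:

static unsigned long binder_alloc_test_alloc_buf(struct kunit *test,
						 struct binder_alloc *alloc,
						 struct binder_buffer *buffers[],
						 size_t *sizes)
{
	unsigned long failures = 0;
	int i;

	for (i = 0; i < BUFFER_NUM; i++) {
		/* No offsets, no extra objects, not an async transaction. */
		buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
		if (IS_ERR(buffers[i]) ||
		    !check_buffer_pages_allocated(test, alloc, buffers[i],
						  sizes[i]))
			failures++;
	}
	return failures;
}
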
183 struct binder_alloc *alloc,
191 binder_alloc_free_buf(alloc, buffers[seq[i]]);
194 if (list_empty(page_to_lru(alloc->pages[i]))) {
196 alloc->pages[i] ? "alloc" : "free", i);
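
The hits at 183-196 show the inverse helper: free every buffer in the order given by the current permutation (seq[]), then confirm each page landed on the freelist. A sketch, with the loop bound `end / PAGE_SIZE` and the error-message text assumed:

static unsigned long binder_alloc_test_free_buf(struct kunit *test,
						struct binder_alloc *alloc,
						struct binder_buffer *buffers[],
						size_t *sizes, int *seq,
						size_t end)
{
	unsigned long failures = 0;
	int i;

	/* Free in the order dictated by the current permutation. */
	for (i = 0; i < BUFFER_NUM; i++)
		binder_alloc_free_buf(alloc, buffers[seq[i]]);

	/* Every page up to `end` should now be parked on the LRU freelist. */
	for (i = 0; i < end / PAGE_SIZE; i++) {
		if (list_empty(page_to_lru(alloc->pages[i]))) {
			kunit_err(test, "expect lru but is %s at page index %d\n",
				  alloc->pages[i] ? "alloc" : "free", i);
			failures++;
		}
	}
	return failures;
}
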
205 struct binder_alloc *alloc)
211 while ((count = list_lru_count(alloc->freelist))) {
212 list_lru_walk(alloc->freelist, binder_alloc_free_page,
216 for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
217 if (alloc->pages[i]) {
219 list_empty(page_to_lru(alloc->pages[i])) ?
220 "alloc" : "lru", i);
230 struct binder_alloc *alloc,
239 failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
247 failures = binder_alloc_test_free_buf(test, alloc, buffers,
256 failures = binder_alloc_test_alloc_buf(test, alloc, buffers,
264 failures = list_lru_count(alloc->freelist);
270 failures = binder_alloc_test_free_buf(test, alloc, buffers,
278 failures = binder_alloc_test_free_page(test, alloc);
282 failures, (alloc->buffer_size / PAGE_SIZE));
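
The hits at 230-282 outline the per-case driver: allocate, free, allocate again (so pages parked on the freelist must be reused, leaving the freelist empty), free once more, then reclaim every page. A condensed sketch; the tc field names (buffers, sizes, seq), the return convention (true means the case failed, matching `case_failed` at the hit on 310), and the final kunit_err comparing failures against the page count (hit at 282) are assumed or reduced away here:

static bool binder_alloc_test_alloc_free(struct kunit *test,
					 struct binder_alloc *alloc,
					 struct binder_alloc_test_case_info *tc,
					 size_t end)
{
	unsigned long failures;
	bool failed = false;

	/* Round 1: allocate, then free in the case's permutation order. */
	failures = binder_alloc_test_alloc_buf(test, alloc, tc->buffers,
					       tc->sizes);
	failed = failed || failures;
	failures = binder_alloc_test_free_buf(test, alloc, tc->buffers,
					      tc->sizes, tc->seq, end);
	failed = failed || failures;

	/* Round 2: reallocate; freelist pages must be reused, emptying it. */
	failures = binder_alloc_test_alloc_buf(test, alloc, tc->buffers,
					       tc->sizes);
	failed = failed || failures;
	failures = list_lru_count(alloc->freelist);
	failed = failed || failures;

	/* Round 3: free everything and let the shrinker reclaim each page. */
	failures = binder_alloc_test_free_buf(test, alloc, tc->buffers,
					      tc->sizes, tc->seq, end);
	failed = failed || failures;
	failures = binder_alloc_test_free_page(test, alloc);
	failed = failed || failures;

	return failed;
}
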
299 static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
310 case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
330 permute_frees(test, alloc, tc, runs, failures, index + 1, end);
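
The hits at 299-330 bracket a recursive permutation generator over the free order. A sketch of the classic swap-recurse-swap pattern, with the base case and failure accounting assumed (only the signature at 299, the leaf call at 310, and the recursion at 330 appear above):

static void permute_frees(struct kunit *test, struct binder_alloc *alloc,
			  struct binder_alloc_test_case_info *tc,
			  unsigned long *runs, unsigned long *failures,
			  int index, size_t end)
{
	bool case_failed;
	int i;

	if (index == BUFFER_NUM) {
		(*runs)++;
		case_failed = binder_alloc_test_alloc_free(test, alloc, tc, end);
		if (case_failed)
			(*failures)++;
		return;
	}

	/* Swap each candidate into `index`, recurse on the rest, swap back. */
	for (i = index; i < BUFFER_NUM; i++) {
		swap(tc->seq[index], tc->seq[i]);
		permute_frees(test, alloc, tc, runs, failures, index + 1, end);
		swap(tc->seq[index], tc->seq[i]);
	}
}
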
335 struct binder_alloc *alloc,
353 back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
362 permute_frees(test, alloc, tc, runs, failures, 0,
367 permute_frees(test, alloc, tc, runs, failures, 0, alloc->buffer_size);
370 static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
385 gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
400 gen_buf_offsets(test, alloc, end_offset, alignments, runs,
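
The hits at 335-400 expose only fragments of the case generators: gen_buf_sizes() lets back_sizes[0] absorb the slack up to the end of the mapped area (hit at 353) and runs the permutations both against the last end offset and against the full buffer size (hits at 362-367), while gen_buf_offsets() recurses over candidate end offsets per buffer (hit at 400) and hands each complete combination to gen_buf_sizes() (hit at 385). A skeleton of the offset recursion only; the index parameter, the alignment loop bound ALIGNMENT_NUM, and the next_offset() helper are entirely assumed:

static void gen_buf_offsets(struct kunit *test, struct binder_alloc *alloc,
			    size_t *end_offset, size_t *alignments,
			    unsigned long *runs, unsigned long *failures,
			    int index)
{
	size_t prev = index ? end_offset[index - 1] : 0;
	int a;

	if (index == BUFFER_NUM) {
		struct binder_alloc_test_case_info tc = {};

		gen_buf_sizes(test, alloc, &tc, end_offset, runs, failures);
		return;
	}

	/* Try each candidate alignment for this buffer's end, then recurse. */
	for (a = 0; a < ALIGNMENT_NUM; a++) {
		end_offset[index] = next_offset(prev, alignments[a]); /* assumed helper */
		gen_buf_offsets(test, alloc, end_offset, alignments, runs,
				failures, index + 1);
	}
}
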
406 struct binder_alloc alloc;
416 KUNIT_EXPECT_PTR_EQ(test, priv->alloc.freelist,
423 struct binder_alloc *alloc = &priv->alloc;
427 KUNIT_EXPECT_EQ(test, alloc->mapped, true);
428 KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);
430 n = rb_first(&alloc->allocated_buffers);
433 n = rb_first(&alloc->free_buffers);
435 KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
437 KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
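
The hits at 406-437 belong to the mmap smoke test: after mapping, the allocator must report mapped state, the configured size, no allocated buffers, and exactly one free buffer covering the whole area. A reconstruction, assuming the test name, the null check on the allocated-buffers tree, and the rb_entry() step that the listing omits:

static void binder_alloc_test_mmap(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;
	struct binder_alloc *alloc = &priv->alloc;
	struct binder_buffer *buf;
	struct rb_node *n;

	KUNIT_EXPECT_EQ(test, alloc->mapped, true);
	KUNIT_EXPECT_EQ(test, alloc->buffer_size, BINDER_MMAP_SIZE);

	/* No buffer has been handed out yet. */
	n = rb_first(&alloc->allocated_buffers);
	KUNIT_EXPECT_NULL(test, n);

	/* Exactly one free buffer, spanning the entire mapped area. */
	n = rb_first(&alloc->free_buffers);
	buf = rb_entry(n, struct binder_buffer, rb_node);
	KUNIT_EXPECT_EQ(test, binder_alloc_buffer_size(alloc, buf),
			BINDER_MMAP_SIZE);
	KUNIT_EXPECT_TRUE(test, list_is_last(&buf->entry, &alloc->buffers));
}
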
441 * binder_alloc_exhaustive_test() - Exhaustively test alloc and free of buffer pages.
457 gen_buf_offsets(test, &priv->alloc, end_offset, alignments, &runs,
468 struct binder_alloc *alloc = vma->vm_private_data;
470 binder_alloc_vma_close(alloc);
481 struct binder_alloc *alloc = filp->private_data;
486 vma->vm_private_data = alloc;
488 return binder_alloc_mmap_handler(alloc, vma);
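
The hits at 468-488 show the glue between the test file and the real allocator: the vma close hook forwards to binder_alloc_vma_close(), and the file's mmap handler stashes the allocator in vm_private_data before delegating to binder_alloc_mmap_handler(). A sketch of that wiring; the handler and vm_operations_struct names and the vm_ops assignment are assumed:

static void binder_alloc_test_vma_close(struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = vma->vm_private_data;

	binder_alloc_vma_close(alloc);
}

static const struct vm_operations_struct binder_alloc_test_vm_ops = {
	.close = binder_alloc_test_vma_close,
};

static int binder_alloc_test_mmap_handler(struct file *filp,
					   struct vm_area_struct *vma)
{
	struct binder_alloc *alloc = filp->private_data;

	vma->vm_private_data = alloc;
	vma->vm_ops = &binder_alloc_test_vm_ops;

	return binder_alloc_mmap_handler(alloc, vma);
}

static const struct file_operations binder_alloc_test_fops = {
	.mmap = binder_alloc_test_mmap_handler,
};
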
517 __binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);
520 &binder_alloc_test_fops, &priv->alloc,
523 kunit_err(test, "Failed to open binder alloc test driver file\n");
546 if (priv->alloc.mm)
547 binder_alloc_deferred_release(&priv->alloc);
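
Finally, the hits at 517-547 cover fixture setup and teardown: __binder_alloc_init() wires the allocator to the test's private freelist (so the init test at 416 can assert priv->alloc.freelist points at it), the allocator is exposed through a test file (the fops and &priv->alloc at 520), and teardown releases the allocator only if an mm was ever attached. A sketch of both hooks; the struct layout beyond the fields seen above, the list_lru_init() step, anon_inode_getfile() as the file factory, and the filp field are assumptions:

static int binder_alloc_test_init(struct kunit *test)
{
	struct binder_alloc_test *priv;
	struct file *filp;
	int err;

	priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	test->priv = priv;

	/* assumed: the private freelist must be initialized before use */
	err = list_lru_init(&priv->binder_test_freelist);
	if (err)
		return err;

	/* A private freelist keeps the tests off the global binder shrinker. */
	__binder_alloc_init(&priv->alloc, &priv->binder_test_freelist);

	filp = anon_inode_getfile("binder_alloc_kunit",	/* assumed factory */
				  &binder_alloc_test_fops, &priv->alloc,
				  O_RDWR | O_CLOEXEC);
	if (IS_ERR(filp)) {
		kunit_err(test, "Failed to open binder alloc test driver file\n");
		return PTR_ERR(filp);
	}
	priv->filp = filp;	/* assumed field */

	return 0;
}

static void binder_alloc_test_exit(struct kunit *test)
{
	struct binder_alloc_test *priv = test->priv;

	/* Release only if the mmap stage attached an mm to the allocator. */
	if (priv->alloc.mm)
		binder_alloc_deferred_release(&priv->alloc);
}
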