Lines matching refs: alloc
219 static int ttm_pool_apply_caching(struct ttm_pool_alloc_state *alloc)
222 unsigned int num_pages = alloc->pages - alloc->caching_divide;
227 switch (alloc->tt_caching) {
231 return set_pages_array_wc(alloc->caching_divide, num_pages);
233 return set_pages_array_uc(alloc->caching_divide, num_pages);
236 alloc->caching_divide = alloc->pages;
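Lines 219-236 are ttm_pool_apply_caching(). The alloc state keeps two cursors into the same page array: alloc->pages, the next slot to fill, and alloc->caching_divide, the first slot whose caching attribute has not yet been changed. The difference is a pending window that gets converted in one batched set_pages_array_wc()/_uc() call. Below is a compile-able userspace model of that window bookkeeping; all names and the stubbed set_array_* helpers are illustrative stand-ins, not kernel API.

#include <stdio.h>

enum tt_caching { tt_cached, tt_write_combined, tt_uncached };

struct alloc_state {
	void **pages;		/* write cursor: next page slot to fill */
	void **caching_divide;	/* first slot whose caching is still unapplied */
	enum tt_caching tt_caching;
};

/* Stand-ins for the x86 set_pages_array_wc()/_uc() batch helpers. */
static int set_array_wc(void **p, unsigned int n)
{
	printf("wc: %u pages starting at %p\n", n, (void *)p);
	return 0;
}

static int set_array_uc(void **p, unsigned int n)
{
	printf("uc: %u pages starting at %p\n", n, (void *)p);
	return 0;
}

/* Model of ttm_pool_apply_caching(): flush the [caching_divide, pages)
 * window in one batched call.  On the wc/uc paths the divide is left
 * for the caller to advance (cf. lines 573 and 635). */
static int apply_caching(struct alloc_state *alloc)
{
	unsigned int num_pages = alloc->pages - alloc->caching_divide;

	if (!num_pages)
		return 0;

	switch (alloc->tt_caching) {
	case tt_cached:
		break;
	case tt_write_combined:
		return set_array_wc(alloc->caching_divide, num_pages);
	case tt_uncached:
		return set_array_uc(alloc->caching_divide, num_pages);
	}
	alloc->caching_divide = alloc->pages;
	return 0;
}

int main(void)
{
	void *slots[4] = { 0 };
	struct alloc_state a = { slots + 4, slots, tt_write_combined };

	return apply_caching(&a);	/* converts all 4 pending slots */
}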
486 struct ttm_pool_alloc_state *alloc,
492 *alloc->pages++ = allocated++;
494 alloc->remaining_pages -= nr;
496 if (!alloc->dma_addr)
500 *alloc->dma_addr++ = first_dma;
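Lines 486-500 are ttm_pool_allocated_page_commit(), which fans one order-N allocation out into nr consecutive tt slots: the page pointers are stored by walking the compound allocation's contiguous struct pages, the remaining count drops, and, only when the tt is DMA-mapped (alloc->dma_addr non-NULL), a per-slot DMA address is recorded, stepping by PAGE_SIZE from the chunk's first mapping. A self-contained model follows; dma_addr_t, struct page and PAGE_SIZE are local stand-ins for the kernel types.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
typedef uint64_t dma_addr_t;

struct page { int dummy; };

struct alloc_state {
	struct page **pages;	/* output cursor for page pointers */
	dma_addr_t *dma_addr;	/* output cursor for DMA addresses, or NULL */
	unsigned long remaining_pages;
};

/* Model of ttm_pool_allocated_page_commit(): the struct pages of one
 * compound allocation are contiguous, so "allocated++" walks them,
 * and the DMA address advances by PAGE_SIZE per slot. */
static void allocated_page_commit(struct page *allocated, dma_addr_t first_dma,
				  struct alloc_state *alloc, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; ++i)
		*alloc->pages++ = allocated++;

	alloc->remaining_pages -= nr;

	if (!alloc->dma_addr)
		return;

	for (i = 0; i < nr; ++i) {
		*alloc->dma_addr++ = first_dma;
		first_dma += PAGE_SIZE;
	}
}

int main(void)
{
	struct page compound[4];	/* pretend order-2 allocation */
	struct page *slots[4];
	dma_addr_t dma[4];
	struct alloc_state a = { slots, dma, 4 };

	allocated_page_commit(compound, 0x100000, &a, 4);
	printf("remaining=%lu dma[3]=%#lx\n", a.remaining_pages,
	       (unsigned long)dma[3]);	/* remaining=0 dma[3]=0x103000 */
	return 0;
}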
512 struct ttm_pool_alloc_state *alloc)
516 struct page **first_page = alloc->pages;
561 dma_addr_t *dma_addr = alloc->dma_addr ? &restore->first_dma : NULL;
571 alloc, nr);
572 if (restore->page_caching == alloc->tt_caching || PageHighMem(restore->alloced_page))
573 alloc->caching_divide = alloc->pages;
574 restore->snapshot_alloc = *alloc;
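Lines 512-574 are ttm_pool_restore_commit(), which copies backed-up content into a freshly allocated chunk before committing it. Two details are visible in the hits: the failure-path unmap at line 561 only passes a DMA cursor when the tt is DMA-mapped, and lines 572-573 advance the caching divide only when the chunk's caching already matches the tt, or the page is highmem (which has no linear-map alias whose attributes would need changing). The restore state's restored_pages counter lets an interrupted copy resume mid-chunk; here is a toy model of that resumable loop, where copy_one() stands in for a copy-from-backup call that can return an -EINTR-style error.

#include <stdio.h>

struct restore {
	unsigned long restored_pages;	/* progress within the chunk */
	unsigned long nr;		/* chunk size: 1UL << order */
};

/* Pretend copy-from-backup that runs out of budget partway through. */
static int copy_one(unsigned long i, int *budget)
{
	if (!(*budget)--)
		return -1;	/* models an interrupted backup copy */
	printf("restored page %lu\n", i);
	return 0;
}

/* Model of the restore_commit copy loop: per-page progress is kept in
 * restored_pages, so a second call resumes where the first stopped. */
static int restore_chunk(struct restore *r, int budget)
{
	unsigned long i;

	for (i = r->restored_pages; i < r->nr; ++i) {
		if (copy_one(i, &budget))
			return -1;
		r->restored_pages++;
	}
	r->restored_pages = 0;	/* chunk done; reset for the next one */
	return 0;
}

int main(void)
{
	struct restore r = { 0, 4 };

	if (restore_chunk(&r, 2))	/* pages 0-1, then interrupted */
		restore_chunk(&r, 100);	/* resumes at page 2 */
	return 0;
}

Line 574 then snapshots the whole alloc state by value, which works because the state holds nothing but cursors and counters; line 862 reloads exactly that snapshot.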
587 const struct ttm_pool_alloc_state *alloc)
595 restore->snapshot_alloc = *alloc;
607 struct ttm_pool_alloc_state *alloc,
614 caching_consistent = (page_caching == alloc->tt_caching) || PageHighMem(p);
617 r = ttm_pool_apply_caching(alloc);
622 if (alloc->dma_addr) {
630 first_dma, restore, alloc);
632 ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order);
635 alloc->caching_divide = alloc->pages;
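Lines 587-635 split the post-allocation bookkeeping. When a restore is in flight, the helper at lines 587-595 (ttm_pool_page_allocated_restore() in the kernel tree) merely records the chunk, including a by-value snapshot of the alloc state at line 595, so restore_commit can fill it from backup before it becomes visible. Otherwise ttm_pool_page_allocated() (lines 607-635) commits immediately: a chunk whose caching is already consistent (line 614) must not be swept into a later batched conversion, so any pending window is flushed first (line 617) and the divide jumps past the new slots (line 635). A condensed, index-based model of just that bookkeeping follows; the actual attribute change is abstracted into a printf and the divide advance is folded into the flush.

#include <stdbool.h>
#include <stdio.h>

enum caching { CACHED, WC };

struct alloc_state {
	int pages;		/* next slot to fill (index form) */
	int caching_divide;	/* first slot still awaiting conversion */
	enum caching tt_caching;
};

/* Simplified stand-in for ttm_pool_apply_caching(). */
static void flush_window(struct alloc_state *a)
{
	if (a->pages - a->caching_divide)
		printf("convert slots [%d, %d)\n", a->caching_divide, a->pages);
	a->caching_divide = a->pages;
}

/* Bookkeeping of ttm_pool_page_allocated(), restore branch omitted. */
static void page_allocated(struct alloc_state *a, enum caching page_caching,
			   bool highmem, int nr)
{
	bool consistent = page_caching == a->tt_caching || highmem; /* line 614 */

	if (consistent)
		flush_window(a);		/* line 617 */

	a->pages += nr;				/* commit, line 632 */

	if (consistent)
		a->caching_divide = a->pages;	/* line 635 */
}

int main(void)
{
	struct alloc_state a = { 0, 0, WC };

	page_allocated(&a, CACHED, false, 4);	/* stays pending conversion */
	page_allocated(&a, WC, false, 2);	/* flushes [0,4), skips [4,6) */
	printf("divide=%d pages=%d\n", a.caching_divide, a.pages);	/* 6 6 */
	return 0;
}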
681 struct ttm_pool_alloc_state *alloc)
683 alloc->pages = tt->pages;
684 alloc->caching_divide = tt->pages;
685 alloc->dma_addr = tt->dma_address;
686 alloc->remaining_pages = tt->num_pages;
687 alloc->tt_caching = tt->caching;
695 const struct ttm_pool_alloc_state *alloc)
697 return min_t(unsigned int, highest, __fls(alloc->remaining_pages));
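Lines 681-697 cover the setup. ttm_pool_alloc_state_init() (lines 681-687) shows the state is nothing but cursors into tt->pages and tt->dma_address plus a remaining count and the target caching, which is what makes the by-value snapshots above cheap. ttm_pool_alloc_find_order() (line 697) clamps the allocation order so a chunk of 1 << order pages never overshoots what is still needed: __fls() is the index of the highest set bit, so 300 remaining pages yields order 8 (a 256-page chunk). A small model, using the GCC-style __builtin_clzl() in place of the kernel's __fls():

#include <stdio.h>

/* Portable stand-in for __fls(): index of the highest set bit
 * (x must be non-zero). */
static unsigned int fls_index(unsigned long x)
{
	return 8 * sizeof(x) - 1 - (unsigned int)__builtin_clzl(x);
}

/* Model of ttm_pool_alloc_find_order() (line 697). */
static unsigned int find_order(unsigned int highest, unsigned long remaining)
{
	unsigned int fit = fls_index(remaining);

	return highest < fit ? highest : fit;
}

int main(void)
{
	/* 300 remaining: fls_index(300) = 8, and 1 << 8 = 256 <= 300 */
	printf("%u\n", find_order(10, 300));	/* 8 */
	printf("%u\n", find_order(4, 300));	/* 4: capped by 'highest' */
	return 0;
}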
702 struct ttm_pool_alloc_state *alloc,
713 WARN_ON(!alloc->remaining_pages || ttm_tt_is_populated(tt));
714 WARN_ON(alloc->dma_addr && !pool->dev);
729 for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc);
730 alloc->remaining_pages;
731 order = ttm_pool_alloc_find_order(order, alloc)) {
761 r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc,
767 r = ttm_pool_restore_commit(restore, tt->backup, ctx, alloc);
773 r = ttm_pool_apply_caching(alloc);
789 caching_divide = alloc->caching_divide - tt->pages;
792 tt->num_pages - alloc->remaining_pages);
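Lines 702-792 are __ttm_pool_alloc()'s main loop. The for-statement at lines 729-731 starts at the largest order that fits and re-clamps every iteration, while allocation failures inside the body fall back one order at a time; per-chunk results are folded in via ttm_pool_page_allocated() (line 761) and, on the restore path, ttm_pool_restore_commit() (line 767), with a final ttm_pool_apply_caching() at line 773 flushing whatever window is still pending. On error, lines 789-792 convert the caching_divide cursor back into an index (pointer minus tt->pages) and free the committed range in two parts, since pages left of the divide carry the tt's caching while those right of it are still write-back cached. Below is a runnable model of just the descending-order loop; try_alloc() is a fake allocator that refuses orders above 2, and the real code errors out rather than decrementing below order 0.

#include <stdio.h>

/* Fake chunk allocator: anything above order 2 fails, forcing the
 * loop to step down. */
static int try_alloc(unsigned int order)
{
	return order <= 2;
}

/* Same clamp as ttm_pool_alloc_find_order(), without __fls(). */
static unsigned int find_order(unsigned int highest, unsigned long remaining)
{
	unsigned int fit = 0;

	while ((2UL << fit) <= remaining)
		fit++;
	return highest < fit ? highest : fit;
}

/* Model of the loop at lines 729-731: every iteration re-clamps the
 * order so a chunk never overshoots remaining_pages, and a failed
 * order is retried one step lower. */
int main(void)
{
	unsigned long remaining = 13;
	unsigned int order;

	for (order = find_order(10, remaining);
	     remaining;
	     order = find_order(order, remaining)) {
		if (!try_alloc(order)) {
			order--;
			continue;
		}
		printf("committed order %u (%lu page(s))\n", order, 1UL << order);
		remaining -= 1UL << order;
	}
	return 0;	/* 13 = 4 + 4 + 4 + 1: three order-2 chunks, one order-0 */
}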
812 struct ttm_pool_alloc_state alloc;
817 ttm_pool_alloc_state_init(tt, &alloc);
819 return __ttm_pool_alloc(pool, tt, ctx, &alloc, NULL);
839 struct ttm_pool_alloc_state alloc;
847 ttm_pool_alloc_state_init(tt, &alloc);
855 tt->restore->snapshot_alloc = alloc;
862 alloc = restore->snapshot_alloc;
864 ret = ttm_pool_restore_commit(restore, tt->backup, ctx, &alloc);
868 if (!alloc.remaining_pages)
872 return __ttm_pool_alloc(pool, tt, ctx, &alloc, tt->restore);
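Lines 812-872 are the two entry points. The plain allocator (lines 812-819) just initializes the state and runs the loop with no restore. The backup-restore variant (lines 839-872) is restartable: the first call seeds restore->snapshot_alloc with the fresh state (line 855); a later call reloads the snapshot (line 862), finishes any half-restored chunk (line 864), returns early if nothing remains (line 868), and otherwise lets __ttm_pool_alloc() continue from the reloaded cursors (line 872). A compressed model of that call-again-until-done contract follows; note that in the kernel the re-snapshot happens inside ttm_pool_restore_commit(), not in the entry point itself.

#include <stdio.h>

struct alloc_state { int done, remaining; };

struct restore {
	struct alloc_state snapshot_alloc;
	int valid;
};

/* Pretend worker that can be interrupted (returns nonzero) partway. */
static int produce(struct alloc_state *a, int budget)
{
	while (a->remaining && budget--) {
		a->done++;
		a->remaining--;
	}
	return a->remaining ? -1 : 0;
}

/* Model of the restartable entry point: every call resumes from the
 * snapshot, makes progress, and re-snapshots what it completed. */
static int restore_and_alloc(struct restore *r, int budget)
{
	struct alloc_state alloc;
	int ret;

	if (!r->valid) {			/* first call: seed it */
		alloc = (struct alloc_state){ 0, 10 };
		r->snapshot_alloc = alloc;
		r->valid = 1;
	} else {
		alloc = r->snapshot_alloc;	/* resume, cf. line 862 */
	}

	if (!alloc.remaining)
		return 0;			/* cf. line 868 */

	ret = produce(&alloc, budget);
	r->snapshot_alloc = alloc;		/* keep the progress */
	return ret;
}

int main(void)
{
	struct restore r = { { 0, 0 }, 0 };
	int ret;

	ret = restore_and_alloc(&r, 4);		/* interrupted: 4 of 10 done */
	printf("ret=%d done=%d\n", ret, r.snapshot_alloc.done);
	ret = restore_and_alloc(&r, 100);	/* resumes and finishes */
	printf("ret=%d done=%d\n", ret, r.snapshot_alloc.done);
	return 0;
}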
1061 * @use_dma_alloc: true if coherent DMA alloc should be used