Lines Matching full:order
62 * @vaddr: original vaddr returned for the mapping and order in the lower bits
96 * is typically a multi-order page).
98 * @order: The order of @alloced_page.
101 * full ttm_tt. In order not to lose any data (yet), keep information
112 unsigned int order; member
133 /* Allocate pages of size 1 << order with the given gfp_flags */
135 unsigned int order) in ttm_pool_alloc_page() argument
142 /* Don't set the __GFP_COMP flag for higher order allocations. in ttm_pool_alloc_page()
146 if (order) in ttm_pool_alloc_page()
151 p = alloc_pages_node(pool->nid, gfp_flags, order); in ttm_pool_alloc_page()
153 p->private = order; in ttm_pool_alloc_page()
161 if (order) in ttm_pool_alloc_page()
164 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
177 dma->vaddr = (unsigned long)vaddr | order; in ttm_pool_alloc_page()
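The matches at lines 62 and 177 show the allocation order being stashed in the low bits of the DMA-coherent vaddr: the mapping is at least PAGE_SIZE aligned, so its PAGE_SHIFT low bits are always zero and can carry a small order value. A minimal userspace sketch of that encoding, assuming PAGE_SHIFT == 12; the pack/unpack helper names are hypothetical, not from ttm_pool.c:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/*
 * Hypothetical helpers, not ttm_pool.c code: a page-aligned address has its
 * PAGE_SHIFT low bits clear, so an order smaller than PAGE_SHIFT fits there
 * and can be masked back out, as in "dma->vaddr = (unsigned long)vaddr | order".
 */
static unsigned long pack_vaddr_order(void *vaddr, unsigned int order)
{
	assert(((unsigned long)(uintptr_t)vaddr & ~PAGE_MASK) == 0); /* page aligned */
	return (unsigned long)(uintptr_t)vaddr | order;              /* order in low bits */
}

static void *unpack_vaddr(unsigned long packed)
{
	return (void *)(uintptr_t)(packed & PAGE_MASK);
}

static unsigned int unpack_order(unsigned long packed)
{
	return packed & ~PAGE_MASK;
}

int main(void)
{
	unsigned int order = 2;                           /* order 2 == 4 base pages */
	void *buf = aligned_alloc(PAGE_SIZE, (1UL << order) * PAGE_SIZE);
	unsigned long packed;

	if (!buf)
		return 1;
	packed = pack_vaddr_order(buf, order);
	printf("vaddr %p, order %u, size %lu\n", unpack_vaddr(packed),
	       unpack_order(packed), (1UL << unpack_order(packed)) * PAGE_SIZE);
	free(buf);
	return 0;
}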
186 /* Reset the caching and pages of size 1 << order */
188 unsigned int order, struct page *p) in ttm_pool_free_page() argument
199 set_pages_wb(p, 1 << order); in ttm_pool_free_page()
203 __free_pages(p, order); in ttm_pool_free_page()
207 if (order) in ttm_pool_free_page()
212 dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr, in ttm_pool_free_page()
239 /* DMA Map pages of 1 << order size and return the resulting dma_address. */
240 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, in ttm_pool_map() argument
250 size_t size = (1ULL << order) * PAGE_SIZE; in ttm_pool_map()
262 /* Unmap pages of 1 << order size */
277 unsigned int i, num_pages = 1 << pt->order; in ttm_pool_type_give()
289 atomic_long_add(1 << pt->order, &allocated_pages); in ttm_pool_type_give()
300 atomic_long_sub(1 << pt->order, &allocated_pages); in ttm_pool_type_take()
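The give/take matches above account for pool occupancy in units of order-0 pages, adding or subtracting 1 << order per chunk moved in or out. A small sketch of that bookkeeping using C11 atomics; the function and counter names are illustrative only:

#include <stdatomic.h>
#include <stdio.h>

/* Sketch only: one global counter kept in units of order-0 pages, so moving
 * a single order-3 chunk accounts for 8 pages, mirroring
 * atomic_long_add(1 << pt->order, &allocated_pages). */
static atomic_long allocated_pages;

static void pool_type_give(unsigned int order)
{
	atomic_fetch_add(&allocated_pages, 1L << order);
}

static void pool_type_take(unsigned int order)
{
	atomic_fetch_sub(&allocated_pages, 1L << order);
}

int main(void)
{
	pool_type_give(3);      /* one order-3 page == 8 base pages */
	pool_type_give(0);
	pool_type_take(3);
	printf("%ld pages held in pools\n", atomic_load(&allocated_pages)); /* 1 */
	return 0;
}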
310 enum ttm_caching caching, unsigned int order) in ttm_pool_type_init() argument
314 pt->order = order; in ttm_pool_type_init()
333 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); in ttm_pool_type_fini()
336 /* Return the pool_type to use for the given caching and order */
339 unsigned int order) in ttm_pool_select_type() argument
342 return &pool->caching[caching].orders[order]; in ttm_pool_select_type()
348 return &pool->caching[caching].orders[order]; in ttm_pool_select_type()
351 return &global_dma32_write_combined[order]; in ttm_pool_select_type()
353 return &global_write_combined[order]; in ttm_pool_select_type()
356 return &pool->caching[caching].orders[order]; in ttm_pool_select_type()
359 return &global_dma32_uncached[order]; in ttm_pool_select_type()
361 return &global_uncached[order]; in ttm_pool_select_type()
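The ttm_pool_select_type() matches show each caching mode keeping one pool per order, chosen either from the device's own pools or from global (optionally DMA32-limited) per-order arrays. A hedged sketch of that lookup cascade; the flags, array sizes and struct layout are assumptions, not the actual ttm_pool.c definitions:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NR_ORDERS 11
enum caching { CACHED, WRITE_COMBINED, UNCACHED, NR_CACHING };

struct pool_type { unsigned int order; };

struct pool {
	bool use_dma_alloc;                             /* device needs coherent memory */
	bool use_dma32;                                 /* restrict to 32-bit DMA */
	struct pool_type orders[NR_CACHING][NR_ORDERS]; /* per-device pools */
};

static struct pool_type global_wc[NR_ORDERS], global_uc[NR_ORDERS];
static struct pool_type global_dma32_wc[NR_ORDERS], global_dma32_uc[NR_ORDERS];

static struct pool_type *select_type(struct pool *pool, enum caching c,
				     unsigned int order)
{
	if (pool->use_dma_alloc)                        /* device-private pools */
		return &pool->orders[c][order];

	if (c == WRITE_COMBINED)
		return pool->use_dma32 ? &global_dma32_wc[order] : &global_wc[order];
	if (c == UNCACHED)
		return pool->use_dma32 ? &global_dma32_uc[order] : &global_uc[order];

	return NULL;                                    /* plain cached pages: no pooling */
}

int main(void)
{
	struct pool p = { .use_dma_alloc = false, .use_dma32 = true };

	printf("%d\n", select_type(&p, UNCACHED, 3) == &global_dma32_uc[3]); /* 1 */
	return 0;
}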
385 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); in ttm_pool_shrink()
386 num_pages = 1 << pt->order; in ttm_pool_shrink()
395 /* Return the allocation order for a page */
409 * as it has been backed up, in order to avoid memory pressure during
414 unsigned int order = ttm_pool_page_order(pool, p); in ttm_pool_split_for_swap() local
417 if (!order) in ttm_pool_split_for_swap()
420 split_page(p, order); in ttm_pool_split_for_swap()
421 nr = 1UL << order; in ttm_pool_split_for_swap()
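The split path above turns one multi-order page into 1 << order independently reclaimable order-0 pages laid out back to back. A trivial sketch of that arithmetic, with a made-up base address and order:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Sketch only: an order-N chunk covers 1 << N contiguous base pages, so
 * after a split, sub-page i starts at base + i * PAGE_SIZE. */
int main(void)
{
	unsigned int order = 3;
	unsigned long nr = 1UL << order;        /* 8 base pages */
	unsigned long base = 0x7f0000000000UL;  /* pretend page-aligned chunk */

	for (unsigned long i = 0; i < nr; i++)
		printf("page %lu at 0x%lx\n", i, base + i * PAGE_SIZE);
	return 0;
}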
451 return restore && restore->restored_pages < (1 << restore->order); in ttm_pool_restore_valid()
454 /* DMA unmap and free a multi-order page, either to the relevant pool or to the system. */
459 unsigned int order; in ttm_pool_unmap_and_free() local
463 order = ttm_pool_page_order(pool, page); in ttm_pool_unmap_and_free()
464 nr = (1UL << order); in ttm_pool_unmap_and_free()
468 pt = ttm_pool_select_type(pool, caching, order); in ttm_pool_unmap_and_free()
470 order = page->private; in ttm_pool_unmap_and_free()
471 nr = (1UL << order); in ttm_pool_unmap_and_free()
477 ttm_pool_free_page(pool, caching, order, page); in ttm_pool_unmap_and_free()
482 /* Populate the page-array using the most recently allocated multi-order page. */
514 pgoff_t i, nr = 1UL << restore->order; in ttm_pool_restore_commit()
581 ttm_pool_page_allocated_restore(struct ttm_pool *pool, unsigned int order, in ttm_pool_page_allocated_restore() argument
589 restore->order = order; in ttm_pool_page_allocated_restore()
604 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, in ttm_pool_page_allocated() argument
622 r = ttm_pool_map(pool, order, p, &first_dma); in ttm_pool_page_allocated()
628 ttm_pool_page_allocated_restore(pool, order, p, page_caching, in ttm_pool_page_allocated()
631 ttm_pool_allocated_page_commit(p, first_dma, alloc, 1UL << order); in ttm_pool_page_allocated()
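The commit call at line 631 hands the freshly allocated chunk over to the caller's state: 1 << order consecutive pages plus their DMA addresses. A sketch under simplified, assumed data structures (plain index/address arrays instead of struct page pointers); that the sub-page DMA addresses step by PAGE_SIZE from first_dma is an assumption of this sketch, not something visible in the matches:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t dma_addr_t;

/* Illustrative stand-in for the allocation state; names are made up. */
struct alloc_state {
	unsigned long pages[64];        /* stand-in for struct page *[] */
	dma_addr_t dma_addr[64];
	unsigned long nr;
};

static void commit_pages(struct alloc_state *a, unsigned long first_page,
			 dma_addr_t first_dma, unsigned int order)
{
	unsigned long i, nr = 1UL << order;

	for (i = 0; i < nr; i++) {
		a->pages[a->nr] = first_page + i;                /* consecutive sub-pages */
		a->dma_addr[a->nr] = first_dma + i * PAGE_SIZE;  /* contiguous DMA range */
		a->nr++;
	}
}

int main(void)
{
	struct alloc_state a = { .nr = 0 };

	commit_pages(&a, 1000, 0x80000000ULL, 2); /* order 2: four entries */
	printf("%lu entries, last dma 0x%llx\n", a.nr,
	       (unsigned long long)a.dma_addr[a.nr - 1]);
	return 0;
}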
690 * Find a suitable allocation order based on highest desired order
707 unsigned int order; in __ttm_pool_alloc() local
728 for (order = ttm_pool_alloc_find_order(MAX_PAGE_ORDER, alloc); in __ttm_pool_alloc()
730 order = ttm_pool_alloc_find_order(order, alloc)) { in __ttm_pool_alloc()
735 pt = ttm_pool_select_type(pool, page_caching, order); in __ttm_pool_alloc()
741 * write-back cached pools of the same order. Consider removing in __ttm_pool_alloc()
747 p = ttm_pool_alloc_page(pool, gfp_flags, order); in __ttm_pool_alloc()
749 /* If that fails, lower the order if possible and retry. */ in __ttm_pool_alloc()
751 if (order) { in __ttm_pool_alloc()
752 --order; in __ttm_pool_alloc()
760 r = ttm_pool_page_allocated(pool, order, p, page_caching, alloc, in __ttm_pool_alloc()
782 ttm_pool_free_page(pool, page_caching, order, p); in __ttm_pool_alloc()
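The allocation loop in __ttm_pool_alloc() starts at the largest order that still fits the request (line 728) and, when both the pool and the system allocation come up empty, lowers the order and retries (lines 749-752). A compact sketch of that fallback strategy; try_alloc(), find_order() and the constants are stand-ins, not TTM code:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PAGE_ORDER 10

/* Fake allocator that rejects anything above order 2, to force fallback. */
static bool try_alloc(unsigned int order)
{
	return order <= 2;
}

/* Largest order whose chunk still fits in the remaining page count. */
static unsigned int find_order(unsigned int max_order, unsigned long remaining)
{
	unsigned int order = max_order;

	while (order && (1UL << order) > remaining)
		order--;
	return order;
}

int main(void)
{
	unsigned long remaining = 13;   /* pages still to allocate */

	while (remaining) {
		unsigned int order = find_order(MAX_PAGE_ORDER, remaining);

		while (!try_alloc(order)) {
			if (!order)
				return 1;       /* out of memory */
			order--;                /* lower the order and retry */
		}
		printf("allocated order %u (%lu pages), %lu left\n",
		       order, 1UL << order, remaining - (1UL << order));
		remaining -= 1UL << order;
	}
	return 0;
}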
917 restore->restored_pages = 1UL << restore->order; in ttm_pool_drop_backed_up()
990 unsigned int order; in ttm_pool_backup() local
998 order = ttm_pool_page_order(pool, page); in ttm_pool_backup()
999 num_pages = 1UL << order; in ttm_pool_backup()
1006 __free_pages(page, order); in ttm_pool_backup()
1173 /* Print a nice header for the order */