Lines matching "order" in drivers/gpu/drm/ttm/ttm_pool.c

54  * @vaddr: original vaddr returned for the mapping, with the order stored in the lower bits
79 /* Allocate pages of size 1 << order with the given gfp_flags */
81 unsigned int order) in ttm_pool_alloc_page() argument
88 /* Don't set the __GFP_COMP flag for higher order allocations. in ttm_pool_alloc_page()
92 if (order) in ttm_pool_alloc_page()
97 p = alloc_pages_node(pool->nid, gfp_flags, order); in ttm_pool_alloc_page()
99 p->private = order; in ttm_pool_alloc_page()
107 if (order) in ttm_pool_alloc_page()
110 vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE, in ttm_pool_alloc_page()
123 dma->vaddr = (unsigned long)vaddr | order; in ttm_pool_alloc_page()
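
Because dma_alloc_attrs() returns page-aligned memory, the low bits of the vaddr above are guaranteed zero, which is what lets the order ride along in them (see the @vaddr kerneldoc at line 54). A minimal kernel-context sketch of the packing, with hypothetical helper names:

    /* Hypothetical helpers; the real code open-codes this packing in
     * ttm_pool_alloc_page() and unpacks it in ttm_pool_free_page().
     */
    static unsigned long pack_vaddr_order(void *vaddr, unsigned int order)
    {
    	return (unsigned long)vaddr | order;	/* vaddr is page-aligned */
    }

    static void *unpack_vaddr(unsigned long packed)
    {
    	return (void *)(packed & PAGE_MASK);	/* strip the order bits */
    }

    static unsigned int unpack_order(unsigned long packed)
    {
    	return packed & ~PAGE_MASK;	/* order lives in the low bits */
    }
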
132 /* Reset the caching and free pages of size 1 << order */
134 unsigned int order, struct page *p) in ttm_pool_free_page() argument
145 set_pages_wb(p, 1 << order); in ttm_pool_free_page()
149 __free_pages(p, order); in ttm_pool_free_page()
153 if (order) in ttm_pool_free_page()
158 dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr, in ttm_pool_free_page()
185 /* Map pages of 1 << order size and fill the DMA address array */
186 static int ttm_pool_map(struct ttm_pool *pool, unsigned int order, in ttm_pool_map() argument
197 size_t size = (1ULL << order) * PAGE_SIZE; in ttm_pool_map()
204 for (i = 1 << order; i ; --i) { in ttm_pool_map()
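
A single contiguous allocation of 1 << order pages yields one base DMA address; the loop above fans it out into one entry per page in the caller's array. A kernel-context sketch, with fill_dma_addrs() as a hypothetical name for the pattern:

    /* Fan-out as in ttm_pool_map(); dma_addr is the caller's output cursor */
    static void fill_dma_addrs(dma_addr_t addr, unsigned int order,
    			       dma_addr_t **dma_addr)
    {
    	unsigned int i;

    	for (i = 1 << order; i; --i) {
    		*(*dma_addr)++ = addr;	/* one entry per page */
    		addr += PAGE_SIZE;	/* next page in the range */
    	}
    }
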
212 /* Unmap pages of 1 << order size */
227 unsigned int i, num_pages = 1 << pt->order; in ttm_pool_type_give()
239 atomic_long_add(1 << pt->order, &allocated_pages); in ttm_pool_type_give()
250 atomic_long_sub(1 << pt->order, &allocated_pages); in ttm_pool_type_take()
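
ttm_pool_type_give() parks pages on a per-type list and ttm_pool_type_take() pops them again, with the global allocated_pages counter tracking everything the shrinker could reclaim. A simplified kernel-context sketch of the pairing (the real give path also clears the pages first):

    /* demo_pool_type is a simplified stand-in for struct ttm_pool_type */
    static atomic_long_t allocated_pages;

    struct demo_pool_type {
    	unsigned int order;
    	spinlock_t lock;
    	struct list_head pages;
    };

    static void demo_give(struct demo_pool_type *pt, struct page *p)
    {
    	spin_lock(&pt->lock);
    	list_add(&p->lru, &pt->pages);
    	spin_unlock(&pt->lock);
    	atomic_long_add(1 << pt->order, &allocated_pages);
    }

    static struct page *demo_take(struct demo_pool_type *pt)
    {
    	struct page *p;

    	spin_lock(&pt->lock);
    	p = list_first_entry_or_null(&pt->pages, struct page, lru);
    	if (p) {
    		list_del_init(&p->lru);
    		atomic_long_sub(1 << pt->order, &allocated_pages);
    	}
    	spin_unlock(&pt->lock);
    	return p;
    }
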
260 enum ttm_caching caching, unsigned int order) in ttm_pool_type_init() argument
264 pt->order = order; in ttm_pool_type_init()
283 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); in ttm_pool_type_fini()
286 /* Return the pool_type to use for the given caching and order */
289 unsigned int order) in ttm_pool_select_type() argument
292 return &pool->caching[caching].orders[order]; in ttm_pool_select_type()
298 return &global_dma32_write_combined[order]; in ttm_pool_select_type()
300 return &global_write_combined[order]; in ttm_pool_select_type()
303 return &global_dma32_uncached[order]; in ttm_pool_select_type()
305 return &global_uncached[order]; in ttm_pool_select_type()
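
Taken together, the returns above form a small dispatch: per-device pools when the pool allocates through the DMA API, otherwise one of the global (optionally DMA32-limited) pools keyed by caching and order. A condensed sketch of that selection:

    /* Condensed from ttm_pool_select_type(); NULL means "no pool,
     * allocate and free pages directly".
     */
    static struct ttm_pool_type *select_type_sketch(struct ttm_pool *pool,
    						    enum ttm_caching caching,
    						    unsigned int order)
    {
    	if (pool->use_dma_alloc)
    		return &pool->caching[caching].orders[order];

    	switch (caching) {
    	case ttm_write_combined:
    		return pool->use_dma32 ? &global_dma32_write_combined[order] :
    					 &global_write_combined[order];
    	case ttm_uncached:
    		return pool->use_dma32 ? &global_dma32_uncached[order] :
    					 &global_uncached[order];
    	default:
    		return NULL;	/* cached pages are not pooled here */
    	}
    }
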
329 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); in ttm_pool_shrink()
330 num_pages = 1 << pt->order; in ttm_pool_shrink()
339 /* Return the allocation order for a page */
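
The order is read back from wherever the allocation path stored it: page->private for plain allocations (line 99) or the low bits of the packed vaddr for DMA-API allocations (line 123). A kernel-context sketch of that lookup:

    /* Sketch of the order lookup used at free time */
    static unsigned int page_order_sketch(struct ttm_pool *pool,
    					  struct page *p)
    {
    	if (pool->use_dma_alloc) {
    		struct ttm_pool_dma *dma = (void *)p->private;

    		return dma->vaddr & ~PAGE_MASK;	/* low bits of packed vaddr */
    	}

    	return p->private;	/* stored by ttm_pool_alloc_page() */
    }
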
352 static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order, in ttm_pool_page_allocated() argument
361 r = ttm_pool_map(pool, order, p, dma_addr); in ttm_pool_page_allocated()
366 *num_pages -= 1 << order; in ttm_pool_page_allocated()
367 for (i = 1 << order; i; --i, ++(*pages), ++p) in ttm_pool_page_allocated()
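
The loop body is not part of the match above; roughly, it hands each of the 1 << order constituent pages to the caller while both cursors advance in lockstep:

    /* Approximate shape of the bookkeeping in ttm_pool_page_allocated() */
    *num_pages -= 1 << order;	/* one order-N block covers 2^N slots */
    for (i = 1 << order; i; --i, ++(*pages), ++p)
    	**pages = p;		/* each page gets its own array slot */
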
391 unsigned int order; in ttm_pool_free_range() local
397 order = ttm_pool_page_order(pool, *pages); in ttm_pool_free_range()
398 nr = (1UL << order); in ttm_pool_free_range()
402 pt = ttm_pool_select_type(pool, caching, order); in ttm_pool_free_range()
406 ttm_pool_free_page(pool, caching, order, *pages); in ttm_pool_free_range()
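
The free path therefore recycles when it can: if ttm_pool_select_type() finds a matching pool type the pages are given back to it, and only otherwise are they actually freed. A sketch of that decision as it appears in ttm_pool_free_range():

    pt = ttm_pool_select_type(pool, caching, order);
    if (pt)
    	ttm_pool_type_give(pt, *pages);	/* park for later reuse */
    else
    	ttm_pool_free_page(pool, caching, order, *pages); /* really free */
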
432 unsigned int order; in ttm_pool_alloc() local
450 for (order = min_t(unsigned int, MAX_PAGE_ORDER, __fls(num_pages)); in ttm_pool_alloc()
452 order = min_t(unsigned int, order, __fls(num_pages))) { in ttm_pool_alloc()
456 pt = ttm_pool_select_type(pool, tt->caching, order); in ttm_pool_alloc()
466 r = ttm_pool_page_allocated(pool, order, p, in ttm_pool_alloc()
474 if (num_pages < (1 << order)) in ttm_pool_alloc()
482 while (num_pages >= (1 << order) && in ttm_pool_alloc()
483 (p = ttm_pool_alloc_page(pool, gfp_flags, order))) { in ttm_pool_alloc()
492 r = ttm_pool_page_allocated(pool, order, p, &dma_addr, in ttm_pool_alloc()
501 if (order) { in ttm_pool_alloc()
502 --order; in ttm_pool_alloc()
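
The allocation loop starts at the largest order that fits (clamped to MAX_PAGE_ORDER) and, when an order cannot be satisfied, the --order above steps down one size instead of failing the whole request. A standalone userspace sketch of the resulting schedule, with fls_sketch() standing in for the kernel's __fls():

    #include <stdio.h>

    #define MAX_PAGE_ORDER 10	/* kernel's cap on allocation order */

    static unsigned int fls_sketch(unsigned long n)	/* __fls() stand-in */
    {
    	unsigned int r = 0;

    	while (n >>= 1)
    		++r;
    	return r;
    }

    int main(void)
    {
    	unsigned long num_pages = 1029;	/* example request */
    	unsigned int order;

    	order = fls_sketch(num_pages);
    	if (order > MAX_PAGE_ORDER)
    		order = MAX_PAGE_ORDER;

    	while (num_pages) {
    		printf("order %u -> %lu pages\n", order, 1UL << order);
    		num_pages -= 1UL << order;
    		if (num_pages && fls_sketch(num_pages) < order)
    			order = fls_sketch(num_pages);
    	}
    	return 0;	/* prints orders 10, 2 and 0 for 1029 pages */
    }

For 1029 pages this yields one 1024-page block, one 4-page block and a single page; the failure fallback (--order) is omitted here because the sketch's "allocations" never fail.
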
517 ttm_pool_free_page(pool, page_caching, order, p); in ttm_pool_alloc()
653 /* Print a nice header for the order */