Lines Matching full:pages

26 /* Pooling of allocated pages is necessary because changing the caching
63 MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
79 /* Allocate pages of size 1 << order with the given gfp_flags */
89 * Mapping pages directly into a userspace process and calling in ttm_pool_alloc_page()
132 /* Reset the caching and pages of size 1 << order */
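The matched comments at 79 and 132 bracket the low-level page helpers: each pool entry is one allocation of 1 << order contiguous pages, freed again as a unit. A minimal sketch of that pattern, assuming the core page allocator API (alloc_pages()/__free_pages()); the example_* names are illustrative, and the real helpers also cover a DMA-coherent allocation path and the caching reset that this sketch leaves out.

static struct page *example_alloc_page(gfp_t gfp_flags, unsigned int order)
{
	/* One contiguous block of 1 << order pages per pool entry */
	return alloc_pages(gfp_flags, order);
}

static void example_free_page(struct page *p, unsigned int order)
{
	__free_pages(p, order);
}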
163 /* Apply a new caching to an array of pages */
185 /* Map pages of 1 << order size and fill the DMA address array */
212 /* Unmap pages of 1 << order size */
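The two comments above describe the map/unmap helpers. A minimal sketch of the idea, assuming the generic DMA API (dma_map_page()/dma_unmap_page()); the function and parameter names are illustrative, and the real code also handles the coherent-allocation case where the address is already known.

static int example_pool_map(struct device *dev, unsigned int order,
			    struct page *p, dma_addr_t **dma_addr)
{
	size_t size = (size_t)PAGE_SIZE << order;
	dma_addr_t addr;
	unsigned int i;

	addr = dma_map_page(dev, p, 0, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))
		return -EFAULT;

	/* One entry per PAGE_SIZE chunk, advancing the caller's cursor */
	for (i = 1 << order; i; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}
	return 0;
}

static void example_pool_unmap(struct device *dev, unsigned int order,
			       dma_addr_t addr)
{
	dma_unmap_page(dev, addr, (size_t)PAGE_SIZE << order, DMA_BIDIRECTIONAL);
}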
224 /* Give pages into a specific pool_type */
237 list_add(&p->lru, &pt->pages); in ttm_pool_type_give()
242 /* Take pages from a specific pool_type, return NULL when nothing available */
248 p = list_first_entry_or_null(&pt->pages, typeof(*p), lru); in ttm_pool_type_take()
266 INIT_LIST_HEAD(&pt->pages); in ttm_pool_type_init()
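The give/take matches above are the heart of the pool: freed pages are pushed onto a per-type list and handed out again most-recently-freed first. A simplified sketch of that LIFO, assuming the kernel list and spinlock primitives; the real functions additionally clear the pages and maintain a global page counter.

struct example_pool_type {
	spinlock_t lock;
	struct list_head pages;		/* free pages of one order/caching combo */
};

static void example_type_give(struct example_pool_type *pt, struct page *p)
{
	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);	/* LIFO: hand out the hottest pages first */
	spin_unlock(&pt->lock);
}

static struct page *example_type_take(struct example_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p)
		list_del(&p->lru);
	spin_unlock(&pt->lock);

	return p;	/* NULL when this pool_type is empty */
}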
273 /* Remove a pool_type from the global shrinker list and free all pages */
314 /* Free pages using the global shrinker list */
355 struct page ***pages) in ttm_pool_page_allocated() argument
367 for (i = 1 << order; i; --i, ++(*pages), ++p) in ttm_pool_page_allocated()
368 **pages = p; in ttm_pool_page_allocated()
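The loop matched at 367/368 shows how ttm_pool_page_allocated() reports an allocation: it writes all 1 << order struct page pointers into the caller's array and advances the caller's cursor through a pointer-to-pointer argument. The same idiom in isolation (illustrative name):

static void example_fill_pages(unsigned int order, struct page *p,
			       struct page ***pages)
{
	unsigned int i;

	/* Store every page of the block and leave *pages pointing at the
	 * next free slot for the caller. */
	for (i = 1 << order; i; --i, ++(*pages), ++p)
		**pages = p;
}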
374 * ttm_pool_free_range() - Free a range of TTM pages
382 * pages with different attributes if allocation hit an error without being
390 struct page **pages = &tt->pages[start_page]; in ttm_pool_free_range() local
394 for (i = start_page; i < end_page; i += nr, pages += nr) { in ttm_pool_free_range()
397 order = ttm_pool_page_order(pool, *pages); in ttm_pool_free_range()
404 ttm_pool_type_give(pt, *pages); in ttm_pool_free_range()
406 ttm_pool_free_page(pool, caching, order, *pages); in ttm_pool_free_range()
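ttm_pool_free_range() (matches 374-406) walks the page array in chunks: the order recorded for the first page of a chunk determines how many slots it covers, and each chunk is either given back to a matching pool_type or really freed. A condensed sketch of that walk; example_page_order() and example_select_type() are placeholders for the pool lookups in the real code, and example_type_give()/example_free_page() are the sketches above.

static void example_free_range(struct page **pages, pgoff_t start_page,
			       pgoff_t end_page)
{
	pgoff_t i, nr;

	/* As in the real code, pages already points at the start_page'th slot */
	for (i = start_page; i < end_page; i += nr, pages += nr) {
		unsigned int order = example_page_order(*pages);	/* hypothetical */
		struct example_pool_type *pt;

		nr = 1UL << order;			/* slots covered by this chunk */

		pt = example_select_type(order);	/* hypothetical lookup */
		if (pt)
			example_type_give(pt, *pages);	/* recycle into the pool */
		else
			example_free_page(*pages, order);	/* really free */
	}
}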
417 * Fill the ttm_tt object with pages and also make sure to DMA map them when
427 struct page **caching = tt->pages; in ttm_pool_alloc()
428 struct page **pages = tt->pages; in ttm_pool_alloc() local
459 r = ttm_pool_apply_caching(caching, pages, in ttm_pool_alloc()
464 caching = pages; in ttm_pool_alloc()
469 &pages); in ttm_pool_alloc()
473 caching = pages; in ttm_pool_alloc()
486 r = ttm_pool_apply_caching(caching, pages, in ttm_pool_alloc()
490 caching = pages; in ttm_pool_alloc()
493 &num_pages, &pages); in ttm_pool_alloc()
497 caching = pages; in ttm_pool_alloc()
510 r = ttm_pool_apply_caching(caching, pages, tt->caching); in ttm_pool_alloc()
521 caching_divide = caching - tt->pages; in ttm_pool_alloc()
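The ttm_pool_alloc() matches above all revolve around two cursors into tt->pages: pages is the next slot to fill, caching marks the start of the run whose caching attribute has not yet been converted, and caching_divide records that split for the error path. The expensive conversion is applied per run rather than per page. A stripped-down sketch of the batching only; every example_* helper is a placeholder, not the kernel code.

static int example_alloc(struct page **page_array, unsigned long num_pages)
{
	struct page **caching = page_array;	/* first slot of the unconverted run */
	struct page **pages = page_array;	/* next slot to fill */
	int r;

	while (num_pages--) {
		struct page *p;

		/* Switching to a pool with a different caching attribute:
		 * convert the run collected so far in one go and start a
		 * new run here. */
		if (example_switching_pools()) {	/* hypothetical condition */
			r = example_apply_caching(caching, pages - caching);
			if (r)
				return r;	/* real code uses caching_divide to reset only converted pages */
			caching = pages;
		}

		p = example_alloc_one();		/* hypothetical */
		if (!p)
			return -ENOMEM;
		*pages++ = p;
	}

	/* Convert whatever run is still pending at the end */
	return example_apply_caching(caching, pages - caching);
}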
530 * ttm_pool_free - Free the backing pages from a ttm_tt object
532 * @pool: Pool to give pages back to.
535 * Give the backing pages back to a pool or free them
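Together, ttm_pool_alloc() and ttm_pool_free() are the two entry points the rest of TTM uses: the first fills tt->pages (and, where needed, the DMA address array), the second recycles the pages into the pool or frees them. A usage sketch, assuming the signatures int ttm_pool_alloc(struct ttm_pool *, struct ttm_tt *, struct ttm_operation_ctx *) and void ttm_pool_free(struct ttm_pool *, struct ttm_tt *); error handling trimmed.

static int example_populate(struct ttm_pool *pool, struct ttm_tt *tt,
			    struct ttm_operation_ctx *ctx)
{
	int r;

	r = ttm_pool_alloc(pool, tt, ctx);	/* fill tt->pages */
	if (r)
		return r;

	/* ... use the buffer ... */

	ttm_pool_free(pool, tt);		/* give the backing pages back */
	return 0;
}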
595 * Free all pages in the pool and unregister the types from the global
609 * that no shrinker is concurrently freeing pages from the pool. in ttm_pool_fini()
615 /* As long as pages are available make sure to release at least one */
628 /* Return the number of pages available or SHRINK_EMPTY if we have none */
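The two comments above describe the shrinker callbacks: scan must release at least one page as long as anything is pooled, and count reports the pooled total or SHRINK_EMPTY when there is nothing to reclaim. A sketch of both, assuming the struct shrinker/shrink_control API; example_shrink_one() and the counter are placeholders for the global pool list and page counter in the real code.

static atomic_long_t example_allocated_pages;	/* placeholder pool-wide counter */

static unsigned long example_shrinker_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	/* Release at least one page as long as anything is pooled */
	do
		num_freed += example_shrink_one();	/* hypothetical */
	while (!num_freed && atomic_long_read(&example_allocated_pages));

	return num_freed;
}

static unsigned long example_shrinker_count(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num = atomic_long_read(&example_allocated_pages);

	return num ? num : SHRINK_EMPTY;
}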
638 /* Count the number of pages available in a pool_type */
646 list_for_each_entry(p, &pt->pages, lru) in ttm_pool_type_count()
675 /* Dump the total amount of allocated pages */
765 * @num_pages: default number of pages