Lines Matching full:pool
27 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
29 * - Pool collects recently freed pages for reuse (and hooks up to
70 * The pool structure. There are up to nine pools:
80 * @type: Type of the pool
82 * used with irqsave/irqrestore variants because the pool allocator may be called
84 * @free_list: Pool of pages that are free to be used. No order requirements.
89 * @nfrees: Stats on pages freed (updated when the pool shrinks).
90 * @nrefills: Stats on pool refills (updated when the pool is grown).
92 * @name: Name of the pool.
117 * huge pool
129 * Limits for the pool. They are handled without locks because the only place where
144 * @dev: The 'struct device' associated with the 'pool'
145 * @pool: The 'struct dma_pool' associated with the 'dev'
150 struct dma_pool *pool; member
158 * @options: Limits for the pool.
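Read together, the @-field descriptions above sketch the per-pool bookkeeping: an irq-safe lock, a free list of recently freed pages, in-use/free counters, refill/free statistics, plus a small device/pool pairing structure. As a rough illustration only (member types, the name-buffer sizes and the exact layout are assumptions, not the verbatim kernel definitions), the structures these comments document look roughly like this:

struct dma_pool {
	struct list_head pools;      /* link in the per-device dma_pools list */
	enum pool_type type;         /* flag combination: IS_WC / IS_UC / IS_CACHED / IS_DMA32 / IS_HUGE */
	spinlock_t lock;             /* taken with irqsave/irqrestore variants, see above */
	struct list_head free_list;  /* recently freed pages kept for reuse, no ordering */
	struct device *dev;
	unsigned size;               /* PAGE_SIZE, or HPAGE_PMD_SIZE for the huge pool */
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees;        /* stats: pages freed while the pool shrinks */
	unsigned long nrefills;      /* stats: how often the pool was grown */
	gfp_t gfp_flags;
	char name[13];               /* e.g. "wc,uc", composed in ttm_dma_pool_init() */
	char dev_name[64];           /* assumed size; device name used in debug messages */
};

/* The device/pool pairing described by the @dev and @pool lines above. */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};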
265 static int ttm_set_pages_caching(struct dma_pool *pool, in ttm_set_pages_caching() argument
270 if (pool->type & IS_UC) { in ttm_set_pages_caching()
274 pool->dev_name, cpages); in ttm_set_pages_caching()
276 if (pool->type & IS_WC) { in ttm_set_pages_caching()
280 pool->dev_name, cpages); in ttm_set_pages_caching()
285 static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) in __ttm_dma_free_page() argument
290 if (pool->type & IS_HUGE) in __ttm_dma_free_page()
293 dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs); in __ttm_dma_free_page()
298 static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool) in __ttm_dma_alloc_page() argument
308 if (pool->type & IS_HUGE) in __ttm_dma_alloc_page()
311 vaddr = dma_alloc_attrs(pool->dev, pool->size, &d_page->dma, in __ttm_dma_alloc_page()
312 pool->gfp_flags, attrs); in __ttm_dma_alloc_page()
319 if (pool->type & IS_HUGE) in __ttm_dma_alloc_page()
343 static void ttm_pool_update_free_locked(struct dma_pool *pool, in ttm_pool_update_free_locked() argument
346 pool->npages_free -= freed_pages; in ttm_pool_update_free_locked()
347 pool->nfrees += freed_pages; in ttm_pool_update_free_locked()
352 static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) in ttm_dma_page_put() argument
357 /* Don't set WB on WB page pool. */ in ttm_dma_page_put()
358 if (!(pool->type & IS_CACHED)) { in ttm_dma_page_put()
359 num_pages = pool->size / PAGE_SIZE; in ttm_dma_page_put()
362 pool->dev_name, num_pages); in ttm_dma_page_put()
366 __ttm_dma_free_page(pool, d_page); in ttm_dma_page_put()
369 static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, in ttm_dma_pages_put() argument
374 if (pool->type & IS_HUGE) { in ttm_dma_pages_put()
376 ttm_dma_page_put(pool, d_page); in ttm_dma_pages_put()
381 /* Don't set WB on WB page pool. */ in ttm_dma_pages_put()
382 if (npages && !(pool->type & IS_CACHED) && in ttm_dma_pages_put()
385 pool->dev_name, npages); in ttm_dma_pages_put()
389 __ttm_dma_free_page(pool, d_page); in ttm_dma_pages_put()
394 * Free pages from pool.
399 * @pool: The pool to free the pages from.
400 * @nr_free: Number of pages to free; pass FREE_ALL_PAGES to free every page in the pool.
403 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, in ttm_dma_page_pool_free() argument
425 pr_debug("%s: Failed to allocate memory for pool free operation\n", in ttm_dma_page_pool_free()
426 pool->dev_name); in ttm_dma_page_pool_free()
431 spin_lock_irqsave(&pool->lock, irq_flags); in ttm_dma_page_pool_free()
434 list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list, in ttm_dma_page_pool_free()
446 ttm_pool_update_free_locked(pool, freed_pages); in ttm_dma_page_pool_free()
449 * we unlock the pool to prevent stalling. in ttm_dma_page_pool_free()
451 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_page_pool_free()
453 ttm_dma_pages_put(pool, &d_pages, pages_to_free, in ttm_dma_page_pool_free()
481 /* remove range of pages from the pool */ in ttm_dma_page_pool_free()
483 ttm_pool_update_free_locked(pool, freed_pages); in ttm_dma_page_pool_free()
487 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_page_pool_free()
490 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); in ttm_dma_page_pool_free()
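The matches from ttm_dma_page_pool_free() show the shape of the shrink path: pages are unlinked from free_list while holding the pool lock, the counters are updated through ttm_pool_update_free_locked(), and the pages are actually released only after the lock has been dropped so other pool users are not stalled. A simplified sketch of that pattern (the real function batches pages into a pages_to_free array, handles FREE_ALL_PAGES and restarts the walk after unlocking; all of that is omitted here):

/* Sketch only: free up to @nr_free pages from @pool's free list. */
static unsigned pool_free_sketch(struct dma_pool *pool, unsigned nr_free)
{
	struct dma_page *d_page, *tmp;
	unsigned long irq_flags;
	unsigned freed_pages = 0;
	LIST_HEAD(d_pages);

	spin_lock_irqsave(&pool->lock, irq_flags);
	list_for_each_entry_safe_reverse(d_page, tmp, &pool->free_list, page_list) {
		if (freed_pages >= nr_free)
			break;
		/* unhook the page under the lock; release it below */
		list_move(&d_page->page_list, &d_pages);
		++freed_pages;
	}
	ttm_pool_update_free_locked(pool, freed_pages);
	/* drop the lock before touching caching state / DMA memory */
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	list_for_each_entry_safe(d_page, tmp, &d_pages, page_list)
		ttm_dma_page_put(pool, d_page);

	/* report how many of the requested pages are still left to free */
	return nr_free - freed_pages;
}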
500 struct dma_pool *pool; in ttm_dma_free_pool() local
509 pool = p->pool; in ttm_dma_free_pool()
510 if (pool->type != type) in ttm_dma_free_pool()
518 list_for_each_entry_reverse(pool, &dev->dma_pools, pools) { in ttm_dma_free_pool()
519 if (pool->type != type) in ttm_dma_free_pool()
523 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true); in ttm_dma_free_pool()
524 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); in ttm_dma_free_pool()
529 list_del(&pool->pools); in ttm_dma_free_pool()
530 kfree(pool); in ttm_dma_free_pool()
538 * Note that the pool might have already been freed earlier.
542 struct dma_pool *pool = *(struct dma_pool **)res; in ttm_dma_pool_release() local
544 if (pool) in ttm_dma_pool_release()
545 ttm_dma_free_pool(dev, pool->type); in ttm_dma_pool_release()
559 struct dma_pool *pool = NULL, **ptr; in ttm_dma_pool_init() local
573 pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL, in ttm_dma_pool_init()
575 if (!pool) in ttm_dma_pool_init()
585 sec_pool->pool = pool; in ttm_dma_pool_init()
587 INIT_LIST_HEAD(&pool->free_list); in ttm_dma_pool_init()
588 INIT_LIST_HEAD(&pool->pools); in ttm_dma_pool_init()
589 spin_lock_init(&pool->lock); in ttm_dma_pool_init()
590 pool->dev = dev; in ttm_dma_pool_init()
591 pool->npages_free = pool->npages_in_use = 0; in ttm_dma_pool_init()
592 pool->nfrees = 0; in ttm_dma_pool_init()
593 pool->gfp_flags = flags; in ttm_dma_pool_init()
596 pool->size = HPAGE_PMD_SIZE; in ttm_dma_pool_init()
601 pool->size = PAGE_SIZE; in ttm_dma_pool_init()
602 pool->type = type; in ttm_dma_pool_init()
603 pool->nrefills = 0; in ttm_dma_pool_init()
604 p = pool->name; in ttm_dma_pool_init()
607 p += scnprintf(p, sizeof(pool->name) - (p - pool->name), in ttm_dma_pool_init()
614 snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s", in ttm_dma_pool_init()
621 list_add(&pool->pools, &dev->dma_pools); in ttm_dma_pool_init()
624 *ptr = pool; in ttm_dma_pool_init()
627 return pool; in ttm_dma_pool_init()
631 kfree(pool); in ttm_dma_pool_init()
638 struct dma_pool *pool, *tmp; in ttm_dma_find_pool() local
654 list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) in ttm_dma_find_pool()
655 if (pool->type == type) in ttm_dma_find_pool()
656 return pool; in ttm_dma_find_pool()
663 * pool.
665 static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool, in ttm_dma_handle_caching_state_failure() argument
683 __ttm_dma_free_page(pool, d_page); in ttm_dma_handle_caching_state_failure()
698 static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, in ttm_dma_pool_alloc_new_pages() argument
716 pool->dev_name); in ttm_dma_pool_alloc_new_pages()
722 pool->dev_name, pool->name, current->pid, count); in ttm_dma_pool_alloc_new_pages()
725 dma_p = __ttm_dma_alloc_page(pool); in ttm_dma_pool_alloc_new_pages()
728 pool->dev_name, i); in ttm_dma_pool_alloc_new_pages()
730 /* store already allocated pages in the pool after in ttm_dma_pool_alloc_new_pages()
733 r = ttm_set_pages_caching(pool, caching_array, in ttm_dma_pool_alloc_new_pages()
737 pool, d_pages, caching_array, in ttm_dma_pool_alloc_new_pages()
754 npages = pool->size / PAGE_SIZE; in ttm_dma_pool_alloc_new_pages()
759 r = ttm_set_pages_caching(pool, caching_array, in ttm_dma_pool_alloc_new_pages()
763 pool, d_pages, caching_array, in ttm_dma_pool_alloc_new_pages()
773 r = ttm_set_pages_caching(pool, caching_array, cpages); in ttm_dma_pool_alloc_new_pages()
775 ttm_dma_handle_caching_state_failure(pool, d_pages, in ttm_dma_pool_alloc_new_pages()
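ttm_dma_pool_alloc_new_pages() batches the costly caching-attribute changes: freshly allocated pages are collected in caching_array, ttm_set_pages_caching() is called once per full batch, and ttm_dma_handle_caching_state_failure() unwinds the work if that fails. A condensed sketch of that loop follows; the function name and the ->p member used to reach the struct page are assumptions, and the huge-pool case (where one allocation contributes pool->size / PAGE_SIZE subpages, as in the match above) is left out:

static int alloc_new_pages_sketch(struct dma_pool *pool,
				  struct list_head *d_pages, unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	unsigned i, cpages = 0;
	unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
	int r = 0;

	caching_array = kmalloc_array(max_cpages, sizeof(struct page *), GFP_KERNEL);
	if (!caching_array)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			/* flush what was collected so far, then give up */
			r = ttm_set_pages_caching(pool, caching_array, cpages);
			if (r)
				ttm_dma_handle_caching_state_failure(pool,
						d_pages, caching_array, cpages);
			r = -ENOMEM;
			break;
		}
		list_add(&dma_p->page_list, d_pages);
		caching_array[cpages++] = dma_p->p;	/* ->p member assumed */

		if (cpages == max_cpages) {
			r = ttm_set_pages_caching(pool, caching_array, cpages);
			if (r) {
				ttm_dma_handle_caching_state_failure(pool,
						d_pages, caching_array, cpages);
				break;
			}
			cpages = 0;
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}

	kfree(caching_array);
	return r;
}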
786 static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, in ttm_dma_page_pool_fill_locked() argument
790 int r = pool->npages_free; in ttm_dma_page_pool_fill_locked()
792 if (count > pool->npages_free) { in ttm_dma_page_pool_fill_locked()
797 spin_unlock_irqrestore(&pool->lock, *irq_flags); in ttm_dma_page_pool_fill_locked()
801 r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count); in ttm_dma_page_pool_fill_locked()
803 spin_lock_irqsave(&pool->lock, *irq_flags); in ttm_dma_page_pool_fill_locked()
806 list_splice(&d_pages, &pool->free_list); in ttm_dma_page_pool_fill_locked()
807 ++pool->nrefills; in ttm_dma_page_pool_fill_locked()
808 pool->npages_free += count; in ttm_dma_page_pool_fill_locked()
814 pr_debug("%s: Failed to fill %s pool (r:%d)!\n", in ttm_dma_page_pool_fill_locked()
815 pool->dev_name, pool->name, r); in ttm_dma_page_pool_fill_locked()
820 list_splice_tail(&d_pages, &pool->free_list); in ttm_dma_page_pool_fill_locked()
821 pool->npages_free += cpages; in ttm_dma_page_pool_fill_locked()
833 static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool, in ttm_dma_pool_get_pages() argument
842 spin_lock_irqsave(&pool->lock, irq_flags); in ttm_dma_pool_get_pages()
843 count = ttm_dma_page_pool_fill_locked(pool, &irq_flags); in ttm_dma_pool_get_pages()
845 d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); in ttm_dma_pool_get_pages()
849 pool->npages_in_use += 1; in ttm_dma_pool_get_pages()
850 pool->npages_free -= 1; in ttm_dma_pool_get_pages()
852 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_pool_get_pages()
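ttm_dma_page_pool_fill_locked() and ttm_dma_pool_get_pages() together show how a page is handed out: if the free list cannot cover the request, the pool lock is dropped, new pages are allocated with ttm_dma_pool_alloc_new_pages(), the lock is retaken and the new pages are spliced onto free_list; the caller then takes the first entry off the list and adjusts the in-use/free counters, all under the irq-safe lock. A compressed, single-page sketch of that hand-off (the real fill path allocates in larger chunks and copes with partial failure; the pages_list member of the ttm_dma object is taken from the unpopulate matches below):

static struct dma_page *get_page_sketch(struct dma_pool *pool,
					struct ttm_dma_tt *ttm_dma)
{
	struct dma_page *d_page;
	unsigned long irq_flags;

	spin_lock_irqsave(&pool->lock, irq_flags);

	if (!pool->npages_free) {
		/* refill: allocate outside the lock, then splice the result in */
		LIST_HEAD(d_pages);

		spin_unlock_irqrestore(&pool->lock, irq_flags);
		if (ttm_dma_pool_alloc_new_pages(pool, &d_pages, 1))
			return NULL;
		spin_lock_irqsave(&pool->lock, irq_flags);
		list_splice(&d_pages, &pool->free_list);
		++pool->nrefills;
		++pool->npages_free;
	}

	d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
	list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
	pool->npages_in_use += 1;
	pool->npages_free -= 1;

	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return d_page;
}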
891 struct dma_pool *pool; in ttm_dma_populate() local
912 pool = ttm_dma_find_pool(dev, type | IS_HUGE); in ttm_dma_populate()
913 if (!pool) { in ttm_dma_populate()
916 pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE); in ttm_dma_populate()
917 if (IS_ERR_OR_NULL(pool)) in ttm_dma_populate()
924 d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i); in ttm_dma_populate()
929 pool->size, ctx); in ttm_dma_populate()
949 pool = ttm_dma_find_pool(dev, type); in ttm_dma_populate()
950 if (!pool) { in ttm_dma_populate()
953 pool = ttm_dma_pool_init(dev, gfp_flags, type); in ttm_dma_populate()
954 if (IS_ERR_OR_NULL(pool)) in ttm_dma_populate()
959 d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i); in ttm_dma_populate()
966 pool->size, ctx); in ttm_dma_populate()
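ttm_dma_populate() follows a find-or-create pattern for each pool: look the pool up by type with ttm_dma_find_pool(), create it on demand with ttm_dma_pool_init(), then pull pages one at a time through ttm_dma_pool_get_pages() and charge each one against the TTM memory accounting (the pool->size, ctx arguments in the matches above). It first tries the huge pool (type | IS_HUGE) and falls back to the regular pool. A stripped-down sketch of the regular-pool half (the function name and the gfp_flags choice are illustrative; the accounting call and its error unwinding are only hinted at in a comment):

static int populate_sketch(struct ttm_dma_tt *ttm_dma, struct device *dev,
			   struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_page *d_page;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);

	/* find-or-create the pool matching this caching type */
	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		gfp_t gfp_flags = GFP_KERNEL;	/* simplified; the real code derives this */

		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (!d_page) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
		/*
		 * The real code also charges each page against the memory
		 * global using pool->size and ctx (see the matches above)
		 * and unpopulates on failure; omitted here.
		 */
	}
	return 0;
}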
990 /* Put all pages in the pages list back into the correct pool to wait for reuse */
995 struct dma_pool *pool; in ttm_dma_unpopulate() local
1005 pool = ttm_dma_find_pool(dev, type | IS_HUGE); in ttm_dma_unpopulate()
1006 if (pool) { in ttm_dma_unpopulate()
1016 pool->size); in ttm_dma_unpopulate()
1019 ttm_dma_page_put(pool, d_page); in ttm_dma_unpopulate()
1022 spin_lock_irqsave(&pool->lock, irq_flags); in ttm_dma_unpopulate()
1023 pool->npages_in_use -= count; in ttm_dma_unpopulate()
1024 pool->nfrees += count; in ttm_dma_unpopulate()
1025 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_unpopulate()
1029 pool = ttm_dma_find_pool(dev, type); in ttm_dma_unpopulate()
1030 if (!pool) in ttm_dma_unpopulate()
1033 is_cached = (ttm_dma_find_pool(pool->dev, in ttm_dma_unpopulate()
1034 ttm_to_type(ttm->page_flags, tt_cached)) == pool); in ttm_dma_unpopulate()
1045 pool->size); in ttm_dma_unpopulate()
1050 ttm_dma_page_put(pool, d_page); in ttm_dma_unpopulate()
1053 spin_lock_irqsave(&pool->lock, irq_flags); in ttm_dma_unpopulate()
1054 pool->npages_in_use -= count; in ttm_dma_unpopulate()
1056 pool->nfrees += count; in ttm_dma_unpopulate()
1058 pool->npages_free += count; in ttm_dma_unpopulate()
1059 list_splice(&ttm_dma->pages_list, &pool->free_list); in ttm_dma_unpopulate()
1064 if (pool->npages_free >= (_manager->options.max_size + in ttm_dma_unpopulate()
1066 npages = pool->npages_free - _manager->options.max_size; in ttm_dma_unpopulate()
1068 spin_unlock_irqrestore(&pool->lock, irq_flags); in ttm_dma_unpopulate()
1076 /* shrink the pool if necessary (only for !is_cached pools) */ in ttm_dma_unpopulate()
1078 ttm_dma_page_pool_free(pool, npages, false); in ttm_dma_unpopulate()
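ttm_dma_unpopulate() is the reverse path announced by the comment at line 990: cached pools release every page immediately with ttm_dma_page_put(), whereas the other pools splice the whole pages_list back onto free_list so the pages can be reused, and if the free count then exceeds the configured maximum the excess is trimmed with ttm_dma_page_pool_free() after dropping the lock. A sketch of that recycle-and-trim step for the non-cached case (the huge-pool pass, the dma_address/page pointer cleanup and the cached-pool branch are omitted):

	/* Non-cached case: recycle the pages, then trim the pool if it grew too big. */
	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	list_splice(&ttm_dma->pages_list, &pool->free_list);
	pool->npages_free += count;

	npages = 0;
	if (pool->npages_free > _manager->options.max_size)
		/* the real check allows a little slack above max_size */
		npages = pool->npages_free - _manager->options.max_size;
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	/* shrink outside the lock; only !is_cached pools keep a free list to trim */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);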
1084 * Callback for mm to request the pool to reduce the number of pages held.
1088 * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
1121 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true); in ttm_dma_pool_shrink_scan()
1125 p->pool->dev_name, p->pool->name, current->pid, in ttm_dma_pool_shrink_scan()
1142 count += p->pool->npages_free; in ttm_dma_pool_shrink_count()
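The shrinker callbacks tie the pools into memory reclaim: the scan side walks the registered device pools and asks each one to give pages back via ttm_dma_page_pool_free(), while the count side simply sums npages_free over all pools (line 1142). A rough sketch of the scan callback, assuming the _manager->pools list and that ttm_dma_page_pool_free() returns how many of the requested pages could not be freed (the real code also staggers the starting pool between invocations and uses a trylock on the manager mutex):

static unsigned long
shrink_scan_sketch(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long freed = 0;
	unsigned shrink_pages = sc->nr_to_scan;

	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free = shrink_pages;

		if (!nr_free)
			break;
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;
	}
	return freed;
}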
1166 pr_info("Initializing DMA pool allocator\n"); in ttm_dma_page_alloc_init()
1200 pr_info("Finalizing DMA pool allocator\n"); in ttm_dma_page_alloc_fini()
1204 dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name, in ttm_dma_page_alloc_fini()
1207 ttm_dma_pool_match, p->pool)); in ttm_dma_page_alloc_fini()
1208 ttm_dma_free_pool(p->dev, p->pool->type); in ttm_dma_page_alloc_fini()
1217 struct dma_pool *pool = NULL; in ttm_dma_page_alloc_debugfs() local
1220 seq_printf(m, "No pool allocator running.\n"); in ttm_dma_page_alloc_debugfs()
1223 seq_printf(m, " pool refills pages freed inuse available name\n"); in ttm_dma_page_alloc_debugfs()
1229 pool = p->pool; in ttm_dma_page_alloc_debugfs()
1231 pool->name, pool->nrefills, in ttm_dma_page_alloc_debugfs()
1232 pool->nfrees, pool->npages_in_use, in ttm_dma_page_alloc_debugfs()
1233 pool->npages_free, in ttm_dma_page_alloc_debugfs()
1234 pool->dev_name); in ttm_dma_page_alloc_debugfs()