/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks pages that are currently in use.
 * - Tracks whether a page is UC, WC or cached (and reverts it to WB
 *   when freed).
 */
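
/*
 * Bird's-eye view of the driver-facing API - an illustrative sketch
 * rather than code taken from any particular driver. A TTM backend
 * populates a buffer's pages from these pools and returns them on
 * teardown:
 *
 *	ret = ttm_dma_populate(ttm_dma, dev);
 *	if (ret)
 *		return ret;
 *	...use ttm_dma->ttm.pages[] and ttm_dma->dma_address[]...
 *	ttm_dma_unpopulate(ttm_dma, dev);
 */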

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		4
#define FREE_ALL_PAGES			(~0U)
/* Flags describing a pool; they are combined into the pool types below. */
#define IS_UNDEFINED			(0)
#define IS_WC				(1<<1)
#define IS_UC				(1<<2)
#define IS_CACHED			(1<<3)
#define IS_DMA32			(1<<4)

enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' pool is for pages that are actively
 * used. The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the inuse_list and free_list from concurrent access. Must
 *   be used with the irqsave/irqrestore variants because the pool allocator
 *   may be called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is very important and
 *   it matches the order in which the TTM pages are put back.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats updated when the pool shrinks.
 * @nrefills: Stats updated when the pool grows.
 * @gfp_flags: Flags to pass to alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link. */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting structure that keeps track of the allocated page along with
 * its DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @mm_shrink: The structure used by [un]register_shrinker
 * @kobj: The kobject backing the sysfs interface.
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			printk(KERN_ERR TTM_PFX
			       "Setting allocation size to %lu "
			       "is not allowed. Recommended size is "
			       "%lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			printk(KERN_WARNING TTM_PFX
			       "Setting allocation size to "
			       "larger than %lu is not recommended.\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
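
/*
 * Illustrative use of the sysfs knobs above, assuming the manager kobject
 * was registered as "dma_pool" under the TTM memory global kobject (see
 * ttm_dma_page_alloc_init below; the full path depends on where that
 * parent is registered):
 *
 *	# Values are exchanged in KiB and stored internally as pages,
 *	# i.e. divided by (PAGE_SIZE >> 10).
 *	echo 262144 > .../dma_pool/pool_max_size
 *	cat .../dma_pool/pool_allocation_size
 */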

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err(TTM_PFX
			       "%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err(TTM_PFX
			       "%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;

	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
	kfree(d_page);
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr)
		d_page->p = virt_to_page(d_page->vaddr);
	else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
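
/*
 * For example, spelling out the flag algebra above:
 *
 *	ttm_to_type(0, tt_uncached)                 == POOL_IS_UC
 *	ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_cached) == POOL_IS_CACHED_DMA32
 *	ttm_to_type(TTM_PAGE_FLAG_DMA32, tt_wc)     == POOL_IS_WC_DMA32
 *
 * which is why there are usually six pools per device.
 */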

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* Set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

/*
 * Free pages from the pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: the pool to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool
 **/
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
				GFP_KERNEL);

	if (!pages_to_free) {
		pr_err(TTM_PFX
		       "%s: Failed to allocate memory for pool free operation.\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
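
/*
 * A worked example of the batching above (numbers are illustrative and
 * assume a 64-bit build with 4 KiB pages): NUM_PAGES_TO_ALLOC is then
 * 4096 / sizeof(struct page *) = 512, so a request to free 2000 pages
 * takes three full passes of 512 pages (each dropping and re-taking the
 * pool lock around the costly caching change) plus a final pass of 464.
 */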

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * This destructor is run when the 'struct device' is freed, though the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls because when dma_pool_destroy is
	 * called the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}
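
/*
 * Note on the name buffer above: the longest name the loop can build is
 * "cached dma32" (12 characters plus the terminating NUL), e.g. for
 * POOL_IS_CACHED_DMA32, which is why pool->name is sized at 13 bytes.
 */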

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM
	 * (put_pages) and calling the dev_res destructor:
	 * ttm_dma_pool_release. The nice thing is that at that point in time
	 * there are no pages associated with the driver, so this function
	 * will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}

/*
 * Free the pages that failed to change their caching state. If there
 * are pages that have already changed their caching state, put them back
 * in the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages and add each of them, together with its DMA
 * address, to the 'd_pages' list, setting the caching according to the
 * pool type. We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err(TTM_PFX
		       "%s: Unable to allocate table for new pages.\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1) {
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid,
			 count);
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err(TTM_PFX "%s: Unable to get page %u.\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such a case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the free list. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return count of pages still required to fulfill the request.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page,
					  page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}

/*
 * On success the pages list will hold the requested number of correctly
 * cached pages; on failure a negative value (-ENOMEM, etc.) is returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
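
/*
 * Illustrative sketch (not taken from any particular driver) of how a
 * TTM backend might wire these entry points up; the wrapper functions
 * and the my_drm_dev pointer below are hypothetical:
 *
 *	static int my_ttm_populate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *ttm_dma =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		return ttm_dma_populate(ttm_dma, my_drm_dev->dev);
 *	}
 *
 *	static void my_ttm_unpopulate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *ttm_dma =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *
 *		ttm_dma_unpopulate(ttm_dma, my_drm_dev->dev);
 *	}
 */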

/* Get a good estimate of how many pages are free in the pools */
static int ttm_dma_pool_get_num_unused_pages(void)
{
	struct device_pools *p;
	unsigned total = 0;

	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools)
		total += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return total;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* Make sure the pages array matches the list and count the pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		npages = count;
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC pages
			 * to reduce calls to set_memory_wb */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
		}
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
					 page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* Shrink the pool if necessary (only on !is_cached pools). */
	if (npages)
		ttm_dma_page_pool_free(pool, npages);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/*
 * Callback for mm to request the pool to reduce the number of pages held.
 */
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned idx = 0;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;

	if (list_empty(&_manager->pools))
		return 0;

	mutex_lock(&_manager->lock);
	pool_offset = pool_offset % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
	mutex_unlock(&_manager->lock);
	/* return estimated number of unused pages in the pools */
	return ttm_dma_pool_get_num_unused_pages();
}

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err_manager;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err_manager:
	kfree(_manager);
	_manager = NULL;
err:
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

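/*
 * Hypothetical sample of the debugfs output produced below (the header
 * row prints only the first six entries of h[]; "virt" and "busaddr"
 * are currently unused):
 *
 *	         pool      refills   pages freed    inuse available     name
 *	     wc dma32            3           512       16      112 radeon 0000:01:00.0
 */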
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;

		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
			   pool->name, pool->nrefills,
			   pool->nfrees, pool->npages_in_use,
			   pool->npages_free,
			   pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);