/*
 * Copyright (c) Red Hat Inc.

 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* Simple list-based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - Doesn't track pages that are currently in use
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000

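/*
 * Size sanity note (a hedged example, assuming 4 KiB pages and 8-byte
 * pointers): NUM_PAGES_TO_ALLOC == 4096 / 8 == 512, i.e. allocation and
 * freeing are batched one page worth of struct page pointers at a time.
 */
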
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, printed in debugfs output.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of times the pool has been refilled.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};
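
/*
 * A minimal sketch of the locking pattern required by @lock above,
 * mirroring what the functions below actually do:
 *
 *	unsigned long irq_flags;
 *
 *	spin_lock_irqsave(&pool->lock, irq_flags);
 *	...manipulate pool->list and pool->npages...
 *	spin_unlock_irqrestore(&pool->lock, irq_flags);
 */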

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: kobject used to expose the pool limits in sysfs.
 * @mm_shrink: Shrinker registered with the mm subsystem so that pool pages
 * can be reclaimed under memory pressure.
 * @options: Limits for the pools, see struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			printk(KERN_ERR TTM_PFX
			       "Setting allocation size to %lu "
			       "is not allowed. Recommended size is "
			       "%lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			printk(KERN_WARNING TTM_PFX
			       "Setting allocation size to "
			       "larger than %lu is not recommended.\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
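
/*
 * A worked example of the store/show conversion (hedged; assumes 4 KiB
 * pages): PAGE_SIZE >> 10 == 4, so writing "8192" (KiB) to pool_max_size
 * stores 8192 / 4 == 2048 pages in options.max_size, and ttm_pool_show()
 * below converts it back to KiB on read.
 */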

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
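
/*
 * For reference, the index computed in ttm_get_pool() selects from the
 * union in struct ttm_pool_manager as follows:
 *
 *	cstate       TTM_PAGE_FLAG_DMA32   index   pool
 *	tt_wc        not set               0       wc_pool
 *	tt_uncached  not set               1       uc_pool
 *	tt_wc        set                   2       wc_pool_dma32
 *	tt_uncached  set                   3       uc_pool_dma32
 *	tt_cached    (either)              -       none: cached pages bypass
 *	                                           the pools entirely
 */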

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
				npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES will free all pages in pool
 *
 * Returns the number of pages that remain to be freed; the shrinker uses
 * this to carry leftover work over to the next pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		printk(KERN_ERR TTM_PFX
		       "Failed to allocate memory for pool free operation.\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 */
static int ttm_pool_mm_shrink(struct shrinker *shrink,
			      struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}
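
/*
 * A worked example of the shrink loop above (hedged; exact numbers depend
 * on pool occupancy): with sc->nr_to_scan == 128 and a starting pool that
 * holds only 50 pages, ttm_page_pool_free() frees those 50 and returns 78,
 * so the remaining 78 pages are requested from the next pool in the
 * round-robin order.
 */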

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to uc!\n",
			       cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			printk(KERN_ERR TTM_PFX
			       "Failed to set %d pages to wc!\n",
			       cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. Pages from
 * earlier batches that did change state successfully stay on the output
 * list and are reused by the caller.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		printk(KERN_ERR TTM_PFX
		       "Unable to allocate table for new pages.");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}
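
/*
 * Batching note for ttm_alloc_new_pages() (a hedged example, assuming
 * 4 KiB pages and 8-byte pointers): caching transitions are applied
 * max_cpages == 512 pages at a time, so a request for 1000 pages costs
 * two set_pages_array_*() calls (512 + 488) rather than 1000 per-page
 * transitions.
 */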

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate,	alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			printk(KERN_ERR TTM_PFX
			       "Failed to fill pool (%p).", pool);
			/* If we have any pages left put them to the pool.
			 * Count the pages that were actually allocated, not
			 * the pool contents. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}
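
/*
 * Refill example (hedged; uses the default limits set in
 * ttm_page_alloc_init()): a request for 8 pages when the pool holds only 2
 * is below options.small (16) and above pool->npages, so the pool is topped
 * up with options.alloc_size pages before the request is served.
 */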

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the split point for the requested number of pages. Walk from
	 * whichever end of the list is closer to halve the search space. */
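	/* E.g. (hedged) with pool->npages == 100: count == 30 walks 30
	 * entries forward from the head, while count == 70 walks 31 entries
	 * backward from the tail; either way p lands on the count-th page. */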
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}

/* Put all pages in the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					printk(KERN_ERR TTM_PFX
					       "Erroneous page count. "
					       "Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
}
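
/*
 * Trimming example for the limit check above (hedged; assumes 4 KiB pages,
 * so NUM_PAGES_TO_ALLOC == 512): with options.max_size == 2048 and 2100
 * pages in the pool the excess is only 52 pages, which is rounded up to a
 * full batch of 512 so that the write-back transition is not performed for
 * tiny runs on every put.
 */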

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				printk(KERN_ERR TTM_PFX
				       "Unable to allocate page.");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine the zero flag with the pool's gfp flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list put them back to
			 * the pool. */
			printk(KERN_ERR TTM_PFX
			       "Failed to allocate extra pages "
			       "for large request.");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}
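
/*
 * Bring-up/tear-down sketch (hedged; in-tree the caller is the TTM memory
 * global code, and max_pages is derived from the memory zone sizes there):
 *
 *	if (ttm_page_alloc_init(glob, max_pages))
 *		goto out_no_pool;
 *	...
 *	ttm_page_alloc_fini();
 */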

void ttm_page_alloc_fini(void)
{
	int i;

	printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
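
/*
 * Drivers typically wire the two exported helpers above into their ttm_tt
 * populate callbacks; a hedged sketch (my_ttm_tt_populate/unpopulate are
 * hypothetical driver functions):
 *
 *	static int my_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void my_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 */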

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);