// SPDX-License-Identifier: GPL-2.0
/*
 * ION Memory Allocator generic heap helpers
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <linux/err.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#include "ion.h"

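/**
 * ion_heap_map_kernel - map an ION buffer into the kernel address space
 * @heap:	the heap the buffer was allocated from (unused here)
 * @buffer:	the buffer to map; pages are taken from @buffer->sg_table
 *
 * Builds a flat page array from the buffer's scatterlist and maps it with
 * vmap(), write-combined unless the buffer was allocated ION_FLAG_CACHED.
 * Returns the kernel virtual address, or ERR_PTR(-ENOMEM) on failure.
 *
 * A plausible wiring in a heap's ops table (the my_heap_* names are
 * hypothetical; the helper assignments are the intended use):
 *
 *	static struct ion_heap_ops my_heap_ops = {
 *		.allocate = my_heap_allocate,
 *		.free = my_heap_free,
 *		.map_kernel = ion_heap_map_kernel,
 *		.unmap_kernel = ion_heap_unmap_kernel,
 *		.map_user = ion_heap_map_user,
 *	};
 */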
void *ion_heap_map_kernel(struct ion_heap *heap,
			  struct ion_buffer *buffer)
{
	struct sg_page_iter piter;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->sg_table;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(array_size(npages,
						 sizeof(struct page *)));
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sgtable_page(table, &piter, 0) {
		BUG_ON(tmp - pages >= npages);
		*tmp++ = sg_page_iter_page(&piter);
	}

	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);

	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

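/**
 * ion_heap_unmap_kernel - undo ion_heap_map_kernel()
 * @heap:	the heap the buffer was allocated from (unused here)
 * @buffer:	the buffer whose kernel mapping (@buffer->vaddr) is torn down
 */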
void ion_heap_unmap_kernel(struct ion_heap *heap,
			   struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

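/**
 * ion_heap_map_user - map an ION buffer into a userspace VMA
 * @heap:	the heap the buffer was allocated from (unused here)
 * @buffer:	the buffer backing the mapping
 * @vma:	the VMA from the heap's mmap path; vm_pgoff selects the
 *		starting page within the buffer
 *
 * Remaps the buffer's pages one at a time with remap_pfn_range(), stopping
 * once the VMA is fully populated. Returns 0 on success or the first
 * remap_pfn_range() error.
 */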
int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
		      struct vm_area_struct *vma)
{
	struct sg_page_iter piter;
	struct sg_table *table = buffer->sg_table;
	unsigned long addr = vma->vm_start;
	int ret;

	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
		struct page *page = sg_page_iter_page(&piter);

		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
				      vma->vm_page_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
		if (addr >= vma->vm_end)
			return 0;
	}

	return 0;
}

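/*
 * Zero a batch of pages through a temporary vmap() so that highmem pages
 * and uncached buffers are handled with the right page protection.
 */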
static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
{
	void *addr = vmap(pages, num, VM_MAP, pgprot);

	if (!addr)
		return -ENOMEM;
	memset(addr, 0, PAGE_SIZE * num);
	vunmap(addr);

	return 0;
}

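/*
 * Zero every page in a scatterlist, batching pages 32 at a time to bound
 * the size of each temporary kernel mapping.
 */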
static int ion_heap_sglist_zero(struct sg_table *sgt, pgprot_t pgprot)
{
	int p = 0;
	int ret = 0;
	struct sg_page_iter piter;
	struct page *pages[32];

	for_each_sgtable_page(sgt, &piter, 0) {
		pages[p++] = sg_page_iter_page(&piter);
		if (p == ARRAY_SIZE(pages)) {
			ret = ion_heap_clear_pages(pages, p, pgprot);
			if (ret)
				return ret;
			p = 0;
		}
	}
	if (p)
		ret = ion_heap_clear_pages(pages, p, pgprot);

	return ret;
}

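/**
 * ion_heap_buffer_zero - clear an ION buffer before it is recycled
 * @buffer:	the buffer to zero
 *
 * Picks the page protection that matches the buffer's cache attributes and
 * zeroes all of its pages, so recycled pages never leak stale data to the
 * next client.
 *
 * A minimal sketch of a heap free op using it (my_heap_free and
 * my_heap_release_pages are hypothetical):
 *
 *	static void my_heap_free(struct ion_buffer *buffer)
 *	{
 *		// Skip the zeroing work when the shrinker is reclaiming;
 *		// the pages go back to the system, not to a pool.
 *		if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
 *			ion_heap_buffer_zero(buffer);
 *		my_heap_release_pages(buffer);
 *	}
 */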
int ion_heap_buffer_zero(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	pgprot_t pgprot;

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	return ion_heap_sglist_zero(table, pgprot);
}

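/**
 * ion_heap_freelist_add - queue a buffer for deferred freeing
 * @heap:	heap with deferred free enabled (ION_HEAP_FLAG_DEFER_FREE)
 * @buffer:	buffer to queue
 *
 * Places the buffer on the heap's free list under free_lock and wakes the
 * deferred-free kthread, which destroys the buffer outside the caller's
 * context.
 */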
void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	spin_lock(&heap->free_lock);
	list_add(&buffer->list, &heap->free_list);
	heap->free_list_size += buffer->size;
	spin_unlock(&heap->free_lock);
	wake_up(&heap->waitqueue);
}

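/**
 * ion_heap_freelist_size - total bytes currently queued for deferred free
 * @heap:	heap to query
 */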
size_t ion_heap_freelist_size(struct ion_heap *heap)
{
	size_t size;

	spin_lock(&heap->free_lock);
	size = heap->free_list_size;
	spin_unlock(&heap->free_lock);

	return size;
}

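/*
 * Drain up to @size bytes from the free list, destroying each buffer with
 * free_lock dropped. A @size of 0 means "drain everything queued so far".
 * When @skip_pools is true, each buffer is tagged
 * ION_PRIV_FLAG_SHRINKER_FREE so the heap's free path returns its pages
 * to the system instead of a page pool.
 */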
static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
				       bool skip_pools)
{
	struct ion_buffer *buffer;
	size_t total_drained = 0;

	if (ion_heap_freelist_size(heap) == 0)
		return 0;

	spin_lock(&heap->free_lock);
	if (size == 0)
		size = heap->free_list_size;

	while (!list_empty(&heap->free_list)) {
		if (total_drained >= size)
			break;
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		if (skip_pools)
			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
		total_drained += buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
		spin_lock(&heap->free_lock);
	}
	spin_unlock(&heap->free_lock);

	return total_drained;
}

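/**
 * ion_heap_freelist_drain - synchronously free queued buffers
 * @heap:	heap to drain
 * @size:	number of bytes to drain, 0 for all
 *
 * Returns the number of bytes actually drained.
 */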
size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, false);
}

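/**
 * ion_heap_freelist_shrink - drain queued buffers on behalf of the shrinker
 * @heap:	heap to drain
 * @size:	number of bytes to drain, 0 for all
 *
 * Like ion_heap_freelist_drain(), but marks each buffer so any page pools
 * are bypassed and the memory really returns to the system.
 */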
size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
{
	return _ion_heap_freelist_drain(heap, size, true);
}

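/*
 * Body of the per-heap deferred-free kthread: sleep (freezably) until the
 * free list is non-empty, then pop and destroy one buffer at a time.
 */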
static int ion_heap_deferred_free(void *data)
{
	struct ion_heap *heap = data;

	while (true) {
		struct ion_buffer *buffer;

		wait_event_freezable(heap->waitqueue,
				     ion_heap_freelist_size(heap) > 0);

		spin_lock(&heap->free_lock);
		if (list_empty(&heap->free_list)) {
			spin_unlock(&heap->free_lock);
			continue;
		}
		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
					  list);
		list_del(&buffer->list);
		heap->free_list_size -= buffer->size;
		spin_unlock(&heap->free_lock);
		ion_buffer_destroy(buffer);
	}

	return 0;
}

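/**
 * ion_heap_init_deferred_free - spawn the deferred-free kthread for a heap
 * @heap:	heap to initialize
 *
 * Called once for heaps that set ION_HEAP_FLAG_DEFER_FREE; the thread runs
 * at nice 19 so background freeing never competes with real work. Returns
 * 0 on success or the kthread_run() error.
 *
 * A minimal opt-in sketch (error handling abbreviated; my_heap is a
 * hypothetical, already-allocated heap with a valid name):
 *
 *	my_heap->flags |= ION_HEAP_FLAG_DEFER_FREE;
 *	if (ion_heap_init_deferred_free(my_heap))
 *		pr_err("deferred free setup failed\n");
 */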
int ion_heap_init_deferred_free(struct ion_heap *heap)
{
	INIT_LIST_HEAD(&heap->free_list);
	init_waitqueue_head(&heap->waitqueue);
	heap->task = kthread_run(ion_heap_deferred_free, heap,
				 "%s", heap->name);
	if (IS_ERR(heap->task)) {
		pr_err("%s: creating thread for deferred free failed\n",
		       __func__);
		return PTR_ERR(heap->task);
	}
	sched_set_normal(heap->task, 19);

	return 0;
}

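/*
 * Shrinker "count" callback: report reclaimable pages as the free-list
 * size plus whatever the heap-specific shrink op says it could free (a
 * nr_to_scan of 0 asks ops->shrink for a count only).
 */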
static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
					   struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int total = ion_heap_freelist_size(heap) / PAGE_SIZE;

	if (heap->ops->shrink)
		total += heap->ops->shrink(heap, sc->gfp_mask, 0);

	return total;
}

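/*
 * Shrinker "scan" callback: reclaim from the deferred-free list first,
 * then ask the heap's own shrink op to cover whatever remains of
 * nr_to_scan. Returns the number of pages freed.
 */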
static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
					  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	int freed = 0;
	int to_scan = sc->nr_to_scan;

	if (to_scan == 0)
		return 0;

	/*
	 * Shrink the free list first; there is no point in zeroing memory
	 * we are just going to reclaim. Also skip any page pooling.
	 */
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
				PAGE_SIZE;

	to_scan -= freed;
	if (to_scan <= 0)
		return freed;

	if (heap->ops->shrink)
		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);

	return freed;
}

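/**
 * ion_heap_init_shrinker - register a heap with the MM shrinker core
 * @heap:	heap to register
 *
 * Lets the system reclaim the heap's queued and pooled memory under
 * pressure. Returns the register_shrinker() result.
 *
 * A plausible guard at heap-registration time (this mirrors when
 * registration is useful, not a required call sequence):
 *
 *	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
 *		ion_heap_init_shrinker(heap);
 */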
int ion_heap_init_shrinker(struct ion_heap *heap)
{
	heap->shrinker.count_objects = ion_heap_shrink_count;
	heap->shrinker.scan_objects = ion_heap_shrink_scan;
	heap->shrinker.seeks = DEFAULT_SEEKS;
	heap->shrinker.batch = 0;

	return register_shrinker(&heap->shrinker);
}