// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 */
/*
 * This file contains functions for buffer object structure management
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_ATOMIC */
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/current.h>
#include <linux/sched/signal.h>
#include <linux/file.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
#include "hmm/hmm_bo.h"

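/*
 * Initialize a buffer object covering @pgnr pages of ISP virtual address
 * space. The bo starts out free, unmapped and not yet linked into any list
 * or rbtree; bo->start is initialized to the device's base address and the
 * callers adjust it afterwards where needed.
 */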
static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
		     unsigned int pgnr)
{
	check_bodev_null_return(bdev, -EINVAL);
	/* prevent zero size buffer object */
	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return -EINVAL;
	}

	memset(bo, 0, sizeof(*bo));
	mutex_init(&bo->mutex);

	/* init the bo->list HEAD as an element of entire_bo_list */
	INIT_LIST_HEAD(&bo->list);

	bo->bdev = bdev;
	bo->vmap_addr = NULL;
	bo->status = HMM_BO_FREE;
	bo->start = bdev->start;
	bo->pgnr = pgnr;
	bo->end = bo->start + pgnr_to_size(pgnr);
	bo->prev = NULL;
	bo->next = NULL;

	return 0;
}

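/*
 * The free rbtree is keyed by pgnr; free buffer objects of equal size hang
 * off their rbtree node in a prev/next linked list. Search the subtree
 * rooted at @node for the smallest free bo with at least @pgnr pages,
 * detach it from the tree (or from its node's list) and return it, or
 * return NULL if no suitable bo exists.
 */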
static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
    struct rb_node *node, unsigned int pgnr)
{
	struct hmm_buffer_object *this, *ret_bo, *temp_bo;

	this = rb_entry(node, struct hmm_buffer_object, node);
	if (this->pgnr == pgnr ||
	    (this->pgnr > pgnr && !this->node.rb_left)) {
		goto remove_bo_and_return;
	} else {
		if (this->pgnr < pgnr) {
			if (!this->node.rb_right)
				return NULL;
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				     this->node.rb_right, pgnr);
		} else {
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				     this->node.rb_left, pgnr);
		}
		if (!ret_bo) {
			if (this->pgnr > pgnr)
				goto remove_bo_and_return;
			else
				return NULL;
		}
		return ret_bo;
	}

remove_bo_and_return:
	/* NOTE: All nodes on the free rbtree have a 'prev' that points to NULL.
	 * 1. check if 'this->next' is NULL:
	 *    yes: erase 'this' node and rebalance the rbtree, return 'this'.
	 */
	if (!this->next) {
		rb_erase(&this->node, &this->bdev->free_rbtree);
		return this;
	}
	/* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
	 * 2. check if 'this->next->next' is NULL:
	 *    yes: change the related 'next/prev' pointers,
	 *         return 'this->next' but the rbtree stays unchanged.
	 */
	temp_bo = this->next;
	this->next = temp_bo->next;
	if (temp_bo->next)
		temp_bo->next->prev = this;
	temp_bo->next = NULL;
	temp_bo->prev = NULL;
	return temp_bo;
}

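/*
 * Look up the buffer object whose ISP virtual start address is exactly
 * @start in the rbtree rooted at @root (used on the allocated rbtree,
 * which is keyed by start address). Returns NULL if no exact match exists.
 */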
static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
	ia_css_ptr start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (!n->rb_left)
				return NULL;
			n = n->rb_left;
		} else if (bo->start < start) {
			if (!n->rb_right)
				return NULL;
			n = n->rb_right;
		} else {
			return bo;
		}
	} while (n);

	return NULL;
}

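/*
 * Like __bo_search_by_addr(), but @start may point anywhere inside a
 * buffer object: return the bo whose [start, end) range contains @start,
 * or NULL if no bo in the tree covers that address.
 */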
static struct hmm_buffer_object *__bo_search_by_addr_in_range(
    struct rb_root *root, unsigned int start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (!n->rb_left)
				return NULL;
			n = n->rb_left;
		} else {
			if (bo->end > start)
				return bo;
			if (!n->rb_right)
				return NULL;
			n = n->rb_right;
		}
	} while (n);

	return NULL;
}

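/*
 * Insert @bo into the free rbtree, which is keyed by pgnr. If a node with
 * the same size already exists, @bo is chained into that node's prev/next
 * list instead of adding a duplicate key. The bo is marked HMM_BO_FREE in
 * either case.
 */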
static void __bo_insert_to_free_rbtree(struct rb_root *root,
				       struct hmm_buffer_object *bo)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int pgnr = bo->pgnr;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (pgnr < this->pgnr) {
			new = &((*new)->rb_left);
		} else if (pgnr > this->pgnr) {
			new = &((*new)->rb_right);
		} else {
			bo->prev = this;
			bo->next = this->next;
			if (this->next)
				this->next->prev = bo;
			this->next = bo;
			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
			return;
		}
	}

	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}

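/*
 * Insert @bo into the allocated rbtree, which is keyed by the ISP virtual
 * start address. This also initializes the bo's refcount and marks it
 * HMM_BO_ALLOCED.
 */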
static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
					struct hmm_buffer_object *bo)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int start = bo->start;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (start < this->start)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	kref_init(&bo->kref);
	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}

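/*
 * Split @pgnr pages off the front of the free bo @bo: a new bo is carved
 * out covering the first @pgnr pages and inserted into entire_bo_list just
 * before @bo, while @bo shrinks to the remaining range. Returns the new bo,
 * or NULL on allocation failure.
 */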
static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
	struct hmm_buffer_object *bo,
	unsigned int pgnr)
{
	struct hmm_buffer_object *new_bo;
	unsigned long flags;
	int ret;

	new_bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
	if (!new_bo) {
		dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
		return NULL;
	}
	ret = __bo_init(bdev, new_bo, pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, new_bo);
		return NULL;
	}

	new_bo->start = bo->start;
	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
	bo->start = new_bo->end;
	bo->pgnr = bo->pgnr - pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&new_bo->list, &bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	return new_bo;
}

static void __bo_take_off_handling(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take off a known bo from the free rbtree:
	 * 1. if bo->prev == NULL && bo->next == NULL, bo is a rbtree node
	 *    and does not have a linked list after it; to take off this bo,
	 *    we just need to erase bo directly and rebalance the free rbtree
	 */
	if (!bo->prev && !bo->next) {
		rb_erase(&bo->node, &bdev->free_rbtree);
		/* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node
		 * and has a linked list; to take off this bo we need to erase bo
		 * first, then insert bo->next into the free rbtree and rebalance
		 * the free rbtree
		 */
	} else if (!bo->prev && bo->next) {
		bo->next->prev = NULL;
		rb_erase(&bo->node, &bdev->free_rbtree);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
		bo->next = NULL;
		/* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
		 * node, bo is the last element of the linked list after the rbtree
		 * node; to take off this bo, we just need to set the "prev/next"
		 * pointers to NULL, the free rbtree stays unchanged
		 */
	} else if (bo->prev && !bo->next) {
		bo->prev->next = NULL;
		bo->prev = NULL;
		/* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
		 * node, bo is in the middle of the linked list after the rbtree node;
		 * to take off this bo, we just need to set the "prev/next" pointers
		 * to NULL, the free rbtree stays unchanged
		 */
	} else if (bo->prev && bo->next) {
		bo->next->prev = bo->prev;
		bo->prev->next = bo->next;
		bo->next = NULL;
		bo->prev = NULL;
	}
}

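/*
 * Merge the free bo @bo into the free bo @next_bo that directly follows it
 * in ISP virtual address space: @next_bo absorbs @bo's range, @bo is removed
 * from entire_bo_list and freed. Returns the merged bo.
 */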
static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
	struct hmm_buffer_object *next_bo)
{
	struct hmm_bo_device *bdev;
	unsigned long flags;

	bdev = bo->bdev;
	next_bo->start = bo->start;
	next_bo->pgnr = next_bo->pgnr + bo->pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_del(&bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	kmem_cache_free(bo->bdev->bo_cache, bo);

	return next_bo;
}

/*
 * hmm_bo_device functions.
 */
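/*
 * Rough lifecycle of a buffer object as exposed by this API (an illustrative
 * sketch only, not copied from an actual caller):
 *
 *	hmm_bo_device_init(bdev, mmu_driver, vaddr_start, size);
 *	bo = hmm_bo_alloc(bdev, pgnr);                // reserve ISP virtual space
 *	hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, NULL); // back it with pages
 *	hmm_bo_bind(bo);                              // map pages in the ISP MMU
 *	...
 *	hmm_bo_unbind(bo);
 *	hmm_bo_free_pages(bo);
 *	hmm_bo_unref(bo);                             // drops into hmm_bo_release()
 */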
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start,
		       unsigned int size)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int ret;

	check_bodev_null_return(bdev, -EINVAL);

	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
	if (ret) {
		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
		return ret;
	}

	bdev->start = vaddr_start;
	bdev->pgnr = size_to_pgnr_ceil(size);
	bdev->size = pgnr_to_size(bdev->pgnr);

	spin_lock_init(&bdev->list_lock);
	mutex_init(&bdev->rbtree_mutex);

	INIT_LIST_HEAD(&bdev->entire_bo_list);
	bdev->allocated_rbtree = RB_ROOT;
	bdev->free_rbtree = RB_ROOT;

	bdev->bo_cache = kmem_cache_create("bo_cache",
					   sizeof(struct hmm_buffer_object), 0, 0, NULL);
	if (!bdev->bo_cache) {
		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	bo = kmem_cache_alloc(bdev->bo_cache, GFP_KERNEL);
	if (!bo) {
		dev_err(atomisp_dev, "%s: kmem_cache_alloc failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	ret = __bo_init(bdev, bo, bdev->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, bo);
		isp_mmu_exit(&bdev->mmu);
		return -EINVAL;
	}

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&bo->list, &bdev->entire_bo_list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	bdev->flag = HMM_BO_DEVICE_INITED;

	return 0;
}

struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr)
{
	struct hmm_buffer_object *bo, *new_bo;
	struct rb_root *root = &bdev->free_rbtree;

	check_bodev_null_return(bdev, NULL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
			 "hmm_bo_device not inited yet.\n");

	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return NULL;
	}

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed\n",
			__func__);
		return NULL;
	}

	if (bo->pgnr > pgnr) {
		new_bo = __bo_break_up(bdev, bo, pgnr);
		if (!new_bo) {
			mutex_unlock(&bdev->rbtree_mutex);
			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
				__func__);
			return NULL;
		}

		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

		mutex_unlock(&bdev->rbtree_mutex);
		return new_bo;
	}

	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
	return bo;
}

void hmm_bo_release(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	struct hmm_buffer_object *next_bo, *prev_bo;

	mutex_lock(&bdev->rbtree_mutex);

	/*
	 * FIXME:
	 *
	 * how to destroy the bo when it is still MMAPED?
	 *
	 * ideally, this will not happen as hmm_bo_release
	 * will only be called when kref reaches 0, and in mmap
	 * operation the hmm_bo_ref will eventually be called.
	 * so, if this happens, something has gone wrong.
	 */
	if (bo->status & HMM_BO_MMAPED) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
		return;
	}

	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
		hmm_bo_unbind(bo);
	}

	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
		hmm_bo_free_pages(bo);
	}
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
		hmm_bo_vunmap(bo);
	}

	rb_erase(&bo->node, &bdev->allocated_rbtree);

	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);

	if (bo->list.prev != &bdev->entire_bo_list &&
	    prev_bo->end == bo->start &&
	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(prev_bo);
		bo = __bo_merge(prev_bo, bo);
	}

	if (bo->list.next != &bdev->entire_bo_list &&
	    next_bo->start == bo->end &&
	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(next_bo);
		bo = __bo_merge(bo, next_bo);
	}

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
}

void hmm_bo_device_exit(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;

	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);

	check_bodev_null_return_void(bdev);

	/*
	 * release all allocated bos, even if they are still in use;
	 * they will all be merged back into one big free bo
	 */
	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
		hmm_bo_release(
		    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));

	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
		__func__);

	/* free all bos to release all ISP virtual memory */
	while (!list_empty(&bdev->entire_bo_list)) {
		bo = list_to_hmm_bo(bdev->entire_bo_list.next);

		spin_lock_irqsave(&bdev->list_lock, flags);
		list_del(&bo->list);
		spin_unlock_irqrestore(&bdev->list_lock, flags);

		kmem_cache_free(bdev->bo_cache, bo);
	}

	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);

	kmem_cache_destroy(bdev->bo_cache);

	isp_mmu_exit(&bdev->mmu);
}

int hmm_bo_device_inited(struct hmm_bo_device *bdev)
{
	check_bodev_null_return(bdev, -EINVAL);

	return bdev->flag == HMM_BO_DEVICE_INITED;
}

int hmm_bo_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_ALLOCED;
}

struct hmm_buffer_object *hmm_bo_device_search_start(
    struct hmm_bo_device *bdev, ia_css_ptr vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_in_range(
    struct hmm_bo_device *bdev, unsigned int vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}

struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
    struct hmm_bo_device *bdev, const void *vaddr)
{
	struct list_head *pos;
	struct hmm_buffer_object *bo;
	unsigned long flags;

	check_bodev_null_return(bdev, NULL);

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_for_each(pos, &bdev->entire_bo_list) {
		bo = list_to_hmm_bo(pos);
		/* skip bos which have no vm_node allocated */
		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
			continue;
		if (bo->vmap_addr == vaddr)
			goto found;
	}
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return NULL;
found:
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return bo;
}

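/*
 * Release an array of order-0 pages, e.g. one previously filled by
 * alloc_pages_bulk(). Only the first @nr_pages entries are freed.
 */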
static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		__free_pages(page_array[i], 0);
}

static void free_private_bo_pages(struct hmm_buffer_object *bo)
{
	set_pages_array_wb(bo->pages, bo->pgnr);
	free_pages_bulk_array(bo->pgnr, bo->pages);
}

/* Allocate pages which will be used only by the ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo)
{
	const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
	int ret;

	ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages);
	if (ret != bo->pgnr) {
		free_pages_bulk_array(ret, bo->pages);
		dev_err(atomisp_dev, "alloc_pages_bulk() failed\n");
		return -ENOMEM;
	}

	ret = set_pages_array_uc(bo->pages, bo->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "set pages uncacheable failed.\n");
		free_pages_bulk_array(bo->pgnr, bo->pages);
		return ret;
	}

	return 0;
}

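/*
 * Fill bo->pages with the struct page pointers backing an existing
 * vmalloc()ed buffer starting at @vmalloc_addr; no new pages are
 * allocated here.
 */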
static int alloc_vmalloc_pages(struct hmm_buffer_object *bo, void *vmalloc_addr)
{
	void *vaddr = vmalloc_addr;
	int i;

	for (i = 0; i < bo->pgnr; i++) {
		bo->pages[i] = vmalloc_to_page(vaddr);
		if (!bo->pages[i]) {
			dev_err(atomisp_dev, "Error could not get page %d of vmalloc buf\n", i);
			return -ENOMEM;
		}
		vaddr += PAGE_SIZE;
	}

	return 0;
}

/*
 * allocate/free physical pages for the bo.
 *
 * type indicates where the pages come from; currently there are two
 * types of backing memory: HMM_BO_PRIVATE and HMM_BO_VMALLOC.
 *
 * vmalloc_addr is only valid when type is HMM_BO_VMALLOC.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type,
		       void *vmalloc_addr)
{
	int ret = -EINVAL;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);
	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!bo->pages)) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	if (type == HMM_BO_PRIVATE) {
		ret = alloc_private_pages(bo);
	} else if (type == HMM_BO_VMALLOC) {
		ret = alloc_vmalloc_pages(bo, vmalloc_addr);
	} else {
		dev_err(atomisp_dev, "invalid buffer type.\n");
		ret = -EINVAL;
	}
	if (ret)
		goto alloc_err;

	bo->type = type;

	bo->status |= HMM_BO_PAGE_ALLOCED;

	mutex_unlock(&bo->mutex);

	return 0;

alloc_err:
	kfree(bo->pages);
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "alloc pages err...\n");
	return ret;
status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object already has pages allocated.\n");
	return -EINVAL;
}

/*
 * free physical pages of the bo.
 */
void hmm_bo_free_pages(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);

	/* clear the flag anyway. */
	bo->status &= (~HMM_BO_PAGE_ALLOCED);

	if (bo->type == HMM_BO_PRIVATE)
		free_private_bo_pages(bo);
	else if (bo->type == HMM_BO_VMALLOC)
		; /* No-op, nothing to do */
	else
		dev_err(atomisp_dev, "invalid buffer type.\n");

	kfree(bo->pages);
	mutex_unlock(&bo->mutex);

	return;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object has no pages allocated yet.\n");
}

int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_PAGE_ALLOCED;
}

/*
 * bind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo)
{
	int ret;
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
				 status_err1);

	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		ret =
		    isp_mmu_map(&bdev->mmu, virt,
				page_to_phys(bo->pages[i]), 1);
		if (ret)
			goto map_err;
		virt += (1 << PAGE_SHIFT);
	}

	/*
	 * flush TLB here.
	 *
	 * theoretically, we do not need to flush the TLB as we did not change
	 * any existing address mappings, but for Silicon Hive's MMU this
	 * really is a bug. It seems that when fetching PTEs (page table
	 * entries) into the TLB, the MMU fetches additional INVALID PTEs
	 * automatically for performance reasons. E.g. we only set up one page
	 * address mapping, i.e. update one PTE, but the MMU fetches 4 PTEs at
	 * a time, so the additional 3 PTEs are invalid.
	 */
	if (bo->start != 0x0)
		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
					(bo->pgnr << PAGE_SHIFT));

	bo->status |= HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return 0;

map_err:
	/* unbind the physical pages with related virtual address space */
	virt = bo->start;
	for ( ; i > 0; i--) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"setup MMU address mapping failed.\n");
	return ret;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already bound.\n");
	return -EINVAL;
status_err1:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object vm_node or pages not allocated.\n");
	return -EINVAL;
}

/*
 * unbind the physical pages from the related virtual address space.
 */
void hmm_bo_unbind(struct hmm_buffer_object *bo)
{
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED |
				 HMM_BO_ALLOCED |
				 HMM_BO_BINDED, status_err);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	/*
	 * flush TLB as the address mapping has been removed and
	 * related TLBs should be invalidated.
	 */
	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
				(bo->pgnr << PAGE_SHIFT));

	bo->status &= (~HMM_BO_BINDED);

	mutex_unlock(&bo->mutex);

	return;

status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer vm or pages not allocated, or not bound yet.\n");
}

int hmm_bo_binded(struct hmm_buffer_object *bo)
{
	int ret;

	check_bo_null_return(bo, 0);

	mutex_lock(&bo->mutex);

	ret = bo->status & HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return ret;
}

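/*
 * Map the bo's pages into a contiguous kernel virtual address range and
 * return that address. @cached selects a cached or uncached mapping; if
 * the bo is already vmapped with a different cache attribute, it is
 * vunmapped and remapped with the requested one.
 */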
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{
	check_bo_null_return(bo, NULL);

	mutex_lock(&bo->mutex);
	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
		mutex_unlock(&bo->mutex);
		return bo->vmap_addr;
	}

	/* the cached status needs to be changed, so vunmap first */
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	bo->vmap_addr = vmap(bo->pages, bo->pgnr, VM_MAP,
			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
	if (unlikely(!bo->vmap_addr)) {
		mutex_unlock(&bo->mutex);
		dev_err(atomisp_dev, "vmap failed...\n");
		return NULL;
	}
	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);

	mutex_unlock(&bo->mutex);
	return bo->vmap_addr;
}

void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
		mutex_unlock(&bo->mutex);
		return;
	}

	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
	mutex_unlock(&bo->mutex);
}

void hmm_bo_vunmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	mutex_unlock(&bo->mutex);
}

void hmm_bo_ref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_get(&bo->kref);
}

static void kref_hmm_bo_release(struct kref *kref)
{
	if (!kref)
		return;

	hmm_bo_release(kref_to_hmm_bo(kref));
}

void hmm_bo_unref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_put(&bo->kref, kref_hmm_bo_release);
}

static void hmm_bo_vm_open(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_ref(bo);

	mutex_lock(&bo->mutex);

	bo->status |= HMM_BO_MMAPED;

	bo->mmap_count++;

	mutex_unlock(&bo->mutex);
}

static void hmm_bo_vm_close(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_unref(bo);

	mutex_lock(&bo->mutex);

	bo->mmap_count--;

	if (!bo->mmap_count) {
		bo->status &= (~HMM_BO_MMAPED);
		vma->vm_private_data = NULL;
	}

	mutex_unlock(&bo->mutex);
}

static const struct vm_operations_struct hmm_bo_vm_ops = {
	.open = hmm_bo_vm_open,
	.close = hmm_bo_vm_close,
};

/*
 * mmap the bo to user space.
 */
int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
{
	unsigned int start, end;
	unsigned int virt;
	unsigned int pgnr, i;
	unsigned int pfn;

	check_bo_null_return(bo, -EINVAL);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	pgnr = bo->pgnr;
	start = vma->vm_start;
	end = vma->vm_end;

	/*
	 * the vma's virtual address space size must be exactly the same
	 * as the buffer object's size.
	 */
	if ((start + pgnr_to_size(pgnr)) != end) {
		dev_warn(atomisp_dev,
			 "vma's address space size not equal to buffer object's size\n");
		return -EINVAL;
	}

	virt = vma->vm_start;
	for (i = 0; i < pgnr; i++) {
		pfn = page_to_pfn(bo->pages[i]);
		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
			dev_warn(atomisp_dev,
				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
				 virt, pfn, 1);
			return -EINVAL;
		}
		virt += PAGE_SIZE;
	}

	vma->vm_private_data = bo;

	vma->vm_ops = &hmm_bo_vm_ops;
	vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);

	/*
	 * call hmm_bo_vm_open explicitly.
	 */
	hmm_bo_vm_open(vma);

	return 0;

status_err:
	dev_err(atomisp_dev, "buffer page not allocated yet.\n");
	return -EINVAL;
}