// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <kunit/test-bug.h>

#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

static struct kmem_cache *slab_blocks;

static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
					       struct drm_buddy_block *parent,
					       unsigned int order,
					       u64 offset)
{
	struct drm_buddy_block *block;

	BUG_ON(order > DRM_BUDDY_MAX_ORDER);

	block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
	if (!block)
		return NULL;

	block->header = offset;
	block->header |= order;
	block->parent = parent;

	BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
	return block;
}

static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}

static void list_insert_sorted(struct drm_buddy *mm,
			       struct drm_buddy_block *block)
{
	struct drm_buddy_block *node;
	struct list_head *head;

	head = &mm->free_list[drm_buddy_block_order(block)];
	if (list_empty(head)) {
		list_add(&block->link, head);
		return;
	}

	list_for_each_entry(node, head, link)
		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
			break;

	__list_add(&block->link, node->link.prev, &node->link);
}

static void clear_reset(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_CLEAR;
}

static void mark_cleared(struct drm_buddy_block *block)
{
	block->header |= DRM_BUDDY_HEADER_CLEAR;
}

static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}

static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}

static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}

static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= e2 && e1 >= s2;
}

static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
{
	return s1 <= s2 && e1 >= e2;
}
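
/*
 * Note: both helpers treat [s, e] as inclusive byte ranges (values below
 * are purely illustrative). For example, overlaps(0, 0xfff, 0xfff, 0x1fff)
 * is true because the end offsets name the last byte of each range, while
 * contains(0, 0x1fff, 0x1000, 0x1fff) holds since the second range sits
 * entirely inside the first.
 */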

static struct drm_buddy_block *
__get_buddy(struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	parent = block->parent;
	if (!parent)
		return NULL;

	if (parent->left == block)
		return parent->right;

	return parent->left;
}

static unsigned int __drm_buddy_free(struct drm_buddy *mm,
				     struct drm_buddy_block *block,
				     bool force_merge)
{
	struct drm_buddy_block *parent;
	unsigned int order;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		if (!drm_buddy_block_is_free(buddy))
			break;

		if (!force_merge) {
			/*
			 * Check the clear state of the block and its
			 * buddy, and exit the loop if they differ: only
			 * buddies with the same clear state are merged.
			 */
			if (drm_buddy_block_is_clear(block) !=
			    drm_buddy_block_is_clear(buddy))
				break;

			if (drm_buddy_block_is_clear(block))
				mark_cleared(parent);
		}

		list_del(&buddy->link);
		if (force_merge && drm_buddy_block_is_clear(buddy))
			mm->clear_avail -= drm_buddy_block_size(mm, buddy);

		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	order = drm_buddy_block_order(block);
	mark_free(mm, block);

	return order;
}
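
/*
 * Merge walk sketch (offsets purely illustrative): freeing a 4KiB block at
 * offset 0x1000 whose buddy at 0x0 is also free collapses the pair into
 * their order-1 parent covering 0x0-0x2000; if that parent's buddy is free
 * as well, the walk continues upward, so a single free can restore a much
 * larger power-of-two block to the free list.
 */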

static int __force_merge(struct drm_buddy *mm,
			 u64 start,
			 u64 end,
			 unsigned int min_order)
{
	unsigned int order;
	int i;

	if (!min_order)
		return -ENOMEM;

	if (min_order > mm->max_order)
		return -EINVAL;

	for (i = min_order - 1; i >= 0; i--) {
		struct drm_buddy_block *block, *prev;

		list_for_each_entry_safe_reverse(block, prev, &mm->free_list[i], link) {
			struct drm_buddy_block *buddy;
			u64 block_start, block_end;

			if (!block->parent)
				continue;

			block_start = drm_buddy_block_offset(block);
			block_end = block_start + drm_buddy_block_size(mm, block) - 1;

			if (!contains(start, end, block_start, block_end))
				continue;

			buddy = __get_buddy(block);
			if (!drm_buddy_block_is_free(buddy))
				continue;

			WARN_ON(drm_buddy_block_is_clear(block) ==
				drm_buddy_block_is_clear(buddy));

			/*
			 * If the prev block is the same as the buddy, don't
			 * access it in the next iteration, since the buddy
			 * is freed as part of __drm_buddy_free().
			 */
			if (prev == buddy)
				prev = list_prev_entry(prev, link);

			list_del(&block->link);
			if (drm_buddy_block_is_clear(block))
				mm->clear_avail -= drm_buddy_block_size(mm, block);

			order = __drm_buddy_free(mm, block, true);
			if (order >= min_order)
				return 0;
		}
	}

	return -ENOMEM;
}

/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < SZ_4K)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->clear_avail = 0;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
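
/*
 * Minimal usage sketch, assuming a caller that manages a 64MiB region with
 * 4KiB chunks (all names here are local to the example):
 *
 *	struct drm_buddy mm;
 *	int err;
 *
 *	err = drm_buddy_init(&mm, SZ_64M, SZ_4K);
 *	if (err)
 *		return err;
 *
 *	... allocate and free blocks against &mm ...
 *
 *	drm_buddy_fini(&mm);
 *
 * The caller is responsible for serialising concurrent access to the
 * manager, e.g. with its own mutex.
 */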

/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Clean up memory manager resources and the freelist.
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	u64 root_size, size, start;
	unsigned int order;
	int i;

	size = mm->size;

	for (i = 0; i < mm->n_roots; ++i) {
		order = ilog2(size) - ilog2(mm->chunk_size);
		start = drm_buddy_block_offset(mm->roots[i]);
		__force_merge(mm, start, start + size, order);

		if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i])))
			kunit_fail_current_test("buddy_fini() root");

		drm_block_free(mm, mm->roots[i]);

		root_size = mm->chunk_size << order;
		size -= root_size;
	}

	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);

static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	BUG_ON(!drm_buddy_block_order(block));

	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	if (drm_buddy_block_is_clear(block)) {
		mark_cleared(block->left);
		mark_cleared(block->right);
		clear_reset(block);
	}

	mark_split(block);

	return 0;
}

/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);

/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	__drm_buddy_free(mm, block, false);
}
EXPORT_SYMBOL(drm_buddy_free_block);

static void __drm_buddy_free_list(struct drm_buddy *mm,
				  struct list_head *objects,
				  bool mark_clear,
				  bool mark_dirty)
{
	struct drm_buddy_block *block, *on;

	WARN_ON(mark_dirty && mark_clear);

	list_for_each_entry_safe(block, on, objects, link) {
		if (mark_clear)
			mark_cleared(block);
		else if (mark_dirty)
			clear_reset(block);
		drm_buddy_free_block(mm, block);
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}

static void drm_buddy_free_list_internal(struct drm_buddy *mm,
					 struct list_head *objects)
{
	/*
	 * Don't touch the clear/dirty bit, since allocation is still internal
	 * at this point. For example we might have just failed part of the
	 * allocation.
	 */
	__drm_buddy_free_list(mm, objects, false, false);
}

/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 * @flags: optional flags like DRM_BUDDY_CLEARED
 */
void drm_buddy_free_list(struct drm_buddy *mm,
			 struct list_head *objects,
			 unsigned int flags)
{
	bool mark_clear = flags & DRM_BUDDY_CLEARED;

	__drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
}
EXPORT_SYMBOL(drm_buddy_free_list);
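
/*
 * Example (a sketch; @mm and the @objects list are assumed to come from an
 * earlier drm_buddy_alloc_blocks() call): a driver that has zeroed the
 * backing memory can return the blocks with their cleared state preserved,
 * making them eligible for later DRM_BUDDY_CLEAR_ALLOCATION requests:
 *
 *	drm_buddy_free_list(&mm, &objects, DRM_BUDDY_CLEARED);
 *
 * Passing no flags (0) instead marks the freed blocks as dirty.
 */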

static bool block_incompatible(struct drm_buddy_block *block, unsigned int flags)
{
	bool needs_clear = flags & DRM_BUDDY_CLEAR_ALLOCATION;

	return needs_clear != drm_buddy_block_is_clear(block);
}

static struct drm_buddy_block *
__alloc_range_bias(struct drm_buddy *mm,
		   u64 start, u64 end,
		   unsigned int order,
		   unsigned long flags,
		   bool fallback)
{
	u64 req_size = mm->chunk_size << order;
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (block_start < start || block_end > end) {
			u64 adjusted_start = max(block_start, start);
			u64 adjusted_end = min(block_end, end);

			if (round_down(adjusted_end + 1, req_size) <=
			    round_up(adjusted_start, req_size))
				continue;
		}

		if (!fallback && block_incompatible(block, flags))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}

static struct drm_buddy_block *
__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
			     u64 start, u64 end,
			     unsigned int order,
			     unsigned long flags)
{
	struct drm_buddy_block *block;
	bool fallback = false;

	block = __alloc_range_bias(mm, start, end, order,
				   flags, fallback);
	if (IS_ERR(block))
		return __alloc_range_bias(mm, start, end, order,
					  flags, !fallback);

	return block;
}

static struct drm_buddy_block *
get_maxblock(struct drm_buddy *mm, unsigned int order,
	     unsigned long flags)
{
	struct drm_buddy_block *max_block = NULL, *block = NULL;
	unsigned int i;

	for (i = order; i <= mm->max_order; ++i) {
		struct drm_buddy_block *tmp_block;

		list_for_each_entry_reverse(tmp_block, &mm->free_list[i], link) {
			if (block_incompatible(tmp_block, flags))
				continue;

			block = tmp_block;
			break;
		}

		if (!block)
			continue;

		if (!max_block) {
			max_block = block;
			continue;
		}

		if (drm_buddy_block_offset(block) >
		    drm_buddy_block_offset(max_block)) {
			max_block = block;
		}
	}

	return max_block;
}

static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order, flags);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			struct drm_buddy_block *tmp_block;

			list_for_each_entry_reverse(tmp_block, &mm->free_list[tmp], link) {
				if (block_incompatible(tmp_block, flags))
					continue;

				block = tmp_block;
				break;
			}

			if (block)
				break;
		}
	}

	if (!block) {
		/* Fallback method */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}

		if (!block)
			return ERR_PTR(-ENOSPC);
	}

	BUG_ON(!drm_buddy_block_is_free(block));

	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	if (tmp != order)
		__drm_buddy_free(mm, block, false);
	return ERR_PTR(err);
}

static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks,
			 u64 *total_allocated_on_err)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	u64 total_allocated = 0;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			if (drm_buddy_block_is_free(block)) {
				mark_allocated(block);
				total_allocated += drm_buddy_block_size(mm, block);
				mm->avail -= drm_buddy_block_size(mm, block);
				if (drm_buddy_block_is_clear(block))
					mm->clear_avail -= drm_buddy_block_size(mm, block);
				list_add_tail(&block->link, &allocated);
				continue;
			} else if (!mm->clear_avail) {
				err = -ENOSPC;
				goto err_free;
			}
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	if (total_allocated < size) {
		err = -ENOSPC;
		goto err_free;
	}

	list_splice_tail(&allocated, blocks);

	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block, false);

err_free:
	if (err == -ENOSPC && total_allocated_on_err) {
		list_splice_tail(&allocated, blocks);
		*total_allocated_on_err = total_allocated;
	} else {
		drm_buddy_free_list_internal(mm, &allocated);
	}

	return err;
}

static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   u64 *total_allocated_on_err,
				   struct list_head *blocks)
{
	LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size,
			     blocks, total_allocated_on_err);
}

static int __alloc_contig_try_harder(struct drm_buddy *mm,
				     u64 size,
				     u64 min_block_size,
				     struct list_head *blocks)
{
	u64 rhs_offset, lhs_offset, lhs_size, filled;
	struct drm_buddy_block *block;
	struct list_head *list;
	LIST_HEAD(blocks_lhs);
	unsigned long pages;
	unsigned int order;
	u64 modify_size;
	int err;

	modify_size = rounddown_pow_of_two(size);
	pages = modify_size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	if (order == 0)
		return -ENOSPC;

	list = &mm->free_list[order];
	if (list_empty(list))
		return -ENOSPC;

	list_for_each_entry_reverse(block, list, link) {
		/* Allocate blocks traversing RHS */
		rhs_offset = drm_buddy_block_offset(block);
		err = __drm_buddy_alloc_range(mm, rhs_offset, size,
					      &filled, blocks);
		if (!err || err != -ENOSPC)
			return err;

		lhs_size = max((size - filled), min_block_size);
		if (!IS_ALIGNED(lhs_size, min_block_size))
			lhs_size = round_up(lhs_size, min_block_size);

		/* Allocate blocks traversing LHS */
		lhs_offset = drm_buddy_block_offset(block) - lhs_size;
		err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
					      NULL, &blocks_lhs);
		if (!err) {
			list_splice(&blocks_lhs, blocks);
			return 0;
		} else if (err != -ENOSPC) {
			drm_buddy_free_list_internal(mm, blocks);
			return err;
		}
		/* Free blocks for the next iteration */
		drm_buddy_free_list_internal(mm, blocks);
	}

	return -ENOSPC;
}

/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @start: start address to begin the trimming.
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain a single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power-of-two value; drivers consume the *actual* size, so the remaining
 * portions are unused and can optionally be freed with this function.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 *start,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	u64 block_start, block_end;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	block_start = drm_buddy_block_offset(block);
	block_end = block_start + drm_buddy_block_size(mm, block);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	new_start = block_start;
	if (start) {
		new_start = *start;

		if (new_start < block_start)
			return -EINVAL;

		if (!IS_ALIGNED(new_start, mm->chunk_size))
			return -EINVAL;

		if (range_overflows(new_start, new_size, block_end))
			return -EINVAL;
	}

	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);
	if (drm_buddy_block_is_clear(block))
		mm->clear_avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
	if (err) {
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
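
/*
 * Example (a sketch; @mm is an assumption): a driver that needs 3MiB of
 * contiguous space gets a 4MiB block back when trimming is disabled, and
 * can then hand the unused 1MiB tail back itself:
 *
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = drm_buddy_alloc_blocks(&mm, 0, mm.size, 3 * SZ_1M, SZ_4K,
 *				     &blocks,
 *				     DRM_BUDDY_CONTIGUOUS_ALLOCATION |
 *				     DRM_BUDDY_TRIM_DISABLE);
 *	if (!err)
 *		err = drm_buddy_block_trim(&mm, NULL, 3 * SZ_1M, &blocks);
 */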

static struct drm_buddy_block *
__drm_buddy_alloc_blocks(struct drm_buddy *mm,
			 u64 start, u64 end,
			 unsigned int order,
			 unsigned long flags)
{
	if (flags & DRM_BUDDY_RANGE_ALLOCATION)
		/* Allocate traversing within the range */
		return __drm_buddy_alloc_range_bias(mm, start, end,
						    order, flags);
	else
		/* Allocate from freelist */
		return alloc_from_freelist(mm, order, flags);
}

/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation in bytes
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() is called when there are range limitations; it
 * traverses the tree and returns the desired block.
 *
 * alloc_from_freelist() is called when *no* range restrictions are
 * enforced; it picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_block_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	u64 original_size, original_min_size;
	unsigned int min_order, order;
	LIST_HEAD(allocated);
	unsigned long pages;
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_block_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_block_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end) {
		if (!IS_ALIGNED(start | end, min_block_size))
			return -EINVAL;

		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
	}

	original_size = size;
	original_min_size = min_block_size;

	/* Round up the size to a power of two */
	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
		size = roundup_pow_of_two(size);
		min_block_size = size;
	/* Align size value to min_block_size */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);
	}

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

	do {
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			block = __drm_buddy_alloc_blocks(mm, start,
							 end,
							 order,
							 flags);
			if (!IS_ERR(block))
				break;

			if (order-- == min_order) {
				/* Try allocation through force merge method */
				if (mm->clear_avail &&
				    !__force_merge(mm, start, end, min_order)) {
					block = __drm_buddy_alloc_blocks(mm, start,
									 end,
									 min_order,
									 flags);
					if (!IS_ERR(block)) {
						order = min_order;
						break;
					}
				}

				/*
				 * Try contiguous block allocation through
				 * try harder method.
				 */
				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
					return __alloc_contig_try_harder(mm,
									 original_size,
									 original_min_size,
									 blocks);
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		if (drm_buddy_block_is_clear(block))
			mm->clear_avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	/* Trim the allocated block to the required size */
	if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
	    original_size != size) {
		struct list_head *trim_list;
		LIST_HEAD(temp);
		u64 trim_size;

		trim_list = &allocated;
		trim_size = original_size;

		if (!list_is_singular(&allocated)) {
			block = list_last_entry(&allocated, typeof(*block), link);
			list_move(&block->link, &temp);
			trim_list = &temp;
			trim_size = drm_buddy_block_size(mm, block) -
				(size - original_size);
		}

		drm_buddy_block_trim(mm,
				     NULL,
				     trim_size,
				     trim_list);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &allocated);
	}

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list_internal(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
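
/*
 * Example (a sketch; assumes a 64MiB manager set up with drm_buddy_init()):
 * allocate 8MiB in 64KiB-aligned blocks, restricted to the first 32MiB:
 *
 *	LIST_HEAD(blocks);
 *	int err;
 *
 *	err = drm_buddy_alloc_blocks(&mm, 0, SZ_32M, SZ_8M, SZ_64K,
 *				     &blocks, DRM_BUDDY_RANGE_ALLOCATION);
 *	if (err)
 *		return err;
 *
 *	... use the blocks ...
 *
 *	drm_buddy_free_list(&mm, &blocks, 0);
 */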

/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
			   struct drm_buddy_block *block,
			   struct drm_printer *p)
{
	u64 start = drm_buddy_block_offset(block);
	u64 size = drm_buddy_block_size(mm, block);

	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);

/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);

	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%2d ", order);

		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %8llu KiB", free >> 10);
		else
			drm_printf(p, "free: %8llu MiB", free >> 20);

		drm_printf(p, ", blocks: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);
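
/*
 * Example (a sketch; @dev and @mm are assumptions): dump the allocator
 * state through a device-bound printer, e.g. from a debugfs show handler:
 *
 *	struct drm_printer p = drm_info_printer(dev);
 *
 *	drm_buddy_print(&mm, &p);
 */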

static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}

static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}

module_init(drm_buddy_module_init);
module_exit(drm_buddy_module_exit);

MODULE_DESCRIPTION("DRM Buddy Allocator");
MODULE_LICENSE("Dual MIT/GPL");