// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
 */

#include <kunit/test.h>

#include <linux/prime_numbers.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>

#include <drm/drm_buddy.h>

#include "../lib/drm_random.h"
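
/*
 * One way to run these tests (assuming the DRM tests kunitconfig is available)
 * is via the KUnit wrapper:
 *
 *   ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests
 */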

static unsigned int random_seed;

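/* Convert a buddy order into an allocation size in bytes. */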
static inline u64 get_size(int order, u64 chunk_size)
{
	return (1 << order) * chunk_size;
}

static void drm_test_buddy_alloc_range_bias(struct kunit *test)
{
	u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem;
	DRM_RND_STATE(prng, random_seed);
	unsigned int i, count, *order;
	struct drm_buddy_block *block;
	unsigned long flags;
	struct drm_buddy mm;
	LIST_HEAD(allocated);

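	/* Pick a random power-of-two page size between SZ_4K and the bias size. */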
	bias_size = SZ_1M;
	ps = roundup_pow_of_two(prandom_u32_state(&prng) % bias_size);
	ps = max(SZ_4K, ps);
	mm_size = (SZ_8M - 1) & ~(ps - 1); /* Multiple roots */

	kunit_info(test, "mm_size=%u, ps=%u\n", mm_size, ps);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

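	/* Visit each bias-sized slice of the address space in random order. */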
	count = mm_size / bias_size;
	order = drm_random_order(count, &prng);
	KUNIT_EXPECT_TRUE(test, order);

	/*
	 * Idea is to split the address space into uniform bias ranges, and then
	 * in some random order allocate within each bias, using various
	 * patterns within. This should detect if allocations leak out from a
	 * given bias, for example.
	 */

	for (i = 0; i < count; i++) {
		LIST_HEAD(tmp);
		u32 size;

		bias_start = order[i] * bias_size;
		bias_end = bias_start + bias_size;
		bias_rem = bias_size;

		/* internal round_up too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, bias_size,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, bias_size);

		/* size too big */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_size + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_size + ps, ps);

		/* bias range too small for size */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end, bias_size, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start + ps, bias_end, bias_size, ps);

		/* bias misaligned */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start + ps,
							     bias_end - ps,
							     bias_size >> 1, bias_size >> 1,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
97 "buddy_alloc h didn't fail with bias(%x-%x), size=%u, ps=%u\n",
98 bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1);

		/* single big page */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, bias_size, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
106 "buddy_alloc i failed with bias(%x-%x), size=%u, ps=%u\n",
107 bias_start, bias_end, bias_size, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* single page with internal round_up */
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, ps, bias_size,
							      &tmp,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, ps, bias_size);
		drm_buddy_free_list(&mm, &tmp, 0);

		/* random size within */
		size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);
		if (size)
			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &tmp,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);

		bias_rem -= size;
		/* too big for current avail */
		KUNIT_ASSERT_TRUE_MSG(test,
				      drm_buddy_alloc_blocks(&mm, bias_start,
							     bias_end, bias_rem + ps, ps,
							     &allocated,
							     DRM_BUDDY_RANGE_ALLOCATION),
				      "buddy_alloc didn't fail with bias(%x-%x), size=%u, ps=%u\n",
				      bias_start, bias_end, bias_rem + ps, ps);

		if (bias_rem) {
			/* random fill of the remainder */
			size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

			KUNIT_ASSERT_FALSE_MSG(test,
					       drm_buddy_alloc_blocks(&mm, bias_start,
								      bias_end, size, ps,
								      &allocated,
								      DRM_BUDDY_RANGE_ALLOCATION),
					       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
					       bias_start, bias_end, size, ps);
			/*
			 * Intentionally allow some space to be left
			 * unallocated, and ideally not always on the bias
			 * boundaries.
			 */
			drm_buddy_free_list(&mm, &tmp, 0);
		} else {
			list_splice_tail(&tmp, &allocated);
		}
	}

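	/* Release everything from the uniform-bias pass before the free-form pass. */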
	kfree(order);
	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Something more free-form. Idea is to pick a random starting bias
	 * range within the address space and then start filling it up. Also
	 * randomly grow the bias range in both directions as we go along. This
	 * should give us bias start/end which is not always uniform like above,
	 * and in some cases will require the allocator to jump over already
	 * allocated nodes in the middle of the address space.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	do {
		u32 size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, bias_start,
							      bias_end, size, ps,
							      &allocated,
							      DRM_BUDDY_RANGE_ALLOCATION),
				       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
				       bias_start, bias_end, size, ps);
		bias_rem -= size;

		/*
		 * Try to randomly grow the bias range in both directions, or
		 * only one, or perhaps don't grow at all.
		 */
		do {
			u32 old_bias_start = bias_start;
			u32 old_bias_end = bias_end;

			if (bias_start)
				bias_start -= round_up(prandom_u32_state(&prng) % bias_start, ps);
			if (bias_end != mm_size)
				bias_end += round_up(prandom_u32_state(&prng) % (mm_size - bias_end), ps);

			bias_rem += old_bias_start - bias_start;
			bias_rem += bias_end - old_bias_end;
		} while (!bias_rem && (bias_start || bias_end != mm_size));
	} while (bias_rem);

	KUNIT_ASSERT_EQ(test, bias_start, 0);
	KUNIT_ASSERT_EQ(test, bias_end, mm_size);
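	/* The bias now covers the whole mm and is fully allocated, so even a single page must fail. */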
	KUNIT_ASSERT_TRUE_MSG(test,
			      drm_buddy_alloc_blocks(&mm, bias_start, bias_end,
						     ps, ps,
						     &allocated,
						     DRM_BUDDY_RANGE_ALLOCATION),
			      "buddy_alloc didn't fail with bias(%x-%x), size=%u\n",
			      bias_start, bias_end, ps);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);

	/*
	 * Allocate cleared blocks in the bias range when the DRM buddy's clear avail is
	 * zero. This will validate the bias range allocation in scenarios like system boot
	 * when no cleared blocks are available and exercise the fallback path too. The resulting
	 * blocks should always be dirty.
	 */

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps),
			       "buddy_init failed\n");

	bias_start = round_up(prandom_u32_state(&prng) % (mm_size - ps), ps);
	bias_end = round_up(bias_start + prandom_u32_state(&prng) % (mm_size - bias_start), ps);
	bias_end = max(bias_end, bias_start + ps);
	bias_rem = bias_end - bias_start;

	flags = DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION;
	size = max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps);

	KUNIT_ASSERT_FALSE_MSG(test,
			       drm_buddy_alloc_blocks(&mm, bias_start,
						      bias_end, size, ps,
						      &allocated,
						      flags),
			       "buddy_alloc failed with bias(%x-%x), size=%u, ps=%u\n",
			       bias_start, bias_end, size, ps);

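	/* No cleared blocks exist yet, so the allocator must fall back to dirty ones. */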
	list_for_each_entry(block, &allocated, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_clear(struct kunit *test)
{
	unsigned long n_pages, total, i = 0;
	const unsigned long ps = SZ_4K;
	struct drm_buddy_block *block;
	const int max_order = 12;
	LIST_HEAD(allocated);
	struct drm_buddy mm;
	unsigned int order;
	u32 mm_size, size;
	LIST_HEAD(dirty);
	LIST_HEAD(clean);

	mm_size = SZ_4K << max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	/*
	 * Idea is to allocate and free some random portion of the address space,
	 * returning those pages as non-dirty and randomly alternate between
	 * requesting dirty and non-dirty pages (not going over the limit
	 * we freed as non-dirty), putting that into two separate lists.
	 * Loop over both lists at the end checking that the dirty list
	 * is indeed all dirty pages and vice versa. Free it all again,
	 * keeping the dirty/clear status.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    5 * ps, ps, &allocated,
							    DRM_BUDDY_TOPDOWN_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 5 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);

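	/* Alternate between default (dirty) and explicitly cleared allocations. */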
	n_pages = 10;
	do {
		unsigned long flags;
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0) {
			list = &dirty;
			flags = 0;
		} else {
			list = &clean;
			flags = DRM_BUDDY_CLEAR_ALLOCATION;
		}

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list,
								    flags),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	list_for_each_entry(block, &clean, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true);

	list_for_each_entry(block, &dirty, link)
		KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);

	/*
	 * Trying to go over the clear limit for some allocation.
	 * The allocation should never fail with a reasonable page size.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    10 * ps, ps, &clean,
							    DRM_BUDDY_CLEAR_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 10 * ps);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);
	drm_buddy_fini(&mm);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Create a new mm. Intentionally fragment the address space by creating
	 * two alternating lists. Free both lists, one as dirty the other as clean.
	 * Try to allocate double the previous size with matching min_page_size. The
	 * allocation should never fail as it calls force_merge. Also check that
	 * the page is always dirty after force_merge. Free the page as dirty, then
	 * repeat the whole thing, incrementing the order until we hit max_order.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 2;

		if (slot == 0)
			list = &dirty;
		else
			list = &clean;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n", ps);
	} while (++i < n_pages);

	drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED);
	drm_buddy_free_list(&mm, &dirty, 0);

	order = 1;
	do {
		size = SZ_4K << order;

		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
								    size, size, &allocated,
								    DRM_BUDDY_CLEAR_ALLOCATION),
				       "buddy_alloc hit an error size=%u\n", size);
		total = 0;
		list_for_each_entry(block, &allocated, link) {
			if (size != mm_size)
				KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false);
			total += drm_buddy_block_size(&mm, block);
		}
		KUNIT_EXPECT_EQ(test, total, size);

		drm_buddy_free_list(&mm, &allocated, 0);
	} while (++order <= max_order);

	drm_buddy_fini(&mm);

	/*
	 * Create a new mm with a non-power-of-two size. Allocate a random size from each
	 * root, free as cleared and then call fini. This will ensure the multi-root
	 * force merge during fini.
	 */
	mm_size = (SZ_4K << max_order) + (SZ_4K << (max_order - 2));

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));
	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
							    4 * ps, ps, &allocated,
							    DRM_BUDDY_RANGE_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 4 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_order,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CLEAR_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, SZ_4K << max_order, mm_size,
							    ps, ps, &allocated,
							    DRM_BUDDY_RANGE_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", ps);
	drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_contiguous(struct kunit *test)
{
	const unsigned long ps = SZ_4K, mm_size = 16 * 3 * SZ_4K;
	unsigned long i, n_pages, total;
	struct drm_buddy_block *block;
	struct drm_buddy mm;
	LIST_HEAD(left);
	LIST_HEAD(middle);
	LIST_HEAD(right);
	LIST_HEAD(allocated);

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps));

	/*
	 * Idea is to fragment the address space by alternating block
	 * allocations between three different lists; one for left, middle and
	 * right. We can then free a list to simulate fragmentation. In
	 * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION,
	 * including the try_harder path.
	 */

	i = 0;
	n_pages = mm_size / ps;
	do {
		struct list_head *list;
		int slot = i % 3;

		if (slot == 0)
			list = &left;
		else if (slot == 1)
			list = &middle;
		else
			list = &right;
		KUNIT_ASSERT_FALSE_MSG(test,
				       drm_buddy_alloc_blocks(&mm, 0, mm_size,
							      ps, ps, list, 0),
				       "buddy_alloc hit an error size=%lu\n",
				       ps);
	} while (++i < n_pages);

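	/* Everything is allocated, so any contiguous request must fail. */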
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);

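	/* Free every third page; the single-page holes still can't hold 2 or 3 contiguous pages. */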
	drm_buddy_free_list(&mm, &middle, 0);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   2 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &right, 0);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							   3 * ps, ps, &allocated,
							   DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			      "buddy_alloc didn't error size=%lu\n", 3 * ps);
	/*
	 * At this point we should have enough contiguous space for 2 blocks,
	 * however they are never buddies (since we freed middle and right) so
	 * will require the try_harder logic to find them.
	 */
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    2 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 2 * ps);

	drm_buddy_free_list(&mm, &left, 0);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size,
							    3 * ps, ps, &allocated,
							    DRM_BUDDY_CONTIGUOUS_ALLOCATION),
			       "buddy_alloc hit an error size=%lu\n", 3 * ps);

	total = 0;
	list_for_each_entry(block, &allocated, link)
		total += drm_buddy_block_size(&mm, block);

	KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pathological(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	const int max_order = 3;
	unsigned long flags = 0;
	int order, top;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(holes);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left. Free the largest block, then whittle down again.
	 * Eventually we will have a fully 50% fragmented mm.
	 */

	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (top = max_order; top; top--) {
		/* Make room by freeing the largest allocated block */
		block = list_first_entry_or_null(&blocks, typeof(*block), link);
		if (block) {
			list_del(&block->link);
			drm_buddy_free_block(&mm, block);
		}

		for (order = top; order--;) {
			size = get_size(order, mm.chunk_size);
			KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
									    mm_size, size, size,
									    &tmp, flags),
					       "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
					       order, top);

			block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
			KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

			list_move_tail(&block->link, &blocks);
		}

		/* There should be one final page for this sub-allocation */
		size = get_size(0, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM for hole\n");

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &holes);

		size = get_size(top, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
				      top, max_order);
	}

	drm_buddy_free_list(&mm, &holes, 0);

	/* Nothing larger than blocks of chunk_size now available */
	for (order = 1; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
				      order);
	}

	list_splice_tail(&holes, &blocks);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block, *bn;
	const unsigned int max_order = 16;
	unsigned long flags = 0;
	struct drm_buddy mm;
	unsigned int order;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);

	/*
	 * Create a pot-sized mm, then allocate one of each possible
	 * order within. This should leave the mm with exactly one
	 * page left.
	 */

	mm_size = SZ_4K << max_order;
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order < max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* And now the last remaining block available */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc hit -ENOMEM on final alloc\n");

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_move_tail(&block->link, &blocks);

	/* Should be completely full! */
	for (order = max_order; order--;) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								   size, size, &tmp, flags),
				      "buddy_alloc unexpectedly succeeded, it should be full!");
	}

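	/* Make room again: free the last block (a single page) before walking the list. */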
	block = list_last_entry(&blocks, typeof(*block), link);
	list_del(&block->link);
	drm_buddy_free_block(&mm, block);

	/* As we free in increasing size, we make available larger blocks */
	order = 1;
	list_for_each_entry_safe(block, bn, &blocks, link) {
		list_del(&block->link);
		drm_buddy_free_block(&mm, block);

		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_del(&block->link);
		drm_buddy_free_block(&mm, block);
		order++;
	}

	/* To confirm, now the whole mm should be available */
	size = get_size(max_order, mm.chunk_size);
	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							    size, size, &tmp, flags),
			       "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
			       max_order);

	block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
	KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

	list_del(&block->link);
	drm_buddy_free_block(&mm, block);
	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_optimistic(struct kunit *test)
{
	u64 mm_size, size, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	const int max_order = 16;
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	LIST_HEAD(tmp);
	int order;

	/*
	 * Create a mm with one block of each order available, and
	 * try to allocate them all.
	 */

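	/* One block of each order 0..max_order needs (2^(max_order + 1) - 1) pages in total. */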
	mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);

	KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

	for (order = 0; order <= max_order; order++) {
		size = get_size(order, mm.chunk_size);
		KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
								    size, size, &tmp, flags),
				       "buddy_alloc hit -ENOMEM with order=%d\n",
				       order);

		block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
		KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");

		list_move_tail(&block->link, &blocks);
	}

	/* Should be completely full! */
	size = get_size(0, mm.chunk_size);
	KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
							   size, size, &tmp, flags),
			      "buddy_alloc unexpectedly succeeded, it should be full!");

	drm_buddy_free_list(&mm, &blocks, 0);
	drm_buddy_fini(&mm);
}

static void drm_test_buddy_alloc_limit(struct kunit *test)
{
	u64 size = U64_MAX, start = 0;
	struct drm_buddy_block *block;
	unsigned long flags = 0;
	LIST_HEAD(allocated);
	struct drm_buddy mm;

	KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));

	KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
			    "mm.max_order(%d) != %d\n", mm.max_order,
			    DRM_BUDDY_MAX_ORDER);

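	/* The largest single allocation the mm can hand out: one block of max_order. */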
	size = mm.chunk_size << mm.max_order;
	KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
							mm.chunk_size, &allocated, flags));

	block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
	KUNIT_EXPECT_TRUE(test, block);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
			    "block order(%d) != %d\n",
			    drm_buddy_block_order(block), mm.max_order);

	KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * mm.chunk_size,
			    "block size(%llu) != %llu\n",
			    drm_buddy_block_size(&mm, block),
			    BIT_ULL(mm.max_order) * mm.chunk_size);

	drm_buddy_free_list(&mm, &allocated, 0);
	drm_buddy_fini(&mm);
}

static int drm_buddy_suite_init(struct kunit_suite *suite)
{
	while (!random_seed)
		random_seed = get_random_u32();

	kunit_info(suite, "Testing DRM buddy manager, with random_seed=0x%x\n",
		   random_seed);

	return 0;
}

static struct kunit_case drm_buddy_tests[] = {
	KUNIT_CASE(drm_test_buddy_alloc_limit),
	KUNIT_CASE(drm_test_buddy_alloc_optimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
	KUNIT_CASE(drm_test_buddy_alloc_pathological),
	KUNIT_CASE(drm_test_buddy_alloc_contiguous),
	KUNIT_CASE(drm_test_buddy_alloc_clear),
	KUNIT_CASE(drm_test_buddy_alloc_range_bias),
	{}
};

static struct kunit_suite drm_buddy_test_suite = {
	.name = "drm_buddy",
	.suite_init = drm_buddy_suite_init,
	.test_cases = drm_buddy_tests,
};

kunit_test_suite(drm_buddy_test_suite);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("KUnit tests for drm_buddy functions");
MODULE_LICENSE("GPL");