// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/prime_numbers.h>
#include <linux/sort.h>

#include <drm/drm_buddy.h>

#include "../i915_selftest.h"

#include "mock_drm.h"
#include "mock_gem_device.h"
#include "mock_region.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
#include "gem/selftests/igt_gem_utils.h"
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_migrate.h"
#include "i915_memcpy.h"
#include "i915_ttm_buddy_manager.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"

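/*
 * Unpin and release every object on the list, then drain the freed-object
 * worker so that one subtest cannot pollute the memory region for the next.
 */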
static void close_objects(struct intel_memory_region *mem,
			  struct list_head *objects)
{
	struct drm_i915_private *i915 = mem->i915;
	struct drm_i915_gem_object *obj, *on;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		i915_gem_object_lock(obj, NULL);
		if (i915_gem_object_has_pinned_pages(obj))
			i915_gem_object_unpin_pages(obj);
		/* No polluting the memory region between tests */
		__i915_gem_object_put_pages(obj);
		i915_gem_object_unlock(obj);
		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}

	cond_resched();

	i915_gem_drain_freed_objects(i915);
}

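/*
 * Fill the region with objects sized as increasing primes of pages until
 * allocation fails, and check that failure only happens once the region
 * really is exhausted.
 */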
static int igt_mock_fill(void *arg)
{
	struct intel_memory_region *mem = arg;
	resource_size_t total = resource_size(&mem->region);
	resource_size_t page_size;
	resource_size_t rem;
	unsigned long max_pages;
	unsigned long page_num;
	LIST_HEAD(objects);
	int err = 0;

	page_size = PAGE_SIZE;
	max_pages = div64_u64(total, page_size);
	rem = total;

	for_each_prime_number_from(page_num, 1, max_pages) {
		resource_size_t size = page_num * page_size;
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mem, size, 0, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			break;
		}

		err = i915_gem_object_pin_pages_unlocked(obj);
		if (err) {
			i915_gem_object_put(obj);
			break;
		}

		list_add(&obj->st_link, &objects);
		rem -= size;
	}

	if (err == -ENOMEM)
		err = 0;
	if (err == -ENXIO) {
		if (page_num * page_size <= rem) {
			pr_err("%s failed, space still left in region\n",
			       __func__);
			err = -EINVAL;
		} else {
			err = 0;
		}
	}

	close_objects(mem, &objects);

	return err;
}

static struct drm_i915_gem_object *
igt_object_create(struct intel_memory_region *mem,
		  struct list_head *objects,
		  u64 size,
		  unsigned int flags)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_region(mem, size, 0, flags);
	if (IS_ERR(obj))
		return obj;

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto put;

	list_add(&obj->st_link, objects);
	return obj;

put:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static void igt_object_release(struct drm_i915_gem_object *obj)
{
	i915_gem_object_lock(obj, NULL);
	i915_gem_object_unpin_pages(obj);
	__i915_gem_object_put_pages(obj);
	i915_gem_object_unlock(obj);
	list_del(&obj->st_link);
	i915_gem_object_put(obj);
}

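/*
 * Walk the object's sg list and check that the DMA addresses form one
 * contiguous range.
 */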
static bool is_contiguous(struct drm_i915_gem_object *obj)
{
	struct scatterlist *sg;
	dma_addr_t addr = -1;

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		if (addr != -1 && sg_dma_address(sg) != addr)
			return false;

		addr = sg_dma_address(sg) + sg_dma_len(sg);
	}

	return true;
}

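/*
 * Reserve randomly sized ranges at random offsets within each 32M chunk of
 * a fresh mock region, then check that exactly the remaining space can
 * still be allocated.
 */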
static int igt_mock_reserve(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	resource_size_t avail = resource_size(&mem->region);
	struct drm_i915_gem_object *obj;
	const u32 chunk_size = SZ_32M;
	u32 i, offset, count, *order;
	u64 allocated, cur_avail;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	int err = 0;

	count = avail / chunk_size;
	order = i915_random_order(count, &prng);
	if (!order)
		return 0;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_free_order;
	}

	/* Reserve a bunch of ranges within the region */
	for (i = 0; i < count; ++i) {
		u64 start = order[i] * chunk_size;
		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);

		/* Allow for some really big holes */
		if (!size)
			continue;

		size = round_up(size, PAGE_SIZE);
		offset = igt_random_offset(&prng, 0, chunk_size, size,
					   PAGE_SIZE);

		err = intel_memory_region_reserve(mem, start + offset, size);
		if (err) {
			pr_err("%s failed to reserve range", __func__);
			goto out_close;
		}

		/* XXX: maybe sanity check the block range here? */
		avail -= size;
	}

	/* Try to see if we can allocate from the remaining space */
	allocated = 0;
	cur_avail = avail;
	do {
		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);

		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
		obj = igt_object_create(mem, &objects, size, 0);
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -ENXIO)
				break;

			err = PTR_ERR(obj);
			goto out_close;
		}
		cur_avail -= size;
		allocated += size;
	} while (1);

	if (allocated != avail) {
		pr_err("%s mismatch between allocation and free space", __func__);
		err = -EINVAL;
	}

out_close:
	close_objects(mem, &objects);
	intel_memory_region_destroy(mem);
out_free_order:
	kfree(order);
	return err;
}

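/*
 * Exercise I915_BO_ALLOC_CONTIGUOUS: min, max and random sizes must come
 * back as a single contiguous range, and once the region is fragmented a
 * contiguous allocation larger than the biggest free block must fail even
 * though enough total space remains.
 */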
static int igt_mock_contiguous(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_gem_object *obj;
	unsigned long n_objects;
	LIST_HEAD(objects);
	LIST_HEAD(holes);
	I915_RND_STATE(prng);
	resource_size_t total;
	resource_size_t min;
	u64 target;
	int err = 0;

	total = resource_size(&mem->region);

	/* Min size */
	obj = igt_object_create(mem, &objects, PAGE_SIZE,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s min object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Max size */
	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (!is_contiguous(obj)) {
		pr_err("%s max object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/* Internal fragmentation should not bleed into the object size */
	target = i915_prandom_u64_state(&prng);
	div64_u64_rem(target, total, &target);
	target = round_up(target, PAGE_SIZE);
	target = max_t(u64, PAGE_SIZE, target);

	obj = igt_object_create(mem, &objects, target,
				I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (obj->base.size != target) {
		pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__,
		       obj->base.size, target);
		err = -EINVAL;
		goto err_close_objects;
	}

	if (!is_contiguous(obj)) {
		pr_err("%s object spans disjoint sg entries\n", __func__);
		err = -EINVAL;
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Try to fragment the address space, such that half of it is free, but
	 * the max contiguous block size is SZ_64K.
	 */

	target = SZ_64K;
	n_objects = div64_u64(total, target);

	while (n_objects--) {
		struct list_head *list;

		if (n_objects % 2)
			list = &holes;
		else
			list = &objects;

		obj = igt_object_create(mem, list, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_close_objects;
		}
	}

	close_objects(mem, &holes);

	min = target;
	target = total >> 1;

	/* Make sure we can still allocate all the fragmented space */
	obj = igt_object_create(mem, &objects, target, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto err_close_objects;
	}

	igt_object_release(obj);

	/*
	 * Even though we have enough free space, we don't have a big enough
	 * contiguous block. Make sure that holds true.
	 */

	do {
		bool should_fail = target > min;

		obj = igt_object_create(mem, &objects, target,
					I915_BO_ALLOC_CONTIGUOUS);
		if (should_fail != IS_ERR(obj)) {
			pr_err("%s target allocation(%llx) mismatch\n",
			       __func__, target);
			err = -EINVAL;
			goto err_close_objects;
		}

		target >>= 1;
	} while (target >= PAGE_SIZE);

err_close_objects:
	list_splice_tail(&holes, &objects);
	close_objects(mem, &objects);
	return err;
}

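/*
 * Check a region whose size is not a power-of-two: max_order should match
 * the rounded-down size, the full region must still be allocatable, and a
 * contiguous allocation rounded up beyond the region size must be rejected.
 */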
static int igt_mock_splintered_region(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ttm_buddy_resource *res;
	struct drm_i915_gem_object *obj;
	struct drm_buddy *mm;
	unsigned int expected_order;
	LIST_HEAD(objects);
	u64 size;
	int err = 0;

	/*
	 * Sanity check we can still allocate everything even if the
	 * mm.max_order != mm.size. i.e our starting address space size is not a
	 * power-of-two.
	 */

	size = (SZ_4G - 1) & PAGE_MASK;
	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_close;
	}

	res = to_ttm_buddy_resource(obj->mm.res);
	mm = res->mm;
	if (mm->size != size) {
		pr_err("%s size mismatch(%llu != %llu)\n",
		       __func__, mm->size, size);
		err = -EINVAL;
		goto out_put;
	}

	expected_order = get_order(rounddown_pow_of_two(size));
	if (mm->max_order != expected_order) {
		pr_err("%s order mismatch(%u != %u)\n",
		       __func__, mm->max_order, expected_order);
		err = -EINVAL;
		goto out_put;
	}

	close_objects(mem, &objects);

	obj = igt_object_create(mem, &objects, roundup_pow_of_two(size),
				I915_BO_ALLOC_CONTIGUOUS);
	if (!IS_ERR(obj)) {
		pr_err("%s too large contiguous allocation was not rejected\n",
		       __func__);
		err = -EINVAL;
		goto out_close;
	}

	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj)) {
		pr_err("%s largest possible contiguous allocation failed\n",
		       __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_destroy(mem);
	return err;
}

#ifndef SZ_8G
#define SZ_8G BIT_ULL(33)
#endif

static int igt_mock_max_segment(void *arg)
{
	struct intel_memory_region *mem = arg;
	struct drm_i915_private *i915 = mem->i915;
	struct i915_ttm_buddy_resource *res;
	struct drm_i915_gem_object *obj;
	struct drm_buddy_block *block;
	struct drm_buddy *mm;
	struct list_head *blocks;
	struct scatterlist *sg;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	unsigned int max_segment;
	unsigned int ps;
	u64 size;
	int err = 0;

	/*
	 * While we may create very large contiguous blocks, we may need
	 * to break those down for consumption elsewhere. In particular,
	 * dma-mapping with scatterlist elements have an implicit limit of
	 * UINT_MAX on each element.
	 */

	size = SZ_8G;
	ps = PAGE_SIZE;
	if (i915_prandom_u64_state(&prng) & 1)
		ps = SZ_64K; /* For something like DG2 */

	max_segment = round_down(UINT_MAX, ps);

	mem = mock_region_create(i915, 0, size, ps, 0, 0);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	obj = igt_object_create(mem, &objects, size, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_put;
	}

	res = to_ttm_buddy_resource(obj->mm.res);
	blocks = &res->blocks;
	mm = res->mm;
	size = 0;
	list_for_each_entry(block, blocks, link) {
		if (drm_buddy_block_size(mm, block) > size)
			size = drm_buddy_block_size(mm, block);
	}
	if (size < max_segment) {
		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
		       __func__, max_segment, size);
		err = -EINVAL;
		goto out_close;
	}

	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
		dma_addr_t daddr = sg_dma_address(sg);

		if (sg->length > max_segment) {
			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
			       __func__, sg->length, max_segment);
			err = -EINVAL;
			goto out_close;
		}

		if (!IS_ALIGNED(daddr, ps)) {
			pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n",
			       __func__, &daddr, ps);
			err = -EINVAL;
			goto out_close;
		}
	}

out_close:
	close_objects(mem, &objects);
out_put:
	intel_memory_region_destroy(mem);
	return err;
}

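/*
 * Sum how many bytes of the object's backing blocks fall within the
 * CPU-mappable (io) portion of its memory region.
 */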
static u64 igt_object_mappable_total(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = obj->mm.region;
	struct i915_ttm_buddy_resource *bman_res =
		to_ttm_buddy_resource(obj->mm.res);
	struct drm_buddy *mm = bman_res->mm;
	struct drm_buddy_block *block;
	u64 total;

	total = 0;
	list_for_each_entry(block, &bman_res->blocks, link) {
		u64 start = drm_buddy_block_offset(block);
		u64 end = start + drm_buddy_block_size(mm, block);

		if (start < resource_size(&mr->io))
			total += min_t(u64, end, resource_size(&mr->io)) - start;
	}

	return total;
}

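/*
 * Create a region where io_size < total: GPU-only allocations should be
 * able to consume all of it (dipping into the mappable portion if needed),
 * CPU-accessible allocations must land entirely within the mappable
 * portion, and must fail once only non-mappable space remains.
 */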
static int igt_mock_io_size(void *arg)
{
	struct intel_memory_region *mr = arg;
	struct drm_i915_private *i915 = mr->i915;
	struct drm_i915_gem_object *obj;
	u64 mappable_theft_total;
	u64 io_size;
	u64 total;
	u64 ps;
	u64 rem;
	u64 size;
	I915_RND_STATE(prng);
	LIST_HEAD(objects);
	int err = 0;

	ps = SZ_4K;
	if (i915_prandom_u64_state(&prng) & 1)
		ps = SZ_64K; /* For something like DG2 */

	div64_u64_rem(i915_prandom_u64_state(&prng), SZ_8G, &total);
	total = round_down(total, ps);
	total = max_t(u64, total, SZ_1G);

	div64_u64_rem(i915_prandom_u64_state(&prng), total - ps, &io_size);
	io_size = round_down(io_size, ps);
	io_size = max_t(u64, io_size, SZ_256M); /* 256M seems to be the common lower limit */

	pr_info("%s with ps=%llx, io_size=%llx, total=%llx\n",
		__func__, ps, io_size, total);

	mr = mock_region_create(i915, 0, total, ps, 0, io_size);
	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto out_err;
	}

	mappable_theft_total = 0;
	rem = total - io_size;
	do {
		div64_u64_rem(i915_prandom_u64_state(&prng), rem, &size);
		size = round_down(size, ps);
		size = max(size, ps);

		obj = igt_object_create(mr, &objects, size,
					I915_BO_ALLOC_GPU_ONLY);
		if (IS_ERR(obj)) {
			pr_err("%s TOPDOWN failed with rem=%llx, size=%llx\n",
			       __func__, rem, size);
			err = PTR_ERR(obj);
			goto out_close;
		}

		mappable_theft_total += igt_object_mappable_total(obj);
		rem -= size;
	} while (rem);

	pr_info("%s mappable theft=(%lluMiB/%lluMiB), total=%lluMiB\n",
		__func__,
		(u64)mappable_theft_total >> 20,
		(u64)io_size >> 20,
		(u64)total >> 20);

	/*
	 * Even if we allocate all of the non-mappable portion, we should still
	 * be able to dip into the mappable portion.
	 */
	obj = igt_object_create(mr, &objects, io_size,
				I915_BO_ALLOC_GPU_ONLY);
	if (IS_ERR(obj)) {
		pr_err("%s allocation unexpectedly failed\n", __func__);
		err = PTR_ERR(obj);
		goto out_close;
	}

	close_objects(mr, &objects);

	rem = io_size;
	do {
		div64_u64_rem(i915_prandom_u64_state(&prng), rem, &size);
		size = round_down(size, ps);
		size = max(size, ps);

		obj = igt_object_create(mr, &objects, size, 0);
		if (IS_ERR(obj)) {
			pr_err("%s MAPPABLE failed with rem=%llx, size=%llx\n",
			       __func__, rem, size);
			err = PTR_ERR(obj);
			goto out_close;
		}

		if (igt_object_mappable_total(obj) != size) {
			pr_err("%s allocation is not mappable(size=%llx)\n",
			       __func__, size);
			err = -EINVAL;
			goto out_close;
		}
		rem -= size;
	} while (rem);

	/*
	 * We assume CPU access is required by default, which should result in a
	 * failure here, even though the non-mappable portion is free.
	 */
	obj = igt_object_create(mr, &objects, ps, 0);
	if (!IS_ERR(obj)) {
		pr_err("%s allocation unexpectedly succeeded\n", __func__);
		err = -EINVAL;
		goto out_close;
	}

out_close:
	close_objects(mr, &objects);
	intel_memory_region_destroy(mr);
out_err:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_gpu_write_dw(struct intel_context *ce,
			    struct i915_vma *vma,
			    u32 dword,
			    u32 value)
{
	return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
			       vma->size >> PAGE_SHIFT, value);
}

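/*
 * Read back the chosen dword from every page through a WC mapping and
 * check it matches the expected value.
 */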
static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
{
	unsigned long n = obj->base.size >> PAGE_SHIFT;
	u32 *ptr;
	int err;

	err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	ptr += dword;
	while (n--) {
		if (*ptr != val) {
			pr_err("base[%u]=%08x, val=%08x\n",
			       dword, *ptr, val);
			err = -EINVAL;
			break;
		}

		ptr += PAGE_SIZE / sizeof(*ptr);
	}

	i915_gem_object_unpin_map(obj);
	return err;
}

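/*
 * Pick engines at random, have each fill a random dword of the object from
 * the GPU and then verify the result from the CPU, until the timeout.
 */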
static int igt_gpu_write(struct i915_gem_context *ctx,
			 struct drm_i915_gem_object *obj)
{
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	struct i915_address_space *vm;
	struct intel_context *ce;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	unsigned int count;
	struct i915_vma *vma;
	int *order;
	int i, n;
	int err = 0;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	n = 0;
	count = 0;
	for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
		count++;
		if (!intel_engine_can_store_dword(ce->engine))
			continue;

		vm = ce->vm;
		n++;
	}
	i915_gem_context_unlock_engines(ctx);
	if (!n)
		return 0;

	order = i915_random_order(count * count, &prng);
	if (!order)
		return -ENOMEM;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto out_free;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto out_free;

	i = 0;
	engines = i915_gem_context_lock_engines(ctx);
	do {
		u32 rng = prandom_u32_state(&prng);
		u32 dword = offset_in_page(rng) / 4;

		ce = engines->engines[order[i] % engines->num_engines];
		i = (i + 1) % (count * count);
		if (!ce || !intel_engine_can_store_dword(ce->engine))
			continue;

		err = igt_gpu_write_dw(ce, vma, dword, rng);
		if (err)
			break;

		i915_gem_object_lock(obj, NULL);
		err = igt_cpu_check(obj, dword, rng);
		i915_gem_object_unlock(obj);
		if (err)
			break;
	} while (!__igt_timeout(end_time, NULL));
	i915_gem_context_unlock_engines(ctx);

out_free:
	kfree(order);

	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_lmem_create(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

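/*
 * Create lmem objects with explicit page sizes from PAGE_SIZE up to 1G and
 * check the backing store is aligned to the requested page size.
 */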
static int igt_lmem_create_with_ps(void *arg)
{
	struct drm_i915_private *i915 = arg;
	int err = 0;
	u32 ps;

	for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
		struct drm_i915_gem_object *obj;
		dma_addr_t daddr;

		obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			if (err == -ENXIO || err == -E2BIG) {
				pr_info("%s not enough lmem for ps(%u) err=%d\n",
					__func__, ps, err);
				err = 0;
			}

			break;
		}

		if (obj->base.size != ps) {
			pr_err("%s size(%zu) != ps(%u)\n",
			       __func__, obj->base.size, ps);
			err = -EINVAL;
			goto out_put;
		}

		i915_gem_object_lock(obj, NULL);
		err = i915_gem_object_pin_pages(obj);
		if (err) {
			if (err == -ENXIO || err == -E2BIG || err == -ENOMEM) {
				pr_info("%s not enough lmem for ps(%u) err=%d\n",
					__func__, ps, err);
				err = 0;
			}
			goto out_put;
		}

		daddr = i915_gem_object_get_dma_address(obj, 0);
		if (!IS_ALIGNED(daddr, ps)) {
			pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
			       __func__, &daddr, ps);
			err = -EINVAL;
			goto out_unpin;
		}

out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);

		if (err)
			break;
	}

	return err;
}

static int igt_lmem_create_cleared_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 size, i;
	int err;

	i915_gem_drain_freed_objects(i915);

	size = max_t(u32, PAGE_SIZE, i915_prandom_u32_max_state(SZ_32M, &prng));
	size = round_up(size, PAGE_SIZE);
	i = 0;

	do {
		struct drm_i915_gem_object *obj;
		unsigned int flags;
		u32 dword, val;
		void *vaddr;

		/*
		 * Alternate between cleared and uncleared allocations, while
		 * also dirtying the pages each time to check that the pages are
		 * always cleared if requested, since we should get some overlap
		 * of the underlying pages, if not all, since we are the only
		 * user.
		 */

		flags = I915_BO_ALLOC_CPU_CLEAR;
		if (i & 1)
			flags = 0;

		obj = i915_gem_object_create_lmem(i915, size, flags);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		i915_gem_object_lock(obj, NULL);
		err = i915_gem_object_pin_pages(obj);
		if (err)
			goto out_put;

		dword = i915_prandom_u32_max_state(PAGE_SIZE / sizeof(u32),
						   &prng);

		if (flags & I915_BO_ALLOC_CPU_CLEAR) {
			err = igt_cpu_check(obj, dword, 0);
			if (err) {
				pr_err("%s failed with size=%u, flags=%u\n",
				       __func__, size, flags);
				goto out_unpin;
			}
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto out_unpin;
		}

		val = prandom_u32_state(&prng);

		memset32(vaddr, val, obj->base.size / sizeof(u32));

		i915_gem_object_flush_map(obj);
		i915_gem_object_unpin_map(obj);
out_unpin:
		i915_gem_object_unpin_pages(obj);
		__i915_gem_object_put_pages(obj);
out_put:
		i915_gem_object_unlock(obj);
		i915_gem_object_put(obj);

		if (err)
			break;
		++i;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s completed (%u) iterations\n", __func__, i);

	return err;
}

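/*
 * Allocate a randomly sized lmem object and exercise GPU writes to it via
 * igt_gpu_write() using a live context.
 */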
static int igt_lmem_write_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct file *file;
	I915_RND_STATE(prng);
	u32 sz;
	int err;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_file;
	}

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);

	obj = i915_gem_object_create_lmem(i915, sz, 0);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);
		goto out_file;
	}

	err = i915_gem_object_pin_pages_unlocked(obj);
	if (err)
		goto out_put;

	err = igt_gpu_write(ctx, obj);
	if (err)
		pr_err("igt_gpu_write failed(%d)\n", err);

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
out_file:
	fput(file);
	return err;
}

static struct intel_engine_cs *
random_engine_class(struct drm_i915_private *i915,
		    unsigned int class,
		    struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for (engine = intel_engine_lookup_user(i915, class, 0);
	     engine && engine->uabi_class == class;
	     engine = rb_entry_safe(rb_next(&engine->uabi_node),
				    typeof(*engine), uabi_node))
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	return intel_engine_lookup_user(i915, class, count);
}

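/*
 * Clear a contiguous lmem object from the GPU, then hammer it with CPU
 * writes of various sizes and alignments through a WC mapping, sampling
 * random dwords to verify the writes landed.
 */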
static int igt_lmem_write_cpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	u32 bytes[] = {
		0, /* rng placeholder */
		sizeof(u32),
		sizeof(u64),
		64, /* cl */
		PAGE_SIZE,
		PAGE_SIZE - sizeof(u32),
		PAGE_SIZE - sizeof(u64),
		PAGE_SIZE - 64,
	};
	struct intel_engine_cs *engine;
	struct i915_request *rq;
	u32 *vaddr;
	u32 sz;
	u32 i;
	int *order;
	int count;
	int err;

	engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng);
	if (!engine)
		return 0;

	pr_info("%s: using %s\n", __func__, engine->name);

	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
	sz = max_t(u32, 2 * PAGE_SIZE, sz);

	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		err = PTR_ERR(vaddr);
		goto out_put;
	}

	i915_gem_object_lock(obj, NULL);

	err = dma_resv_reserve_fences(obj->base.resv, 1);
	if (err) {
		i915_gem_object_unlock(obj);
		goto out_put;
	}

	/* Put the pages into a known state -- from the gpu for added fun */
	intel_engine_pm_get(engine);
	err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
					  obj->mm.pages->sgl,
					  i915_gem_get_pat_index(i915,
								 I915_CACHE_NONE),
					  true, 0xdeadbeaf, &rq);
	if (rq) {
		dma_resv_add_fence(obj->base.resv, &rq->fence,
				   DMA_RESV_USAGE_WRITE);
		i915_request_put(rq);
	}

	intel_engine_pm_put(engine);
	if (!err)
		err = i915_gem_object_set_to_wc_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err)
		goto out_unpin;

	count = ARRAY_SIZE(bytes);
	order = i915_random_order(count * count, &prng);
	if (!order) {
		err = -ENOMEM;
		goto out_unpin;
	}

	/* A random multiple of u32, picked between [64, PAGE_SIZE - 64] */
	bytes[0] = igt_random_offset(&prng, 64, PAGE_SIZE - 64, 0, sizeof(u32));
	GEM_BUG_ON(!IS_ALIGNED(bytes[0], sizeof(u32)));

	i = 0;
	do {
		u32 offset;
		u32 align;
		u32 dword;
		u32 size;
		u32 val;

		size = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = bytes[order[i] % count];
		i = (i + 1) % (count * count);

		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));

		offset = igt_random_offset(&prng, 0, obj->base.size,
					   size, align);

		val = prandom_u32_state(&prng);
		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
			 size / sizeof(u32));

		/*
		 * Sample random dw -- don't waste precious time reading every
		 * single dw.
		 */
		dword = igt_random_offset(&prng, offset,
					  offset + size,
					  sizeof(u32), sizeof(u32));
		dword /= sizeof(u32);
		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
			       size, align, offset);
			err = -EINVAL;
			break;
		}
	} while (!__igt_timeout(end_time, NULL));

out_unpin:
	i915_gem_object_unpin_map(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

static const char *repr_type(u32 type)
{
	switch (type) {
	case I915_MAP_WB:
		return "WB";
	case I915_MAP_WC:
		return "WC";
	}

	return "";
}

static struct drm_i915_gem_object *
create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
			  void **out_addr)
{
	struct drm_i915_gem_object *obj;
	void *addr;

	obj = i915_gem_object_create_region(mr, size, 0, 0);
	if (IS_ERR(obj)) {
		if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
			return ERR_PTR(-ENODEV);
		return obj;
	}

	addr = i915_gem_object_pin_map_unlocked(obj, type);
	if (IS_ERR(addr)) {
		i915_gem_object_put(obj);
		if (PTR_ERR(addr) == -ENXIO)
			return ERR_PTR(-ENODEV);
		return addr;
	}

	*out_addr = addr;
	return obj;
}

static int wrap_ktime_compare(const void *A, const void *B)
{
	const ktime_t *a = A, *b = B;

	return ktime_compare(*a, *b);
}

static void igt_memcpy_long(void *dst, const void *src, size_t size)
{
	unsigned long *tmp = dst;
	const unsigned long *s = src;

	size = size / sizeof(unsigned long);
	while (size--)
		*tmp++ = *s++;
}

static inline void igt_memcpy(void *dst, const void *src, size_t size)
{
	memcpy(dst, src, size);
}

static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
{
	i915_memcpy_from_wc(dst, src, size);
}

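/*
 * Time several memcpy variants between the given src/dst mappings over a
 * few passes and report the bandwidth based on the middle samples.
 */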
static int _perf_memcpy(struct intel_memory_region *src_mr,
			struct intel_memory_region *dst_mr,
			u64 size, u32 src_type, u32 dst_type)
{
	struct drm_i915_private *i915 = src_mr->i915;
	const struct {
		const char *name;
		void (*copy)(void *dst, const void *src, size_t size);
		bool skip;
	} tests[] = {
		{
			"memcpy",
			igt_memcpy,
		},
		{
			"memcpy_long",
			igt_memcpy_long,
		},
		{
			"memcpy_from_wc",
			igt_memcpy_from_wc,
			!i915_has_memcpy_from_wc(),
		},
	};
	struct drm_i915_gem_object *src, *dst;
	void *src_addr, *dst_addr;
	int ret = 0;
	int i;

	src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
	if (IS_ERR(src)) {
		ret = PTR_ERR(src);
		goto out;
	}

	dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		goto out_unpin_src;
	}

	for (i = 0; i < ARRAY_SIZE(tests); ++i) {
		ktime_t t[5];
		int pass;

		if (tests[i].skip)
			continue;

		for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
			ktime_t t0, t1;

			t0 = ktime_get();

			tests[i].copy(dst_addr, src_addr, size);

			t1 = ktime_get();
			t[pass] = ktime_sub(t1, t0);
		}

		sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
		if (t[0] <= 0) {
			/* ignore the impossible to protect our sanity */
			pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n",
				 __func__,
				 src_mr->name, repr_type(src_type),
				 dst_mr->name, repr_type(dst_type),
				 tests[i].name, size >> 10,
				 t[0], t[4]);
			continue;
		}

		pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
			__func__,
			src_mr->name, repr_type(src_type),
			dst_mr->name, repr_type(dst_type),
			tests[i].name, size >> 10,
			div64_u64(mul_u32_u32(4 * size,
					      1000 * 1000 * 1000),
				  t[1] + 2 * t[2] + t[3]) >> 20);

		cond_resched();
	}

	i915_gem_object_unpin_map(dst);
	i915_gem_object_put(dst);
out_unpin_src:
	i915_gem_object_unpin_map(src);
	i915_gem_object_put(src);

	i915_gem_drain_freed_objects(i915);
out:
	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int perf_memcpy(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static const u32 types[] = {
		I915_MAP_WB,
		I915_MAP_WC,
	};
	static const u32 sizes[] = {
		SZ_4K,
		SZ_64K,
		SZ_4M,
	};
	struct intel_memory_region *src_mr, *dst_mr;
	int src_id, dst_id;
	int i, j, k;
	int ret;

	for_each_memory_region(src_mr, i915, src_id) {
		for_each_memory_region(dst_mr, i915, dst_id) {
			for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
				for (j = 0; j < ARRAY_SIZE(types); ++j) {
					for (k = 0; k < ARRAY_SIZE(types); ++k) {
						ret = _perf_memcpy(src_mr,
								   dst_mr,
								   sizes[i],
								   types[j],
								   types[k]);
						if (ret)
							return ret;
					}
				}
			}
		}
	}

	return 0;
}

int intel_memory_region_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_reserve),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_mock_contiguous),
		SUBTEST(igt_mock_splintered_region),
		SUBTEST(igt_mock_max_segment),
		SUBTEST(igt_mock_io_size),
	};
	struct intel_memory_region *mem;
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0, 0);
	if (IS_ERR(mem)) {
		pr_err("failed to create memory region\n");
		err = PTR_ERR(mem);
		goto out_unref;
	}

	err = i915_subtests(tests, mem);

	intel_memory_region_destroy(mem);
out_unref:
	mock_destroy_device(i915);
	return err;
}

int intel_memory_region_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_lmem_create),
		SUBTEST(igt_lmem_create_with_ps),
		SUBTEST(igt_lmem_create_cleared_cpu),
		SUBTEST(igt_lmem_write_cpu),
		SUBTEST(igt_lmem_write_gpu),
	};

	if (!HAS_LMEM(i915)) {
		pr_info("device lacks LMEM support, skipping\n");
		return 0;
	}

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}

int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(perf_memcpy),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_live_subtests(tests, i915);
}