// SPDX-License-Identifier: GPL-2.0 AND MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <kunit/static_stub.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>

#include "xe_device.h"
#include "xe_ggtt.h"
#include "xe_guc_ct.h"
#include "xe_kunit_helpers.h"
#include "xe_pci_test.h"

#define DUT_GGTT_START		SZ_1M
#define DUT_GGTT_SIZE		SZ_2M

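/*
 * Replacement for xe_managed_bo_create_pin_map(), installed via
 * kunit_activate_static_stub(): backs the BO with plain kernel memory
 * and, when XE_BO_FLAG_GGTT is requested, takes a node from the
 * test-local GGTT range, so the cache can be exercised without real
 * hardware.
 */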
static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *xe,
							      struct xe_tile *tile,
							      size_t size, u32 flags)
{
	struct kunit *test = kunit_get_current_test();
	struct xe_bo *bo;
	void *buf;

	bo = drmm_kzalloc(&xe->drm, sizeof(*bo), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo);

	buf = drmm_kzalloc(&xe->drm, size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);

	bo->tile = tile;
	bo->ttm.bdev = &xe->ttm;
	bo->size = size;
	iosys_map_set_vaddr(&bo->vmap, buf);

	if (flags & XE_BO_FLAG_GGTT) {
		struct xe_ggtt *ggtt = tile->mem.ggtt;

		bo->ggtt_node[tile->id] = xe_ggtt_node_init(ggtt);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);

		KUNIT_ASSERT_EQ(test, 0,
				drm_mm_insert_node_in_range(&ggtt->mm,
							    &bo->ggtt_node[tile->id]->base,
							    bo->size, SZ_4K,
							    0, 0, U64_MAX, 0));
	}

	return bo;
}

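/*
 * Suite init: fake a PF-mode Tigerlake PCI device, set up a small
 * stand-alone GGTT range of DUT_GGTT_SIZE starting at DUT_GGTT_START,
 * stub out BO creation, then initialize the GuC buffer cache under
 * test and hand it to the test cases via test->priv.
 */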
static int guc_buf_test_init(struct kunit *test)
{
	struct xe_pci_fake_data fake = {
		.sriov_mode = XE_SRIOV_MODE_PF,
		.platform = XE_TIGERLAKE, /* some random platform */
		.subplatform = XE_SUBPLATFORM_NONE,
	};
	struct xe_ggtt *ggtt;
	struct xe_guc *guc;

	test->priv = &fake;
	xe_kunit_helper_xe_device_test_init(test);

	ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
	guc = &xe_device_get_gt(test->priv, 0)->uc.guc;

	drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE);
	mutex_init(&ggtt->lock);

	kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
				   replacement_xe_managed_bo_create_pin_map);

	KUNIT_ASSERT_EQ(test, 0, xe_guc_buf_cache_init(&guc->buf));

	test->priv = &guc->buf;
	return 0;
}

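/* The smallest (1 dword) reservation must be valid and land inside the DUT GGTT range. */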
static void test_smallest(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, 1);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	xe_guc_buf_release(buf);
}

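/* Reserving the cache's full capacity in a single buffer must also succeed. */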
static void test_largest(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
	KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	xe_guc_buf_release(buf);
}

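/* The full cache capacity must be reservable as individual 1-dword buffers. */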
static void test_granular(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf *bufs;
	int n, dwords;

	dwords = xe_guc_buf_cache_dwords(cache);
	bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, bufs);

	for (n = 0; n < dwords; n++)
		bufs[n] = xe_guc_buf_reserve(cache, 1);

	for (n = 0; n < dwords; n++)
		KUNIT_EXPECT_TRUE_MSG(test, xe_guc_buf_is_valid(bufs[n]), "n=%d", n);

	for (n = 0; n < dwords; n++)
		xe_guc_buf_release(bufs[n]);
}

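/* Concurrently reserved buffers must have distinct CPU pointers and GPU addresses. */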
static void test_unique(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf *bufs;
	int n, m, dwords;

	dwords = xe_guc_buf_cache_dwords(cache);
	bufs = kunit_kcalloc(test, dwords, sizeof(*bufs), GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, bufs);

	for (n = 0; n < dwords; n++)
		bufs[n] = xe_guc_buf_reserve(cache, 1);

	for (n = 0; n < dwords; n++) {
		for (m = n + 1; m < dwords; m++) {
			KUNIT_EXPECT_PTR_NE_MSG(test, xe_guc_buf_cpu_ptr(bufs[n]),
						xe_guc_buf_cpu_ptr(bufs[m]), "n=%d, m=%d", n, m);
			KUNIT_ASSERT_NE_MSG(test, xe_guc_buf_gpu_addr(bufs[n]),
					    xe_guc_buf_gpu_addr(bufs[m]), "n=%d, m=%d", n, m);
		}
	}

	for (n = 0; n < dwords; n++)
		xe_guc_buf_release(bufs[n]);
}

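/* Two half-cache reservations must not overlap, in either CPU or GPU address space. */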
static void test_overlap(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf b1, b2;
	u32 dwords = xe_guc_buf_cache_dwords(cache) / 2;
	u32 bytes = dwords * sizeof(u32);
	void *p1, *p2;
	u64 a1, a2;

	b1 = xe_guc_buf_reserve(cache, dwords);
	b2 = xe_guc_buf_reserve(cache, dwords);

	p1 = xe_guc_buf_cpu_ptr(b1);
	p2 = xe_guc_buf_cpu_ptr(b2);

	a1 = xe_guc_buf_gpu_addr(b1);
	a2 = xe_guc_buf_gpu_addr(b2);

	KUNIT_EXPECT_PTR_NE(test, p1, p2);
	if (p1 < p2)
		KUNIT_EXPECT_LT(test, (uintptr_t)(p1 + bytes - 1), (uintptr_t)p2);
	else
		KUNIT_EXPECT_LT(test, (uintptr_t)(p2 + bytes - 1), (uintptr_t)p1);

	KUNIT_EXPECT_NE(test, a1, a2);
	if (a1 < a2)
		KUNIT_EXPECT_LT(test, a1 + bytes - 1, a2);
	else
		KUNIT_EXPECT_LT(test, a2 + bytes - 1, a1);

	xe_guc_buf_release(b1);
	xe_guc_buf_release(b2);
}

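/* Released space must be recycled: a repeated full-size reservation gets the same CPU pointer and GPU address back. */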
static void test_reusable(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf b1, b2;
	void *p1;
	u64 a1;

	b1 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(b1));
	KUNIT_EXPECT_NOT_NULL(test, p1 = xe_guc_buf_cpu_ptr(b1));
	KUNIT_EXPECT_NE(test, 0, a1 = xe_guc_buf_gpu_addr(b1));
	xe_guc_buf_release(b1);

	b2 = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache));
	KUNIT_EXPECT_PTR_EQ(test, p1, xe_guc_buf_cpu_ptr(b2));
	KUNIT_EXPECT_EQ(test, a1, xe_guc_buf_gpu_addr(b2));
	xe_guc_buf_release(b2);
}

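/* An oversized reservation must fail gracefully and be safe to release. */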
static void test_too_big(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;

	buf = xe_guc_buf_reserve(cache, xe_guc_buf_cache_dwords(cache) + 1);
	KUNIT_EXPECT_FALSE(test, xe_guc_buf_is_valid(buf));
	xe_guc_buf_release(buf); /* shouldn't crash */
}

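/* After xe_guc_buf_flush(), data written through the CPU pointer must be visible in the backing suballocator BO. */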
static void test_flush(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	const u32 dwords = xe_guc_buf_cache_dwords(cache);
	const u32 bytes = dwords * sizeof(u32);
	u32 *s, *p, *d;
	int n;

	KUNIT_ASSERT_NOT_NULL(test, s = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));
	KUNIT_ASSERT_NOT_NULL(test, d = kunit_kcalloc(test, dwords, sizeof(u32), GFP_KERNEL));

	for (n = 0; n < dwords; n++)
		s[n] = n;

	buf = xe_guc_buf_reserve(cache, dwords);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_PTR_NE(test, p, s);
	KUNIT_EXPECT_PTR_NE(test, p, d);

	memcpy(p, s, bytes);
	KUNIT_EXPECT_NE(test, 0, xe_guc_buf_flush(buf));

	iosys_map_memcpy_from(d, &cache->sam->bo->vmap, 0, bytes);
	KUNIT_EXPECT_MEMEQ(test, s, d, bytes);

	xe_guc_buf_release(buf);
}

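/*
 * xe_guc_cache_gpu_addr_from_ptr() must resolve every dword of a
 * reservation to its GPU address and reject out-of-range pointers.
 */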
static void test_lookup(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	u32 dwords;
	u64 addr;
	u32 *p;
	int n;

	dwords = xe_guc_buf_cache_dwords(cache);
	buf = xe_guc_buf_reserve(cache, dwords);
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_ASSERT_NE(test, 0, addr = xe_guc_buf_gpu_addr(buf));

	KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p - 1, sizeof(u32)));
	KUNIT_EXPECT_EQ(test, 0, xe_guc_cache_gpu_addr_from_ptr(cache, p + dwords, sizeof(u32)));

	for (n = 0; n < dwords; n++)
		KUNIT_EXPECT_EQ_MSG(test, xe_guc_cache_gpu_addr_from_ptr(cache, p + n, sizeof(u32)),
				    addr + n * sizeof(u32), "n=%d", n);

	xe_guc_buf_release(buf);
}

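/* A buffer created with xe_guc_buf_from_data() must hold a copy of that data. */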
static void test_data(struct kunit *test)
{
	static const u32 data[] = { 1, 2, 3, 4, 5, 6 };
	struct xe_guc_buf_cache *cache = test->priv;
	struct xe_guc_buf buf;
	void *p;

	buf = xe_guc_buf_from_data(cache, data, sizeof(data));
	KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
	KUNIT_ASSERT_NOT_NULL(test, p = xe_guc_buf_cpu_ptr(buf));
	KUNIT_EXPECT_MEMEQ(test, p, data, sizeof(data));

	xe_guc_buf_release(buf);
}

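/*
 * The scope-based CLASS(xe_guc_buf, ...) helper must auto-release its
 * reservation at scope exit, so back-to-back scopes can each claim the
 * full cache capacity.
 */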
static void test_class(struct kunit *test)
{
	struct xe_guc_buf_cache *cache = test->priv;
	u32 dwords = xe_guc_buf_cache_dwords(cache);

	{
		CLASS(xe_guc_buf, buf)(cache, dwords);
		KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
		KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
		KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	}

	{
		CLASS(xe_guc_buf, buf)(cache, dwords);
		KUNIT_ASSERT_TRUE(test, xe_guc_buf_is_valid(buf));
		KUNIT_EXPECT_NOT_NULL(test, xe_guc_buf_cpu_ptr(buf));
		KUNIT_EXPECT_NE(test, 0, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_LE(test, DUT_GGTT_START, xe_guc_buf_gpu_addr(buf));
		KUNIT_EXPECT_GT(test, DUT_GGTT_START + DUT_GGTT_SIZE, xe_guc_buf_gpu_addr(buf));
	}
}

static struct kunit_case guc_buf_test_cases[] = {
	KUNIT_CASE(test_smallest),
	KUNIT_CASE(test_largest),
	KUNIT_CASE(test_granular),
	KUNIT_CASE(test_unique),
	KUNIT_CASE(test_overlap),
	KUNIT_CASE(test_reusable),
	KUNIT_CASE(test_too_big),
	KUNIT_CASE(test_flush),
	KUNIT_CASE(test_lookup),
	KUNIT_CASE(test_data),
	KUNIT_CASE(test_class),
	{}
};

static struct kunit_suite guc_buf_suite = {
	.name = "guc_buf",
	.test_cases = guc_buf_test_cases,
	.init = guc_buf_test_init,
};

kunit_test_suites(&guc_buf_suite);