// SPDX-License-Identifier: GPL-2.0-only
// Copyright(c) 2021 Intel Corporation. All rights reserved.

#include <linux/platform_device.h>
#include <linux/memory_hotplug.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/mm.h>
#include <cxlmem.h>

#include "../watermark.h"
#include "mock.h"

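/*
 * Selects which CFMWS interleave math the mock CEDT advertises:
 * 0 == modulo arithmetic (cfmws0-5), 1 == XOR arithmetic (cfmws6-8).
 * Exposed as the 'interleave_arithmetic' module parameter below.
 */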
static int interleave_arithmetic;

#define FAKE_QTG_ID 42

#define NR_CXL_HOST_BRIDGES 2
#define NR_CXL_SINGLE_HOST 1
#define NR_CXL_RCH 1
#define NR_CXL_ROOT_PORTS 2
#define NR_CXL_SWITCH_PORTS 2
#define NR_CXL_PORT_DECODERS 8
#define NR_BRIDGES (NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + NR_CXL_RCH)

static struct platform_device *cxl_acpi;
static struct platform_device *cxl_host_bridge[NR_CXL_HOST_BRIDGES];
#define NR_MULTI_ROOT (NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS)
static struct platform_device *cxl_root_port[NR_MULTI_ROOT];
static struct platform_device *cxl_switch_uport[NR_MULTI_ROOT];
#define NR_MEM_MULTI \
	(NR_CXL_HOST_BRIDGES * NR_CXL_ROOT_PORTS * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_switch_dport[NR_MEM_MULTI];

static struct platform_device *cxl_hb_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_root_single[NR_CXL_SINGLE_HOST];
static struct platform_device *cxl_swu_single[NR_CXL_SINGLE_HOST];
#define NR_MEM_SINGLE (NR_CXL_SINGLE_HOST * NR_CXL_SWITCH_PORTS)
static struct platform_device *cxl_swd_single[NR_MEM_SINGLE];

struct platform_device *cxl_mem[NR_MEM_MULTI];
struct platform_device *cxl_mem_single[NR_MEM_SINGLE];

static struct platform_device *cxl_rch[NR_CXL_RCH];
static struct platform_device *cxl_rcd[NR_CXL_RCH];

static inline bool is_multi_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (&cxl_host_bridge[i]->dev == dev)
			return true;
	return false;
}

static inline bool is_single_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (&cxl_hb_single[i]->dev == dev)
			return true;
	return false;
}

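/*
 * Mock ACPI companions for the emulated host bridges. Each entry uses
 * its own address as a fake acpi_handle so that find_host_bridge() can
 * map a handle back to its acpi_device by pointer comparison.
 */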
static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_BRIDGES] = {
	[0] = {
		.handle = &host_bridge[0],
		.pnp.unique_id = "0",
	},
	[1] = {
		.handle = &host_bridge[1],
		.pnp.unique_id = "1",
	},
	[2] = {
		.handle = &host_bridge[2],
		.pnp.unique_id = "2",
	},
	[3] = {
		.handle = &host_bridge[3],
		.pnp.unique_id = "3",
	},
};

static bool is_mock_dev(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++)
		if (dev == &cxl_mem[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++)
		if (dev == &cxl_mem_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++)
		if (dev == &cxl_rcd[i]->dev)
			return true;
	if (dev == &cxl_acpi->dev)
		return true;
	return false;
}

static bool is_mock_adev(struct acpi_device *adev)
{
	int i;

	if (adev == &acpi0017_mock)
		return true;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (adev == &host_bridge[i])
			return true;

	return false;
}

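/*
 * Mock CEDT: one CHBS per emulated host bridge (the last advertises
 * CXL 1.1 for the RCH case), CFMWS entries 0-5 for modulo interleave
 * arithmetic, entries 6-8 for XOR arithmetic, and a CXIMS subtable
 * carrying the XOR maps referenced by the XOR windows.
 */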
static struct {
	struct acpi_table_cedt cedt;
	struct acpi_cedt_chbs chbs[NR_BRIDGES];
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws0;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws1;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws2;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws3;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws4;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws5;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[1];
	} cfmws6;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[2];
	} cfmws7;
	struct {
		struct acpi_cedt_cfmws cfmws;
		u32 target[3];
	} cfmws8;
	struct {
		struct acpi_cedt_cxims cxims;
		u64 xormap_list[2];
	} cxims0;
} __packed mock_cedt = {
	.cedt = {
		.header = {
			.signature = "CEDT",
			.length = sizeof(mock_cedt),
			.revision = 1,
		},
	},
	.chbs[0] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 0,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[1] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 1,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[2] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 2,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL20,
	},
	.chbs[3] = {
		.header = {
			.type = ACPI_CEDT_TYPE_CHBS,
			.length = sizeof(mock_cedt.chbs[0]),
		},
		.uid = 3,
		.cxl_version = ACPI_CEDT_CHBS_VERSION_CXL11,
	},
	.cfmws0 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws0),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws1 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws1),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws2 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws2),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 0 },
	},
	.cfmws3 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws3),
			},
			.interleave_ways = 1,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws4 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws4),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 4UL,
		},
		.target = { 2 },
	},
	.cfmws5 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws5),
			},
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_VOLATILE,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M,
		},
		.target = { 3 },
	},
	/* .cfmws6,7,8 use ACPI_CEDT_CFMWS_ARITHMETIC_XOR */
	.cfmws6 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws6),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 0,
			.granularity = 4,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, },
	},
	.cfmws7 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws7),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 1,
			.granularity = 0,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_256M * 8UL,
		},
		.target = { 0, 1, },
	},
	.cfmws8 = {
		.cfmws = {
			.header = {
				.type = ACPI_CEDT_TYPE_CFMWS,
				.length = sizeof(mock_cedt.cfmws8),
			},
			.interleave_arithmetic = ACPI_CEDT_CFMWS_ARITHMETIC_XOR,
			.interleave_ways = 8,
			.granularity = 1,
			.restrictions = ACPI_CEDT_CFMWS_RESTRICT_TYPE3 |
					ACPI_CEDT_CFMWS_RESTRICT_PMEM,
			.qtg_id = FAKE_QTG_ID,
			.window_size = SZ_512M * 6UL,
		},
		.target = { 0, 1, 2, },
	},
	.cxims0 = {
		.cxims = {
			.header = {
				.type = ACPI_CEDT_TYPE_CXIMS,
				.length = sizeof(mock_cedt.cxims0),
			},
			.hbig = 0,
			.nr_xormaps = 2,
		},
		.xormap_list = { 0x404100, 0x808200, },
	},
};

struct acpi_cedt_cfmws *mock_cfmws[] = {
	[0] = &mock_cedt.cfmws0.cfmws,
	[1] = &mock_cedt.cfmws1.cfmws,
	[2] = &mock_cedt.cfmws2.cfmws,
	[3] = &mock_cedt.cfmws3.cfmws,
	[4] = &mock_cedt.cfmws4.cfmws,
	[5] = &mock_cedt.cfmws5.cfmws,
	/* Modulo Math above, XOR Math below */
	[6] = &mock_cedt.cfmws6.cfmws,
	[7] = &mock_cedt.cfmws7.cfmws,
	[8] = &mock_cedt.cfmws8.cfmws,
};

static int cfmws_start;
static int cfmws_end;
#define CFMWS_MOD_ARRAY_START 0
#define CFMWS_MOD_ARRAY_END 5
#define CFMWS_XOR_ARRAY_START 6
#define CFMWS_XOR_ARRAY_END 8

struct acpi_cedt_cxims *mock_cxims[1] = {
	[0] = &mock_cedt.cxims0.cxims,
};

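/*
 * Tracks ranges handed out from cxl_mock_pool so they can be returned
 * by depopulate_all_mock_resources().
 */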
struct cxl_mock_res {
	struct list_head list;
	struct range range;
};

static LIST_HEAD(mock_res);
static DEFINE_MUTEX(mock_res_lock);
static struct gen_pool *cxl_mock_pool;

static void depopulate_all_mock_resources(void)
{
	struct cxl_mock_res *res, *_res;

	mutex_lock(&mock_res_lock);
	list_for_each_entry_safe(res, _res, &mock_res, list) {
		gen_pool_free(cxl_mock_pool, res->range.start,
			      range_len(&res->range));
		list_del(&res->list);
		kfree(res);
	}
	mutex_unlock(&mock_res_lock);
}

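/* Carve an aligned range out of the mock address pool and track it. */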
static struct cxl_mock_res *alloc_mock_res(resource_size_t size, int align)
{
	struct cxl_mock_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
	struct genpool_data_align data = {
		.align = align,
	};
	unsigned long phys;

	if (!res)
		return NULL;

	INIT_LIST_HEAD(&res->list);
	phys = gen_pool_alloc_algo(cxl_mock_pool, size,
				   gen_pool_first_fit_align, &data);
	if (!phys) {
		kfree(res);
		return NULL;
	}

	res->range = (struct range) {
		.start = phys,
		.end = phys + size - 1,
	};
	mutex_lock(&mock_res_lock);
	list_add(&res->list, &mock_res);
	mutex_unlock(&mock_res_lock);

	return res;
}

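/*
 * Assign mock physical addresses to the CHBS register blocks and to
 * the CFMWS windows selected by [cfmws_start, cfmws_end].
 */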
static int populate_cedt(void)
{
	struct cxl_mock_res *res;
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
		struct acpi_cedt_chbs *chbs = &mock_cedt.chbs[i];
		resource_size_t size;

		if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20)
			size = ACPI_CEDT_CHBS_LENGTH_CXL20;
		else
			size = ACPI_CEDT_CHBS_LENGTH_CXL11;

		res = alloc_mock_res(size, size);
		if (!res)
			return -ENOMEM;
		chbs->base = res->range.start;
		chbs->length = size;
	}

	for (i = cfmws_start; i <= cfmws_end; i++) {
		struct acpi_cedt_cfmws *window = mock_cfmws[i];

		res = alloc_mock_res(window->window_size, SZ_256M);
		if (!res)
			return -ENOMEM;
		window->base_hpa = res->range.start;
	}

	return 0;
}

static bool is_mock_port(struct device *dev);

/*
 * WARNING: this hack assumes that 'struct cxl_cfmws_context' and
 * 'struct cxl_chbs_context' share the property that their first
 * member is the cxl_test device being probed by the cxl_acpi driver.
 */
struct cxl_cedt_context {
	struct device *dev;
};

static int mock_acpi_table_parse_cedt(enum acpi_cedt_type id,
				      acpi_tbl_entry_handler_arg handler_arg,
				      void *arg)
{
	struct cxl_cedt_context *ctx = arg;
	struct device *dev = ctx->dev;
	union acpi_subtable_headers *h;
	unsigned long end;
	int i;

	if (!is_mock_port(dev) && !is_mock_dev(dev))
		return acpi_table_parse_cedt(id, handler_arg, arg);

	if (id == ACPI_CEDT_TYPE_CHBS)
		for (i = 0; i < ARRAY_SIZE(mock_cedt.chbs); i++) {
			h = (union acpi_subtable_headers *)&mock_cedt.chbs[i];
			end = (unsigned long)&mock_cedt.chbs[i + 1];
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CFMWS)
		for (i = cfmws_start; i <= cfmws_end; i++) {
			h = (union acpi_subtable_headers *)mock_cfmws[i];
			end = (unsigned long)h + mock_cfmws[i]->header.length;
			handler_arg(h, arg, end);
		}

	if (id == ACPI_CEDT_TYPE_CXIMS)
		for (i = 0; i < ARRAY_SIZE(mock_cxims); i++) {
			h = (union acpi_subtable_headers *)mock_cxims[i];
			end = (unsigned long)h + mock_cxims[i]->header.length;
			handler_arg(h, arg, end);
		}

	return 0;
}

static bool is_mock_bridge(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++)
		if (dev == &cxl_host_bridge[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++)
		if (dev == &cxl_hb_single[i]->dev)
			return true;
	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++)
		if (dev == &cxl_rch[i]->dev)
			return true;

	return false;
}

static bool is_mock_port(struct device *dev)
{
	int i;

	if (is_mock_bridge(dev))
		return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++)
		if (dev == &cxl_root_port[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++)
		if (dev == &cxl_switch_uport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++)
		if (dev == &cxl_switch_dport[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++)
		if (dev == &cxl_root_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++)
		if (dev == &cxl_swu_single[i]->dev)
			return true;

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++)
		if (dev == &cxl_swd_single[i]->dev)
			return true;

	if (is_cxl_memdev(dev))
		return is_mock_dev(dev->parent);

	return false;
}

static int host_bridge_index(struct acpi_device *adev)
{
	return adev - host_bridge;
}

static struct acpi_device *find_host_bridge(acpi_handle handle)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(host_bridge); i++)
		if (handle == host_bridge[i].handle)
			return &host_bridge[i];
	return NULL;
}

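/*
 * Intercept _UID evaluation for mock host bridges; all other requests
 * fall through to the real acpi_evaluate_integer().
 */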
static acpi_status
mock_acpi_evaluate_integer(acpi_handle handle, acpi_string pathname,
			   struct acpi_object_list *arguments,
			   unsigned long long *data)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev || strcmp(pathname, METHOD_NAME__UID) != 0)
		return acpi_evaluate_integer(handle, pathname, arguments, data);

	*data = host_bridge_index(adev);
	return AE_OK;
}

static struct pci_bus mock_pci_bus[NR_BRIDGES];
static struct acpi_pci_root mock_pci_root[ARRAY_SIZE(mock_pci_bus)] = {
	[0] = {
		.bus = &mock_pci_bus[0],
	},
	[1] = {
		.bus = &mock_pci_bus[1],
	},
	[2] = {
		.bus = &mock_pci_bus[2],
	},
	[3] = {
		.bus = &mock_pci_bus[3],
	},
};

static bool is_mock_bus(struct pci_bus *bus)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mock_pci_bus); i++)
		if (bus == &mock_pci_bus[i])
			return true;
	return false;
}

static struct acpi_pci_root *mock_acpi_pci_find_root(acpi_handle handle)
{
	struct acpi_device *adev = find_host_bridge(handle);

	if (!adev)
		return acpi_pci_find_root(handle);
	return &mock_pci_root[host_bridge_index(adev)];
}

static struct cxl_hdm *mock_cxl_setup_hdm(struct cxl_port *port,
					  struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_hdm *cxlhdm = devm_kzalloc(&port->dev, sizeof(*cxlhdm), GFP_KERNEL);
	struct device *dev = &port->dev;

	if (!cxlhdm)
		return ERR_PTR(-ENOMEM);

	cxlhdm->port = port;
	cxlhdm->interleave_mask = ~0U;
	cxlhdm->iw_cap_mask = ~0UL;
	dev_set_drvdata(dev, cxlhdm);
	return cxlhdm;
}

static int mock_cxl_add_passthrough_decoder(struct cxl_port *port)
{
	dev_err(&port->dev, "unexpected passthrough decoder for cxl_test\n");
	return -EOPNOTSUPP;
}

struct target_map_ctx {
	int *target_map;
	int index;
	int target_count;
};

static int map_targets(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct target_map_ctx *ctx = data;

	ctx->target_map[ctx->index++] = pdev->id;

	if (ctx->index > ctx->target_count) {
		dev_WARN_ONCE(dev, 1, "too many targets found?\n");
		return -ENXIO;
	}

	return 0;
}

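/*
 * Emulate the hardware commit-ordering rule for HDM decoders:
 * decoder N of a port cannot be committed before decoder N - 1.
 */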
static int mock_decoder_commit(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if (cxld->flags & CXL_DECODER_F_ENABLE)
		return 0;

	dev_dbg(&port->dev, "%s commit\n", dev_name(&cxld->dev));
	if (cxl_num_decoders_committed(port) != id) {
		dev_dbg(&port->dev,
			"%s: out of order commit, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id,
			cxl_num_decoders_committed(port));
		return -EBUSY;
	}

	port->commit_end++;
	cxld->flags |= CXL_DECODER_F_ENABLE;

	return 0;
}

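/* Counterpart to mock_decoder_commit(), enforcing in-order reset. */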
static void mock_decoder_reset(struct cxl_decoder *cxld)
{
	struct cxl_port *port = to_cxl_port(cxld->dev.parent);
	int id = cxld->id;

	if ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)
		return;

	dev_dbg(&port->dev, "%s reset\n", dev_name(&cxld->dev));
	if (port->commit_end == id)
		cxl_port_commit_reap(cxld);
	else
		dev_dbg(&port->dev,
			"%s: out of order reset, expected decoder%d.%d\n",
			dev_name(&cxld->dev), port->id, port->commit_end);
	cxld->flags &= ~CXL_DECODER_F_ENABLE;
}

static void default_mock_decoder(struct cxl_decoder *cxld)
{
	cxld->hpa_range = (struct range){
		.start = 0,
		.end = -1,
	};

	cxld->interleave_ways = 1;
	cxld->interleave_granularity = 256;
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;
}

static int first_decoder(struct device *dev, const void *data)
{
	struct cxl_decoder *cxld;

	if (!is_switch_decoder(dev))
		return 0;
	cxld = to_cxl_decoder(dev);
	if (cxld->id == 0)
		return 1;
	return 0;
}

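/*
 * Emulate a platform-firmware established (auto-committed) region:
 * the first decoder of cxl_mem.0 and cxl_mem.4 is pre-programmed to
 * decode a 512MB span of cfmws0, and the switch and host-bridge
 * decoders above them are programmed to match.
 */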
static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
{
	struct acpi_cedt_cfmws *window = mock_cfmws[0];
	struct platform_device *pdev = NULL;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *port, *iter;
	const int size = SZ_512M;
	struct cxl_memdev *cxlmd;
	struct cxl_dport *dport;
	struct device *dev;
	bool hb0 = false;
	u64 base;
	int i;

	if (is_endpoint_decoder(&cxld->dev)) {
		cxled = to_cxl_endpoint_decoder(&cxld->dev);
		cxlmd = cxled_to_memdev(cxled);
		WARN_ON(!dev_is_platform(cxlmd->dev.parent));
		pdev = to_platform_device(cxlmd->dev.parent);

		/* check if the endpoint is attached to host-bridge0 */
		port = cxled_to_port(cxled);
		do {
			if (port->uport_dev == &cxl_host_bridge[0]->dev) {
				hb0 = true;
				break;
			}
			if (is_cxl_port(port->dev.parent))
				port = to_cxl_port(port->dev.parent);
			else
				port = NULL;
		} while (port);
		port = cxled_to_port(cxled);
	}

	/*
	 * The first decoder on the first 2 devices on the first switch
	 * attached to host-bridge0 mocks a fake / static RAM region. All
	 * other decoders are default disabled. Given the round robin
	 * assignment those devices are named cxl_mem.0 and cxl_mem.4.
	 *
	 * See 'cxl list -BMPu -m cxl_mem.0,cxl_mem.4'
	 */
	if (!hb0 || pdev->id % 4 || pdev->id > 4 || cxld->id > 0) {
		default_mock_decoder(cxld);
		return;
	}

	base = window->base_hpa;
	cxld->hpa_range = (struct range) {
		.start = base,
		.end = base + size - 1,
	};

	cxld->interleave_ways = 2;
	eig_to_granularity(window->granularity, &cxld->interleave_granularity);
	cxld->target_type = CXL_DECODER_HOSTONLYMEM;
	cxld->flags = CXL_DECODER_F_ENABLE;
	cxled->state = CXL_DECODER_STATE_AUTO;
	port->commit_end = cxld->id;
	devm_cxl_dpa_reserve(cxled, 0, size / cxld->interleave_ways, 0);
	cxld->commit = mock_decoder_commit;
	cxld->reset = mock_decoder_reset;

	/*
	 * Now that the endpoint decoder is set up, walk up the hierarchy
	 * and set up the switch and root port decoders targeting @cxlmd.
	 */
	iter = port;
	for (i = 0; i < 2; i++) {
		dport = iter->parent_dport;
		iter = dport->port;
		dev = device_find_child(&iter->dev, NULL, first_decoder);
		/*
		 * Ancestor ports are guaranteed to be enumerated before
		 * @port, and all ports have at least one decoder.
		 */
		if (WARN_ON(!dev))
			continue;
		cxlsd = to_cxl_switch_decoder(dev);
		if (i == 0) {
			/* put cxl_mem.4 second in the decode order */
			if (pdev->id == 4)
				cxlsd->target[1] = dport;
			else
				cxlsd->target[0] = dport;
		} else {
			cxlsd->target[0] = dport;
		}
		cxld = &cxlsd->cxld;
		cxld->target_type = CXL_DECODER_HOSTONLYMEM;
		cxld->flags = CXL_DECODER_F_ENABLE;
		iter->commit_end = 0;
		/*
		 * The switch targets 2 endpoints, while the host bridge
		 * targets one root port.
		 */
		if (i == 0)
			cxld->interleave_ways = 2;
		else
			cxld->interleave_ways = 1;
		cxld->interleave_granularity = 4096;
		cxld->hpa_range = (struct range) {
			.start = base,
			.end = base + size - 1,
		};
		put_device(dev);
	}
}

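/*
 * Stand-in for devm_cxl_enumerate_decoders(): rather than parsing HDM
 * capability registers, allocate NR_CXL_PORT_DECODERS decoders per
 * port and derive each target list from the platform-device children
 * of the port's uport.
 */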
static int mock_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
				       struct cxl_endpoint_dvsec_info *info)
{
	struct cxl_port *port = cxlhdm->port;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	int target_count, i;

	if (is_cxl_endpoint(port))
		target_count = 0;
	else if (is_cxl_root(parent_port))
		target_count = NR_CXL_ROOT_PORTS;
	else
		target_count = NR_CXL_SWITCH_PORTS;

	for (i = 0; i < NR_CXL_PORT_DECODERS; i++) {
		int target_map[CXL_DECODER_MAX_INTERLEAVE] = { 0 };
		struct target_map_ctx ctx = {
			.target_map = target_map,
			.target_count = target_count,
		};
		struct cxl_decoder *cxld;
		int rc;

		if (target_count) {
			struct cxl_switch_decoder *cxlsd;

			cxlsd = cxl_switch_decoder_alloc(port, target_count);
			if (IS_ERR(cxlsd)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxlsd);
			}
			cxld = &cxlsd->cxld;
		} else {
			struct cxl_endpoint_decoder *cxled;

			cxled = cxl_endpoint_decoder_alloc(port);
			if (IS_ERR(cxled)) {
				dev_warn(&port->dev,
					 "Failed to allocate the decoder\n");
				return PTR_ERR(cxled);
			}
			cxld = &cxled->cxld;
		}

		mock_init_hdm_decoder(cxld);

		if (target_count) {
			rc = device_for_each_child(port->uport_dev, &ctx,
						   map_targets);
			if (rc) {
				put_device(&cxld->dev);
				return rc;
			}
		}

		rc = cxl_decoder_add_locked(cxld, target_map);
		if (rc) {
			put_device(&cxld->dev);
			dev_err(&port->dev, "Failed to add decoder\n");
			return rc;
		}

		rc = cxl_decoder_autoremove(&port->dev, cxld);
		if (rc)
			return rc;
		dev_dbg(&cxld->dev, "Added to port %s\n", dev_name(&port->dev));
	}

	return 0;
}

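/*
 * Stand-in for devm_cxl_port_enumerate_dports(): select the dport
 * array by topology depth (1 == root ports under a host bridge,
 * 2 == switch downstream ports) and register only the entries
 * parented to this port's uport.
 */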
static int mock_cxl_port_enumerate_dports(struct cxl_port *port)
{
	struct platform_device **array;
	int i, array_size;

	if (port->depth == 1) {
		if (is_multi_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_port);
			array = cxl_root_port;
		} else if (is_single_bridge(port->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_root_single);
			array = cxl_root_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else if (port->depth == 2) {
		struct cxl_port *parent = to_cxl_port(port->dev.parent);

		if (is_multi_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_switch_dport);
			array = cxl_switch_dport;
		} else if (is_single_bridge(parent->uport_dev)) {
			array_size = ARRAY_SIZE(cxl_swd_single);
			array = cxl_swd_single;
		} else {
			dev_dbg(&port->dev, "%s: unknown bridge type\n",
				dev_name(port->uport_dev));
			return -ENXIO;
		}
	} else {
		dev_WARN_ONCE(&port->dev, 1, "unexpected depth %d\n",
			      port->depth);
		return -ENXIO;
	}

	for (i = 0; i < array_size; i++) {
		struct platform_device *pdev = array[i];
		struct cxl_dport *dport;

		if (pdev->dev.parent != port->uport_dev) {
			dev_dbg(&port->dev, "%s: mismatch parent %s\n",
				dev_name(port->uport_dev),
				dev_name(pdev->dev.parent));
			continue;
		}

		dport = devm_cxl_add_dport(port, &pdev->dev, pdev->id,
					   CXL_RESOURCE_NONE);
		if (IS_ERR(dport))
			return PTR_ERR(dport);
	}

	return 0;
}

/*
 * Fake the cxl_dpa_perf attributes for the memdev when appropriate.
 */
static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
			   struct cxl_dpa_perf *dpa_perf)
{
	dpa_perf->qos_class = FAKE_QTG_ID;
	dpa_perf->dpa_range = *range;
	for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
		dpa_perf->coord[i].read_latency = 500;
		dpa_perf->coord[i].write_latency = 500;
		dpa_perf->coord[i].read_bandwidth = 1000;
		dpa_perf->coord[i].write_bandwidth = 1000;
	}
}

static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
{
	struct cxl_root *cxl_root __free(put_cxl_root) =
		find_cxl_root(port);
	struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];

	if (!cxl_root)
		return;

	for (int i = 0; i < cxlds->nr_partitions; i++) {
		struct resource *res = &cxlds->part[i].res;
		struct cxl_dpa_perf *perf = &cxlds->part[i].perf;
		struct range range = {
			.start = res->start,
			.end = res->end,
		};

		dpa_perf_setup(port, &range, perf);
	}

	cxl_memdev_update_perf(cxlmd);

	/*
	 * This function is here only to test the topology iterator. It
	 * serves no other purpose.
	 */
	cxl_endpoint_get_perf_coordinates(port, ep_c);
}

static struct cxl_mock_ops cxl_mock_ops = {
	.is_mock_adev = is_mock_adev,
	.is_mock_bridge = is_mock_bridge,
	.is_mock_bus = is_mock_bus,
	.is_mock_port = is_mock_port,
	.is_mock_dev = is_mock_dev,
	.acpi_table_parse_cedt = mock_acpi_table_parse_cedt,
	.acpi_evaluate_integer = mock_acpi_evaluate_integer,
	.acpi_pci_find_root = mock_acpi_pci_find_root,
	.devm_cxl_port_enumerate_dports = mock_cxl_port_enumerate_dports,
	.devm_cxl_setup_hdm = mock_cxl_setup_hdm,
	.devm_cxl_add_passthrough_decoder = mock_cxl_add_passthrough_decoder,
	.devm_cxl_enumerate_decoders = mock_cxl_enumerate_decoders,
	.cxl_endpoint_parse_cdat = mock_cxl_endpoint_parse_cdat,
	.list = LIST_HEAD_INIT(cxl_mock_ops.list),
};

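/*
 * Associate a mock acpi_device with @dev by hand, mimicking the
 * fwnode linkage that the ACPI core normally establishes for real
 * ACPI companions.
 */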
static void mock_companion(struct acpi_device *adev, struct device *dev)
{
	device_initialize(&adev->dev);
	fwnode_init(&adev->fwnode, NULL);
	dev->fwnode = &adev->fwnode;
	adev->fwnode.dev = dev;
}

#ifndef SZ_64G
#define SZ_64G (SZ_32G * 2)
#endif

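/*
 * Instantiate the restricted CXL host (RCH) topology: one additional
 * host bridge, backed by the CXL 1.1 CHBS, linked via a
 * "firmware_node" sysfs link rather than the "physical_node" link the
 * CXL 2.0 bridges use.
 */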
static __init int cxl_rch_topo_init(void)
{
	int rc, i;

	for (i = 0; i < ARRAY_SIZE(cxl_rch); i++) {
		int idx = NR_CXL_HOST_BRIDGES + NR_CXL_SINGLE_HOST + i;
		struct acpi_device *adev = &host_bridge[idx];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", idx);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_rch[i] = pdev;
		mock_pci_bus[idx].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "firmware_node");
		if (rc)
			goto err_bridge;
	}

	return 0;

err_bridge:
	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}

	return rc;
}

static void cxl_rch_topo_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rch) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_rch[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "firmware_node");
		platform_device_unregister(cxl_rch[i]);
	}
}

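/*
 * Instantiate the single-root-port topology: one host bridge with one
 * root port, one switch uport, and NR_CXL_SWITCH_PORTS downstream
 * ports, with device ids numbered after the multi-bridge topology.
 */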
static __init int cxl_single_topo_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_hb_single); i++) {
		struct acpi_device *adev =
			&host_bridge[NR_CXL_HOST_BRIDGES + i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge",
					     NR_CXL_HOST_BRIDGES + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_hb_single[i] = pdev;
		mock_pci_bus[i + NR_CXL_HOST_BRIDGES].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_single); i++) {
		struct platform_device *bridge =
			cxl_hb_single[i % ARRAY_SIZE(cxl_hb_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port",
					     NR_MULTI_ROOT + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_port;
		}
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swu_single); i++) {
		struct platform_device *root_port = cxl_root_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport",
					     NR_MULTI_ROOT + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_uport;
		}
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_swu_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_swd_single); i++) {
		struct platform_device *uport =
			cxl_swu_single[i % ARRAY_SIZE(cxl_swu_single)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport",
					     i + NR_MEM_MULTI);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_dport;
		}
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_swd_single[i] = pdev;
	}

	return 0;

err_dport:
	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}

	return rc;
}

static void cxl_single_topo_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_swd_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swd_single[i]);
	for (i = ARRAY_SIZE(cxl_swu_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_swu_single[i]);
	for (i = ARRAY_SIZE(cxl_root_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_single[i]);
	for (i = ARRAY_SIZE(cxl_hb_single) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_hb_single[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_hb_single[i]);
	}
}

static void cxl_mem_exit(void)
{
	int i;

	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
}

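/*
 * Register a cxl_mem endpoint beneath every switch downstream port
 * and a cxl_rcd endpoint beneath each RCH, alternating NUMA nodes via
 * set_dev_node() so the topology spans two nodes.
 */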
static int cxl_mem_init(void)
{
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(cxl_mem); i++) {
		struct platform_device *dport = cxl_switch_dport[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_mem;
		}
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_mem;
		}
		cxl_mem[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_mem_single); i++) {
		struct platform_device *dport = cxl_swd_single[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_mem", NR_MEM_MULTI + i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_single;
		}
		pdev->dev.parent = &dport->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_single;
		}
		cxl_mem_single[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_rcd); i++) {
		int idx = NR_MEM_MULTI + NR_MEM_SINGLE + i;
		struct platform_device *rch = cxl_rch[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_rcd", idx);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_rcd;
		}
		pdev->dev.parent = &rch->dev;
		set_dev_node(&pdev->dev, i % 2);

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_rcd;
		}
		cxl_rcd[i] = pdev;
	}

	return 0;

err_rcd:
	for (i = ARRAY_SIZE(cxl_rcd) - 1; i >= 0; i--)
		platform_device_unregister(cxl_rcd[i]);
err_single:
	for (i = ARRAY_SIZE(cxl_mem_single) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem_single[i]);
err_mem:
	for (i = ARRAY_SIZE(cxl_mem) - 1; i >= 0; i--)
		platform_device_unregister(cxl_mem[i]);
	return rc;
}

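/*
 * Module init: carve out a 64GB mock address space below both the end
 * of iomem and the memory-hotplug mappable limit, pick the CFMWS set
 * based on interleave_arithmetic, then register the mock topology in
 * hierarchy order: host bridges, root ports, switch uports and
 * dports, the single and RCH topologies, cxl_acpi, and finally the
 * memory devices.
 */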
static __init int cxl_test_init(void)
{
	int rc, i;
	struct range mappable;

	cxl_acpi_test();
	cxl_core_test();
	cxl_mem_test();
	cxl_pmem_test();
	cxl_port_test();

	register_cxl_mock_ops(&cxl_mock_ops);

	cxl_mock_pool = gen_pool_create(ilog2(SZ_2M), NUMA_NO_NODE);
	if (!cxl_mock_pool) {
		rc = -ENOMEM;
		goto err_gen_pool_create;
	}
	mappable = mhp_get_pluggable_range(true);

	rc = gen_pool_add(cxl_mock_pool,
			  min(iomem_resource.end + 1 - SZ_64G,
			      mappable.end + 1 - SZ_64G),
			  SZ_64G, NUMA_NO_NODE);
	if (rc)
		goto err_gen_pool_add;

	if (interleave_arithmetic == 1) {
		cfmws_start = CFMWS_XOR_ARRAY_START;
		cfmws_end = CFMWS_XOR_ARRAY_END;
	} else {
		cfmws_start = CFMWS_MOD_ARRAY_START;
		cfmws_end = CFMWS_MOD_ARRAY_END;
	}

	rc = populate_cedt();
	if (rc)
		goto err_populate;

	for (i = 0; i < ARRAY_SIZE(cxl_host_bridge); i++) {
		struct acpi_device *adev = &host_bridge[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_host_bridge", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_bridge;
		}

		mock_companion(adev, &pdev->dev);
		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_bridge;
		}

		cxl_host_bridge[i] = pdev;
		mock_pci_bus[i].bridge = &pdev->dev;
		rc = sysfs_create_link(&pdev->dev.kobj, &pdev->dev.kobj,
				       "physical_node");
		if (rc)
			goto err_bridge;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_root_port); i++) {
		struct platform_device *bridge =
			cxl_host_bridge[i % ARRAY_SIZE(cxl_host_bridge)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_root_port", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_port;
		}
		pdev->dev.parent = &bridge->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_port;
		}
		cxl_root_port[i] = pdev;
	}

	BUILD_BUG_ON(ARRAY_SIZE(cxl_switch_uport) != ARRAY_SIZE(cxl_root_port));
	for (i = 0; i < ARRAY_SIZE(cxl_switch_uport); i++) {
		struct platform_device *root_port = cxl_root_port[i];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_uport", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_uport;
		}
		pdev->dev.parent = &root_port->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_uport;
		}
		cxl_switch_uport[i] = pdev;
	}

	for (i = 0; i < ARRAY_SIZE(cxl_switch_dport); i++) {
		struct platform_device *uport =
			cxl_switch_uport[i % ARRAY_SIZE(cxl_switch_uport)];
		struct platform_device *pdev;

		pdev = platform_device_alloc("cxl_switch_dport", i);
		if (!pdev) {
			rc = -ENOMEM;
			goto err_dport;
		}
		pdev->dev.parent = &uport->dev;

		rc = platform_device_add(pdev);
		if (rc) {
			platform_device_put(pdev);
			goto err_dport;
		}
		cxl_switch_dport[i] = pdev;
	}

	rc = cxl_single_topo_init();
	if (rc)
		goto err_dport;

	rc = cxl_rch_topo_init();
	if (rc)
		goto err_single;

	cxl_acpi = platform_device_alloc("cxl_acpi", 0);
	if (!cxl_acpi) {
		rc = -ENOMEM;
		goto err_rch;
	}

	mock_companion(&acpi0017_mock, &cxl_acpi->dev);
	acpi0017_mock.dev.bus = &platform_bus_type;

	rc = platform_device_add(cxl_acpi);
	if (rc)
		goto err_root;

	rc = cxl_mem_init();
	if (rc)
		goto err_root;

	return 0;

err_root:
	platform_device_put(cxl_acpi);
err_rch:
	cxl_rch_topo_exit();
err_single:
	cxl_single_topo_exit();
err_dport:
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
err_uport:
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
err_port:
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
err_bridge:
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
err_populate:
	depopulate_all_mock_resources();
err_gen_pool_add:
	gen_pool_destroy(cxl_mock_pool);
err_gen_pool_create:
	unregister_cxl_mock_ops(&cxl_mock_ops);
	return rc;
}

static __exit void cxl_test_exit(void)
{
	int i;

	cxl_mem_exit();
	platform_device_unregister(cxl_acpi);
	cxl_rch_topo_exit();
	cxl_single_topo_exit();
	for (i = ARRAY_SIZE(cxl_switch_dport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_dport[i]);
	for (i = ARRAY_SIZE(cxl_switch_uport) - 1; i >= 0; i--)
		platform_device_unregister(cxl_switch_uport[i]);
	for (i = ARRAY_SIZE(cxl_root_port) - 1; i >= 0; i--)
		platform_device_unregister(cxl_root_port[i]);
	for (i = ARRAY_SIZE(cxl_host_bridge) - 1; i >= 0; i--) {
		struct platform_device *pdev = cxl_host_bridge[i];

		if (!pdev)
			continue;
		sysfs_remove_link(&pdev->dev.kobj, "physical_node");
		platform_device_unregister(cxl_host_bridge[i]);
	}
	depopulate_all_mock_resources();
	gen_pool_destroy(cxl_mock_pool);
	unregister_cxl_mock_ops(&cxl_mock_ops);
}

module_param(interleave_arithmetic, int, 0444);
MODULE_PARM_DESC(interleave_arithmetic, "Modulo:0, XOR:1");
module_init(cxl_test_init);
module_exit(cxl_test_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("cxl_test: setup module");
MODULE_IMPORT_NS("ACPI");
MODULE_IMPORT_NS("CXL");