1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Host side test driver to test endpoint functionality
4 *
5 * Copyright (C) 2017 Texas Instruments
6 * Author: Kishon Vijay Abraham I <kishon@ti.com>
7 */
8
9 #include <linux/crc32.h>
10 #include <linux/cleanup.h>
11 #include <linux/delay.h>
12 #include <linux/fs.h>
13 #include <linux/io.h>
14 #include <linux/interrupt.h>
15 #include <linux/irq.h>
16 #include <linux/miscdevice.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/random.h>
20 #include <linux/slab.h>
21 #include <linux/uaccess.h>
22 #include <linux/pci.h>
23 #include <linux/pci_ids.h>
24
25 #include <linux/pci_regs.h>
26
27 #include <uapi/linux/pcitest.h>
28
#define DRV_MODULE_NAME			"pci-endpoint-test"

/*
 * Register map of the endpoint test function, exposed through the test
 * register BAR. All offsets are in bytes; all registers are 32-bit.
 */
#define PCI_ENDPOINT_TEST_MAGIC		0x0

/* Host writes exactly one COMMAND_* bit here to start an operation. */
#define PCI_ENDPOINT_TEST_COMMAND	0x4
#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)
#define COMMAND_ENABLE_DOORBELL		BIT(6)
#define COMMAND_DISABLE_DOORBELL	BIT(7)
#define COMMAND_BAR_SUBRANGE_SETUP	BIT(8)
#define COMMAND_BAR_SUBRANGE_CLEAR	BIT(9)

/* Endpoint reports the outcome of the last command via STATUS_* bits. */
#define PCI_ENDPOINT_TEST_STATUS	0x8
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)
#define STATUS_DOORBELL_SUCCESS		BIT(9)
#define STATUS_DOORBELL_ENABLE_SUCCESS	BIT(10)
#define STATUS_DOORBELL_ENABLE_FAIL	BIT(11)
#define STATUS_DOORBELL_DISABLE_SUCCESS	BIT(12)
#define STATUS_DOORBELL_DISABLE_FAIL	BIT(13)
#define STATUS_BAR_SUBRANGE_SETUP_SUCCESS	BIT(14)
#define STATUS_BAR_SUBRANGE_SETUP_FAIL		BIT(15)
#define STATUS_BAR_SUBRANGE_CLEAR_SUCCESS	BIT(16)
#define STATUS_BAR_SUBRANGE_CLEAR_FAIL		BIT(17)

/* 64-bit source/destination bus addresses, split into two 32-bit halves. */
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE		0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM	0x20

/* IRQ type (PCITEST_IRQ_TYPE_*) and 1-based vector number for completion. */
#define PCI_ENDPOINT_TEST_IRQ_TYPE	0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER	0x28

#define PCI_ENDPOINT_TEST_FLAGS		0x2c

#define FLAG_USE_DMA			BIT(0)

/* Capability bits advertised by the endpoint. */
#define PCI_ENDPOINT_TEST_CAPS		0x30
#define CAP_UNALIGNED_ACCESS		BIT(0)
#define CAP_MSI				BIT(1)
#define CAP_MSIX			BIT(2)
#define CAP_INTX			BIT(3)
#define CAP_SUBRANGE_MAPPING		BIT(4)

/* Doorbell: which BAR/offset to write, and the value to write there. */
#define PCI_ENDPOINT_TEST_DB_BAR	0x34
#define PCI_ENDPOINT_TEST_DB_OFFSET	0x38
#define PCI_ENDPOINT_TEST_DB_DATA	0x3c

/* Device IDs of endpoint controllers known to implement the test function. */
#define PCI_DEVICE_ID_TI_AM654		0xb00c
#define PCI_DEVICE_ID_TI_J7200		0xb00f
#define PCI_DEVICE_ID_TI_AM64		0xb010
#define PCI_DEVICE_ID_TI_J721S2		0xb013
#define PCI_DEVICE_ID_LS1088A		0x80c0
#define PCI_DEVICE_ID_IMX8		0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588

/* Number of subranges each BAR is split into for the subrange test. */
#define PCI_ENDPOINT_TEST_BAR_SUBRANGE_NSUB	2

/* Allocates the per-device minor index used in the misc device name. */
static DEFINE_IDA(pci_endpoint_test_ida);

/* Map the embedded miscdevice back to its struct pci_endpoint_test. */
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)
116
/* Standard PCI BAR indices; NO_BAR means "no BAR assigned". */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
	NO_BAR = -1,
};
126
/* Per-device state for one bound endpoint-test PCI function. */
struct pci_endpoint_test {
	struct pci_dev *pdev;		/* bound PCI function */
	void __iomem *base;		/* mapping of the test register BAR */
	void __iomem *bar[PCI_STD_NUM_BARS];	/* per-BAR mappings; NULL if unmapped */
	struct completion irq_raised;	/* completed by the IRQ handler */
	int last_irq;			/* Linux IRQ number of the last interrupt seen */
	int num_irqs;			/* IRQ vectors currently allocated */
	int irq_type;			/* PCITEST_IRQ_TYPE_* currently in use */
	/* mutex to protect the ioctls */
	struct mutex mutex;
	struct miscdevice miscdev;	/* userspace interface (/dev/pci-endpoint-test.N) */
	enum pci_barno test_reg_bar;	/* BAR that holds the test registers */
	size_t alignment;		/* required DMA buffer alignment; 0 = none */
	u32 ep_caps;			/* CAP_* bits advertised by the endpoint */
	const char *name;		/* device name, also used for request_irq() */
};
143
/* Per-device-ID configuration carried in pci_device_id::driver_data. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;	/* which BAR carries the test registers */
	size_t alignment;		/* DMA buffer alignment requirement */
};
148
/* Read the 32-bit test register at @offset. */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	void __iomem *addr = test->base + offset;

	return readl(addr);
}
154
/* Write @value to the 32-bit test register at @offset. */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	void __iomem *addr = test->base + offset;

	writel(value, addr);
}
160
pci_endpoint_test_irqhandler(int irq,void * dev_id)161 static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
162 {
163 struct pci_endpoint_test *test = dev_id;
164 u32 reg;
165
166 reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
167 if (reg & STATUS_IRQ_RAISED) {
168 test->last_irq = irq;
169 complete(&test->irq_raised);
170 }
171
172 return IRQ_HANDLED;
173 }
174
pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test * test)175 static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
176 {
177 struct pci_dev *pdev = test->pdev;
178
179 pci_free_irq_vectors(pdev);
180 test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
181 }
182
pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test * test,int type)183 static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
184 int type)
185 {
186 int irq;
187 struct pci_dev *pdev = test->pdev;
188 struct device *dev = &pdev->dev;
189
190 switch (type) {
191 case PCITEST_IRQ_TYPE_INTX:
192 irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
193 if (irq < 0) {
194 dev_err(dev, "Failed to get Legacy interrupt\n");
195 return irq;
196 }
197
198 break;
199 case PCITEST_IRQ_TYPE_MSI:
200 irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
201 if (irq < 0) {
202 dev_err(dev, "Failed to get MSI interrupts\n");
203 return irq;
204 }
205
206 break;
207 case PCITEST_IRQ_TYPE_MSIX:
208 irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
209 if (irq < 0) {
210 dev_err(dev, "Failed to get MSI-X interrupts\n");
211 return irq;
212 }
213
214 break;
215 default:
216 dev_err(dev, "Invalid IRQ type selected\n");
217 return -EINVAL;
218 }
219
220 test->irq_type = type;
221 test->num_irqs = irq;
222
223 return 0;
224 }
225
pci_endpoint_test_release_irq(struct pci_endpoint_test * test)226 static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
227 {
228 int i;
229 struct pci_dev *pdev = test->pdev;
230
231 for (i = 0; i < test->num_irqs; i++)
232 free_irq(pci_irq_vector(pdev, i), test);
233
234 test->num_irqs = 0;
235 }
236
/*
 * Install pci_endpoint_test_irqhandler() on every allocated vector.
 *
 * On failure, report which vector could not be requested, then trim
 * test->num_irqs down to the vectors actually acquired so that
 * pci_endpoint_test_release_irq() frees only those, and return the
 * request_irq() error.
 */
static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int ret;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		ret = request_irq(pci_irq_vector(pdev, i),
				  pci_endpoint_test_irqhandler, IRQF_SHARED,
				  test->name, test);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	/* Log which vector failed; the message depends on the IRQ type. */
	switch (test->irq_type) {
	case PCITEST_IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case PCITEST_IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case PCITEST_IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	/* Only the first i vectors got a handler; free exactly those. */
	test->num_irqs = i;
	pci_endpoint_test_release_irq(test);

	return ret;
}
277
/* One distinctive fill pattern per BAR: byte 0xA0 + BAR index, repeated. */
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};
286
/*
 * Fill @write_buf with the BAR's test pattern, copy it into the BAR at
 * @offset, read the same window back into @read_buf and compare.
 *
 * Returns 0 when the readback matches what was written.
 */
static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno,
					resource_size_t offset, void *write_buf,
					void *read_buf, int size)
{
	void __iomem *win = test->bar[barno] + offset;

	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(win, write_buf, size);
	memcpy_fromio(read_buf, win, size);

	return memcmp(write_buf, read_buf, size);
}
299
pci_endpoint_test_bar(struct pci_endpoint_test * test,enum pci_barno barno)300 static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
301 enum pci_barno barno)
302 {
303 resource_size_t bar_size, offset = 0;
304 void *write_buf __free(kfree) = NULL;
305 void *read_buf __free(kfree) = NULL;
306 struct pci_dev *pdev = test->pdev;
307 int buf_size;
308
309 bar_size = pci_resource_len(pdev, barno);
310 if (!bar_size)
311 return -ENODATA;
312
313 if (!test->bar[barno])
314 return -ENOMEM;
315
316 if (barno == test->test_reg_bar)
317 bar_size = 0x4;
318
319 /*
320 * Allocate a buffer of max size 1MB, and reuse that buffer while
321 * iterating over the whole BAR size (which might be much larger).
322 */
323 buf_size = min(SZ_1M, bar_size);
324
325 write_buf = kmalloc(buf_size, GFP_KERNEL);
326 if (!write_buf)
327 return -ENOMEM;
328
329 read_buf = kmalloc(buf_size, GFP_KERNEL);
330 if (!read_buf)
331 return -ENOMEM;
332
333 while (offset < bar_size) {
334 if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
335 read_buf, buf_size))
336 return -EIO;
337 offset += buf_size;
338 }
339
340 return 0;
341 }
342
bar_test_pattern_with_offset(enum pci_barno barno,int offset)343 static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
344 {
345 u32 val;
346
347 /* Keep the BAR pattern in the top byte. */
348 val = bar_test_pattern[barno] & 0xff000000;
349 /* Store the (partial) offset in the remaining bytes. */
350 val |= offset & 0x00ffffff;
351
352 return val;
353 }
354
pci_endpoint_test_bars_write_bar(struct pci_endpoint_test * test,enum pci_barno barno)355 static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
356 enum pci_barno barno)
357 {
358 struct pci_dev *pdev = test->pdev;
359 int j, size;
360
361 size = pci_resource_len(pdev, barno);
362
363 if (barno == test->test_reg_bar)
364 size = 0x4;
365
366 for (j = 0; j < size; j += 4)
367 writel_relaxed(bar_test_pattern_with_offset(barno, j),
368 test->bar[barno] + j);
369 }
370
pci_endpoint_test_bars_read_bar(struct pci_endpoint_test * test,enum pci_barno barno)371 static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
372 enum pci_barno barno)
373 {
374 struct pci_dev *pdev = test->pdev;
375 struct device *dev = &pdev->dev;
376 int j, size;
377 u32 val;
378
379 size = pci_resource_len(pdev, barno);
380
381 if (barno == test->test_reg_bar)
382 size = 0x4;
383
384 for (j = 0; j < size; j += 4) {
385 u32 expected = bar_test_pattern_with_offset(barno, j);
386
387 val = readl_relaxed(test->bar[barno] + j);
388 if (val != expected) {
389 dev_err(dev,
390 "BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
391 barno, j, val, expected);
392 return -EIO;
393 }
394 }
395
396 return 0;
397 }
398
pci_endpoint_test_bars(struct pci_endpoint_test * test)399 static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
400 {
401 enum pci_barno bar;
402 int ret;
403
404 /* Write all BARs in order (without reading). */
405 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
406 if (test->bar[bar])
407 pci_endpoint_test_bars_write_bar(test, bar);
408
409 /*
410 * Read all BARs in order (without writing).
411 * If there is an address translation issue on the EP, writing one BAR
412 * might have overwritten another BAR. Ensure that this is not the case.
413 * (Reading back the BAR directly after writing can not detect this.)
414 */
415 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
416 if (test->bar[bar]) {
417 ret = pci_endpoint_test_bars_read_bar(test, bar);
418 if (ret)
419 return ret;
420 }
421 }
422
423 return 0;
424 }
425
pci_endpoint_test_subrange_sig_byte(enum pci_barno barno,unsigned int subno)426 static u8 pci_endpoint_test_subrange_sig_byte(enum pci_barno barno,
427 unsigned int subno)
428 {
429 return 0x50 + (barno * 8) + subno;
430 }
431
pci_endpoint_test_subrange_test_byte(enum pci_barno barno,unsigned int subno)432 static u8 pci_endpoint_test_subrange_test_byte(enum pci_barno barno,
433 unsigned int subno)
434 {
435 return 0xa0 + (barno * 8) + subno;
436 }
437
/*
 * Issue a BAR-subrange command (@command) for @barno and wait up to one
 * second for the completion interrupt. The endpoint reports the outcome
 * in STATUS: @ok_bit must be set and @fail_bit must be clear.
 *
 * Returns 0 on success, -EINVAL for an unusable IRQ type, -ETIMEDOUT
 * when no interrupt arrives, or -EIO when the endpoint reports failure.
 */
static int pci_endpoint_test_bar_subrange_cmd(struct pci_endpoint_test *test,
					      enum pci_barno barno, u32 command,
					      u32 ok_bit, u32 fail_bit)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int irq_type = test->irq_type;
	u32 status;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type\n");
		return -EINVAL;
	}

	reinit_completion(&test->irq_raised);

	/* Clear stale status before arming the command. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	/* Reuse SIZE as a command parameter: bar number. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, barno);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, command);

	if (!wait_for_completion_timeout(&test->irq_raised,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (status & fail_bit)
		return -EIO;

	if (!(status & ok_bit))
		return -EIO;

	return 0;
}
475
/* Ask the endpoint to split @barno into subranges (see NSUB). */
static int pci_endpoint_test_bar_subrange_setup(struct pci_endpoint_test *test,
						enum pci_barno barno)
{
	return pci_endpoint_test_bar_subrange_cmd(test, barno,
						  COMMAND_BAR_SUBRANGE_SETUP,
						  STATUS_BAR_SUBRANGE_SETUP_SUCCESS,
						  STATUS_BAR_SUBRANGE_SETUP_FAIL);
}
484
/* Ask the endpoint to tear the subrange mapping of @barno back down. */
static int pci_endpoint_test_bar_subrange_clear(struct pci_endpoint_test *test,
						enum pci_barno barno)
{
	return pci_endpoint_test_bar_subrange_cmd(test, barno,
						  COMMAND_BAR_SUBRANGE_CLEAR,
						  STATUS_BAR_SUBRANGE_CLEAR_SUCCESS,
						  STATUS_BAR_SUBRANGE_CLEAR_FAIL);
}
493
/*
 * BAR-subrange test: have the endpoint split @barno into NSUB equal
 * subranges, then
 *   1) check the endpoint-written signature word at the start and end of
 *      each subrange (proves the submap order was actually applied),
 *   2) fill each subrange with a unique byte pattern (all writes first),
 *   3) read everything back in bounded chunks and verify (all reads after
 *      all writes, to catch subranges aliasing each other).
 * The mapping is always cleared again before returning; a clear failure
 * is reported only when the test itself succeeded.
 *
 * Returns 0 on success or a negative error code.
 */
static int pci_endpoint_test_bar_subrange(struct pci_endpoint_test *test,
					  enum pci_barno barno)
{
	u32 nsub = PCI_ENDPOINT_TEST_BAR_SUBRANGE_NSUB;
	struct device *dev = &test->pdev->dev;
	size_t sub_size, buf_size;
	resource_size_t bar_size;
	void __iomem *bar_addr;
	void *read_buf = NULL;
	int ret, clear_ret;
	size_t off, chunk;
	u32 i, exp, val;
	u8 pattern;

	if (!(test->ep_caps & CAP_SUBRANGE_MAPPING))
		return -EOPNOTSUPP;

	/*
	 * The test register BAR is not safe to reprogram and write/read
	 * over its full size. BAR_TEST already special-cases it to a tiny
	 * range. For subrange mapping tests, let's simply skip it.
	 */
	if (barno == test->test_reg_bar)
		return -EBUSY;

	bar_size = pci_resource_len(test->pdev, barno);
	if (!bar_size)
		return -ENODATA;

	bar_addr = test->bar[barno];
	if (!bar_addr)
		return -ENOMEM;

	ret = pci_endpoint_test_bar_subrange_setup(test, barno);
	if (ret)
		return ret;

	/* BAR must split evenly, and a subrange must fit in size_t. */
	if (bar_size % nsub || bar_size / nsub > SIZE_MAX) {
		ret = -EINVAL;
		goto out_clear;
	}

	sub_size = bar_size / nsub;
	if (sub_size < sizeof(u32)) {
		ret = -ENOSPC;
		goto out_clear;
	}

	/* Limit the temporary buffer size */
	buf_size = min_t(size_t, sub_size, SZ_1M);

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf) {
		ret = -ENOMEM;
		goto out_clear;
	}

	/*
	 * Step 1: verify EP-provided signature per subrange. This detects
	 * whether the EP actually applied the submap order.
	 */
	for (i = 0; i < nsub; i++) {
		/* Signature byte replicated into all four bytes of a word. */
		exp = (u32)pci_endpoint_test_subrange_sig_byte(barno, i) *
		      0x01010101U;
		val = ioread32(bar_addr + (i * sub_size));
		if (val != exp) {
			dev_err(dev,
				"BAR%d subrange%u signature mismatch @%#zx: exp %#08x got %#08x\n",
				barno, i, (size_t)i * sub_size, exp, val);
			ret = -EIO;
			goto out_clear;
		}
		/* Also check the last word of the subrange. */
		val = ioread32(bar_addr + (i * sub_size) + sub_size - sizeof(u32));
		if (val != exp) {
			dev_err(dev,
				"BAR%d subrange%u signature mismatch @%#zx: exp %#08x got %#08x\n",
				barno, i,
				((size_t)i * sub_size) + sub_size - sizeof(u32),
				exp, val);
			ret = -EIO;
			goto out_clear;
		}
	}

	/* Step 2: write unique pattern per subrange (write all first). */
	for (i = 0; i < nsub; i++) {
		pattern = pci_endpoint_test_subrange_test_byte(barno, i);
		memset_io(bar_addr + (i * sub_size), pattern, sub_size);
	}

	/* Step 3: read back and verify (read all after all writes). */
	for (i = 0; i < nsub; i++) {
		pattern = pci_endpoint_test_subrange_test_byte(barno, i);
		for (off = 0; off < sub_size; off += chunk) {
			void *bad;

			chunk = min_t(size_t, buf_size, sub_size - off);
			memcpy_fromio(read_buf, bar_addr + (i * sub_size) + off,
				      chunk);
			/* memchr_inv() finds the first byte != pattern. */
			bad = memchr_inv(read_buf, pattern, chunk);
			if (bad) {
				size_t bad_off = (u8 *)bad - (u8 *)read_buf;

				dev_err(dev,
					"BAR%d subrange%u data mismatch @%#zx (pattern %#02x)\n",
					barno, i, (size_t)i * sub_size + off + bad_off,
					pattern);
				ret = -EIO;
				goto out_clear;
			}
		}
	}

out_clear:
	/* kfree(NULL) is a no-op; always undo the subrange mapping. */
	kfree(read_buf);
	clear_ret = pci_endpoint_test_bar_subrange_clear(test, barno);
	return ret ?: clear_ret;
}
612
pci_endpoint_test_intx_irq(struct pci_endpoint_test * test)613 static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
614 {
615 u32 val;
616
617 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
618 PCITEST_IRQ_TYPE_INTX);
619 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
620 pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
621 COMMAND_RAISE_INTX_IRQ);
622 val = wait_for_completion_timeout(&test->irq_raised,
623 msecs_to_jiffies(1000));
624 if (!val)
625 return -ETIMEDOUT;
626
627 return 0;
628 }
629
/*
 * Ask the endpoint to raise MSI or MSI-X vector @msi_num (1-based) and
 * verify that exactly that vector fired on the host side.
 *
 * Returns 0 on success, -ETIMEDOUT if no interrupt arrives within one
 * second, -EIO if a different vector fired, or a negative error from
 * pci_irq_vector().
 */
static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				     u16 msi_num, bool msix)
{
	u32 irq_type = msix ? PCITEST_IRQ_TYPE_MSIX : PCITEST_IRQ_TYPE_MSI;
	u32 command = msix ? COMMAND_RAISE_MSIX_IRQ : COMMAND_RAISE_MSI_IRQ;
	int irq;

	irq = pci_irq_vector(test->pdev, msi_num - 1);
	if (irq < 0)
		return irq;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, command);

	if (!wait_for_completion_timeout(&test->irq_raised,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	if (irq != test->last_irq)
		return -EIO;

	return 0;
}
658
/*
 * Sanity-check a userspace transfer request: the size must be non-zero
 * and must not overflow size_t once the caller adds @alignment slack to
 * its buffer allocations. Returns 0 if valid, -EINVAL otherwise.
 */
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}
674
/*
 * PCITEST_COPY: make the endpoint copy a host source buffer to a host
 * destination buffer (optionally via the endpoint's DMA engine) and
 * verify the result by comparing CRC32 of source and destination.
 *
 * @arg: userspace pointer to struct pci_endpoint_test_xfer_param.
 *
 * Both buffers are over-allocated by test->alignment so the bus address
 * handed to the endpoint can be rounded up to the required alignment.
 *
 * Returns 0 on success or a negative error code.
 */
static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate so the device-visible address can be aligned. */
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		return -ENOMEM;
	}

	/* Random payload so a stuck copy can't pass by coincidence. */
	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_src_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_src_phys_addr;
	}

	/* Round the bus address up to the alignment; shift the CPU view too. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = -ENOMEM;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_dst_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map destination buffer address\n");
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* NOTE(review): uninterruptible and without timeout — hangs forever
	 * if the endpoint never raises the completion IRQ. */
	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the destination (DMA-API ownership). */
	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 != src_crc32)
		ret = -EIO;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);
	return ret;
}
811
/*
 * PCITEST_WRITE: host fills a buffer with random data, tells the endpoint
 * its bus address, size and CRC32, then issues COMMAND_READ (a "read"
 * from the endpoint's perspective). The endpoint reads the buffer and
 * sets STATUS_READ_SUCCESS if its own CRC matches.
 *
 * @arg: userspace pointer to struct pci_endpoint_test_xfer_param.
 *
 * Returns 0 on success or a negative error code.
 */
static int pci_endpoint_test_write(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate so the device-visible address can be aligned. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	/* Round the bus address up to the alignment; shift the CPU view too. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	/* Tell the endpoint the expected CRC so it can self-check. */
	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	/* NOTE(review): no timeout; relies on the endpoint completing. */
	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!(reg & STATUS_READ_SUCCESS))
		ret = -EIO;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);
	return ret;
}
909
/*
 * PCITEST_READ: hand the endpoint a host buffer address and size and
 * issue COMMAND_WRITE (a "write" from the endpoint's perspective). The
 * endpoint fills the buffer and stores its CRC32 in the CHECKSUM
 * register; the host recomputes the CRC over the received data and
 * compares.
 *
 * @arg: userspace pointer to struct pci_endpoint_test_xfer_param.
 *
 * Returns 0 on success or a negative error code.
 */
static int pci_endpoint_test_read(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	/* Over-allocate so the device-visible address can be aligned. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		return -ENOMEM;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	/* Round the bus address up to the alignment; shift the CPU view too. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	/* NOTE(review): no timeout; relies on the endpoint completing. */
	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the data (DMA-API ownership). */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = -EIO;

err_phys_addr:
	kfree(orig_addr);
	return ret;
}
1000
/*
 * PCITEST_CLEAR_IRQ: drop all installed handlers and allocated vectors;
 * the IRQ type becomes PCITEST_IRQ_TYPE_UNDEFINED. Always succeeds.
 */
static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}
1008
pci_endpoint_test_set_irq(struct pci_endpoint_test * test,int req_irq_type)1009 static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
1010 int req_irq_type)
1011 {
1012 struct pci_dev *pdev = test->pdev;
1013 struct device *dev = &pdev->dev;
1014 int ret;
1015
1016 if (req_irq_type < PCITEST_IRQ_TYPE_INTX ||
1017 req_irq_type > PCITEST_IRQ_TYPE_AUTO) {
1018 dev_err(dev, "Invalid IRQ type option\n");
1019 return -EINVAL;
1020 }
1021
1022 if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) {
1023 if (test->ep_caps & CAP_MSI)
1024 req_irq_type = PCITEST_IRQ_TYPE_MSI;
1025 else if (test->ep_caps & CAP_MSIX)
1026 req_irq_type = PCITEST_IRQ_TYPE_MSIX;
1027 else if (test->ep_caps & CAP_INTX)
1028 req_irq_type = PCITEST_IRQ_TYPE_INTX;
1029 else
1030 /* fallback to MSI if no caps defined */
1031 req_irq_type = PCITEST_IRQ_TYPE_MSI;
1032 }
1033
1034 if (test->irq_type == req_irq_type)
1035 return 0;
1036
1037 pci_endpoint_test_release_irq(test);
1038 pci_endpoint_test_free_irq_vectors(test);
1039
1040 ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
1041 if (ret)
1042 return ret;
1043
1044 ret = pci_endpoint_test_request_irq(test);
1045 if (ret) {
1046 pci_endpoint_test_free_irq_vectors(test);
1047 return ret;
1048 }
1049
1050 return 0;
1051 }
1052
/*
 * PCITEST_DOORBELL: enable the endpoint's doorbell, ring it by writing
 * the endpoint-advertised data word to the advertised BAR/offset, verify
 * the endpoint saw it (STATUS_DOORBELL_SUCCESS), then disable the
 * doorbell again.
 *
 * Returns 0 on success or -EINVAL on any failure.
 */
static int pci_endpoint_test_doorbell(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int irq_type = test->irq_type;
	enum pci_barno bar;
	u32 data, status;
	u32 addr;
	int left;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type\n");
		return -EINVAL;
	}

	/* Ask the endpoint to set its doorbell up and wait for completion. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_ENABLE_DOORBELL);

	left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));

	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!left || (status & STATUS_DOORBELL_ENABLE_FAIL)) {
		dev_err(dev, "Failed to enable doorbell\n");
		return -EINVAL;
	}

	/* The endpoint publishes where and what to write to ring it. */
	data = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_DATA);
	addr = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_OFFSET);
	bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);

	/* Re-arm the completion IRQ and clear status before ringing. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);

	/* NOTE(review): DB_BAR was already read above; this second read
	 * looks redundant — confirm it is intentional. */
	bar = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_DB_BAR);

	/* Ring the doorbell. */
	writel(data, test->bar[bar] + addr);

	left = wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));

	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);

	if (!left || !(status & STATUS_DOORBELL_SUCCESS))
		dev_err(dev, "Failed to trigger doorbell in endpoint\n");

	/* Always disable the doorbell again, even after a ring failure. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_DISABLE_DOORBELL);

	wait_for_completion_timeout(&test->irq_raised, msecs_to_jiffies(1000));

	/* Accumulate status so earlier success bits are not lost. */
	status |= pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);

	if (status & STATUS_DOORBELL_DISABLE_FAIL) {
		dev_err(dev, "Failed to disable doorbell\n");
		return -EINVAL;
	}

	if (!(status & STATUS_DOORBELL_SUCCESS))
		return -EINVAL;

	return 0;
}
1119
pci_endpoint_test_ioctl(struct file * file,unsigned int cmd,unsigned long arg)1120 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
1121 unsigned long arg)
1122 {
1123 int ret = -EINVAL;
1124 enum pci_barno bar;
1125 struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
1126 struct pci_dev *pdev = test->pdev;
1127
1128 mutex_lock(&test->mutex);
1129
1130 reinit_completion(&test->irq_raised);
1131 test->last_irq = -ENODATA;
1132
1133 switch (cmd) {
1134 case PCITEST_BAR:
1135 case PCITEST_BAR_SUBRANGE:
1136 bar = arg;
1137 if (bar <= NO_BAR || bar > BAR_5)
1138 goto ret;
1139 if (is_am654_pci_dev(pdev) && bar == BAR_0)
1140 goto ret;
1141
1142 if (cmd == PCITEST_BAR)
1143 ret = pci_endpoint_test_bar(test, bar);
1144 else
1145 ret = pci_endpoint_test_bar_subrange(test, bar);
1146 break;
1147 case PCITEST_BARS:
1148 ret = pci_endpoint_test_bars(test);
1149 break;
1150 case PCITEST_INTX_IRQ:
1151 ret = pci_endpoint_test_intx_irq(test);
1152 break;
1153 case PCITEST_MSI:
1154 case PCITEST_MSIX:
1155 ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
1156 break;
1157 case PCITEST_WRITE:
1158 ret = pci_endpoint_test_write(test, arg);
1159 break;
1160 case PCITEST_READ:
1161 ret = pci_endpoint_test_read(test, arg);
1162 break;
1163 case PCITEST_COPY:
1164 ret = pci_endpoint_test_copy(test, arg);
1165 break;
1166 case PCITEST_SET_IRQTYPE:
1167 ret = pci_endpoint_test_set_irq(test, arg);
1168 break;
1169 case PCITEST_GET_IRQTYPE:
1170 ret = test->irq_type;
1171 break;
1172 case PCITEST_CLEAR_IRQ:
1173 ret = pci_endpoint_test_clear_irq(test);
1174 break;
1175 case PCITEST_DOORBELL:
1176 ret = pci_endpoint_test_doorbell(test);
1177 break;
1178 }
1179
1180 ret:
1181 mutex_unlock(&test->mutex);
1182 return ret;
1183 }
1184
/* Userspace interface: everything is driven through ioctl(). */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
1189
pci_endpoint_test_get_capabilities(struct pci_endpoint_test * test)1190 static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
1191 {
1192 struct pci_dev *pdev = test->pdev;
1193 struct device *dev = &pdev->dev;
1194
1195 test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
1196 dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps);
1197
1198 /* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
1199 if (test->ep_caps & CAP_UNALIGNED_ACCESS)
1200 test->alignment = 0;
1201 }
1202
/*
 * Probe: enable the device, map its memory BARs, and register a
 * per-device misc character device named "pci-endpoint-test.<id>".
 * IRQ vectors are NOT allocated here; they are set up lazily via the
 * PCITEST_SET_IRQTYPE ioctl (irq_type starts as UNDEFINED).
 *
 * Cleanup on failure follows the standard goto-unwind chain in reverse
 * order of acquisition.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int ret;
	int id;
	char name[29];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->pdev = pdev;
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;

	/* Per-match-entry overrides for register BAR and DMA buffer alignment. */
	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	/*
	 * NOTE(review): return value deliberately unchecked — on failure the
	 * device falls back to the default mask; confirm this is intended.
	 */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable PCI device\n");
		return ret;
	}

	ret = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (ret) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/*
	 * Map every memory BAR. A failed mapping is tolerated (left NULL)
	 * unless it is the register BAR, which is checked just below; the
	 * WARN_ON only flags that case early.
	 */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	/* The register BAR is mandatory: all test commands go through it. */
	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		ret = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	/* Unique suffix for the misc device name; reclaimed in remove(). */
	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		ret = -ENOMEM;
		goto err_ida_remove;
	}

	pci_endpoint_test_get_capabilities(test);

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	/* Separate copy: remove() parses the id back out of this name. */
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		ret = -ENOMEM;
		goto err_kfree_test_name;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	ret = misc_register(misc_device);
	if (ret) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return ret;
}
1330
/*
 * Remove: undo probe in reverse — free IRQs, deregister the misc
 * device, release the IDA id (recovered by parsing it back out of the
 * misc device name), unmap BARs and release/disable the PCI device.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	/* Name was built as DRV_MODULE_NAME ".%d" in probe; bail if not. */
	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	/*
	 * NOTE(review): IRQs are torn down before misc_deregister(), so an
	 * in-flight PCITEST_SET_IRQTYPE ioctl could re-request vectors
	 * during teardown — confirm this window is acceptable.
	 */
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
1358
/* Default: test registers in BAR_0, buffers aligned to 4K. */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
};

/* TI AM654: BAR_0 is rejected by the ioctl path; registers live in BAR_2. */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
};

/* TI J721E family: test_reg_bar left zero-initialized, 256-byte alignment. */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
};

/* Rockchip RK3588: test_reg_bar left zero-initialized, 64K alignment. */
static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
};
1376
1377 /*
1378 * If the controller's Vendor/Device ID are programmable, you may be able to
1379 * use one of the existing entries for testing instead of adding a new one.
1380 */
/*
 * Match table. Entries without .driver_data run with probe's defaults
 * (test_reg_bar = BAR_0, alignment = 0, i.e. no alignment constraint).
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1424
static struct pci_driver pci_endpoint_test_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = pci_endpoint_test_tbl,
	.probe = pci_endpoint_test_probe,
	.remove = pci_endpoint_test_remove,
	/* Generic helper: VFs are enabled/disabled purely via sysfs. */
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");