1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Host side test driver to test endpoint functionality 4 * 5 * Copyright (C) 2017 Texas Instruments 6 * Author: Kishon Vijay Abraham I <kishon@ti.com> 7 */ 8 9 #include <linux/crc32.h> 10 #include <linux/cleanup.h> 11 #include <linux/delay.h> 12 #include <linux/fs.h> 13 #include <linux/io.h> 14 #include <linux/interrupt.h> 15 #include <linux/irq.h> 16 #include <linux/miscdevice.h> 17 #include <linux/module.h> 18 #include <linux/mutex.h> 19 #include <linux/random.h> 20 #include <linux/slab.h> 21 #include <linux/uaccess.h> 22 #include <linux/pci.h> 23 #include <linux/pci_ids.h> 24 25 #include <linux/pci_regs.h> 26 27 #include <uapi/linux/pcitest.h> 28 29 #define DRV_MODULE_NAME "pci-endpoint-test" 30 31 #define PCI_ENDPOINT_TEST_MAGIC 0x0 32 33 #define PCI_ENDPOINT_TEST_COMMAND 0x4 34 #define COMMAND_RAISE_INTX_IRQ BIT(0) 35 #define COMMAND_RAISE_MSI_IRQ BIT(1) 36 #define COMMAND_RAISE_MSIX_IRQ BIT(2) 37 #define COMMAND_READ BIT(3) 38 #define COMMAND_WRITE BIT(4) 39 #define COMMAND_COPY BIT(5) 40 41 #define PCI_ENDPOINT_TEST_STATUS 0x8 42 #define STATUS_READ_SUCCESS BIT(0) 43 #define STATUS_READ_FAIL BIT(1) 44 #define STATUS_WRITE_SUCCESS BIT(2) 45 #define STATUS_WRITE_FAIL BIT(3) 46 #define STATUS_COPY_SUCCESS BIT(4) 47 #define STATUS_COPY_FAIL BIT(5) 48 #define STATUS_IRQ_RAISED BIT(6) 49 #define STATUS_SRC_ADDR_INVALID BIT(7) 50 #define STATUS_DST_ADDR_INVALID BIT(8) 51 52 #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c 53 #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10 54 55 #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14 56 #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18 57 58 #define PCI_ENDPOINT_TEST_SIZE 0x1c 59 #define PCI_ENDPOINT_TEST_CHECKSUM 0x20 60 61 #define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24 62 #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28 63 64 #define PCI_ENDPOINT_TEST_FLAGS 0x2c 65 #define FLAG_USE_DMA BIT(0) 66 67 #define PCI_ENDPOINT_TEST_CAPS 0x30 68 #define CAP_UNALIGNED_ACCESS BIT(0) 69 #define CAP_MSI BIT(1) 
#define CAP_MSIX			BIT(2)
#define CAP_INTX			BIT(3)

#define PCI_DEVICE_ID_TI_AM654		0xb00c
#define PCI_DEVICE_ID_TI_J7200		0xb00f
#define PCI_DEVICE_ID_TI_AM64		0xb010
#define PCI_DEVICE_ID_TI_J721S2		0xb013
#define PCI_DEVICE_ID_LS1088A		0x80c0
#define PCI_DEVICE_ID_IMX8		0x0808

/* AM654 cannot use BAR_0 for the test (see the PCITEST_BAR ioctl below). */
#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588

/* Allocator for the ".%d" suffix of each /dev/pci-endpoint-test.N node. */
static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

/*
 * Per-device state for one endpoint-test function.
 *
 * @base points at the mapping of @test_reg_bar, through which all
 * PCI_ENDPOINT_TEST_* registers are accessed.
 */
struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;		/* alias of bar[test_reg_bar] */
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;	/* signalled by the IRQ handler */
	int		last_irq;	/* Linux IRQ number of the last IRQ received */
	int		num_irqs;	/* vectors currently requested via request_irq() */
	int		irq_type;	/* PCITEST_IRQ_TYPE_* currently allocated */
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;	/* BAR holding the test register block */
	size_t alignment;		/* DMA buffer alignment required by the EP (0 = none) */
	u32 ep_caps;			/* CAP_* bits read from PCI_ENDPOINT_TEST_CAPS */
	const char *name;		/* "pci-endpoint-test.N", used for request_irq() */
};

/* Per-device-ID configuration, attached via pci_device_id.driver_data. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

/* Read a 32-bit test register (offset into the test register BAR). */
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

/* Write a 32-bit test register (offset into the test register BAR). */
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

/*
 * Shared handler for all vectors: record which IRQ fired and wake the
 * waiter when the endpoint reports STATUS_IRQ_RAISED.
 *
 * NOTE(review): IRQ_HANDLED is returned even when STATUS_IRQ_RAISED is
 * not set — presumably deliberate for the IRQF_SHARED case, but confirm.
 */
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

/* Release all IRQ vectors and mark the IRQ type as undefined. */
static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;
}

/*
 * Allocate IRQ vectors of the requested PCITEST_IRQ_TYPE_*.
 * Asks for up to 32 MSI / 2048 MSI-X vectors (the architectural maxima);
 * the actual count granted is stored in test->num_irqs.
 * Returns 0 on success, negative errno otherwise.
 */
static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
					       int type)
{
	int irq;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	switch (type) {
	case PCITEST_IRQ_TYPE_INTX:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (irq < 0) {
			dev_err(dev, "Failed to get Legacy interrupt\n");
			return irq;
		}

		break;
	case PCITEST_IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI interrupts\n");
			return irq;
		}

		break;
	case PCITEST_IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI-X interrupts\n");
			return irq;
		}

		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
		return -EINVAL;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return 0;
}

/* free_irq() every vector previously wired up by request_irq(). */
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;

	for (i = 0; i < test->num_irqs; i++)
		free_irq(pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

/*
 * request_irq() every allocated vector with the shared test handler.
 * On failure, the vectors requested so far (0..i-1) are released before
 * returning the errno from request_irq().
 */
static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int ret;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		ret = request_irq(pci_irq_vector(pdev, i),
				  pci_endpoint_test_irqhandler, IRQF_SHARED,
				  test->name, test);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	switch (test->irq_type) {
	case PCITEST_IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case PCITEST_IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case PCITEST_IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	/* Only vectors 0..i-1 were requested; release just those. */
	test->num_irqs = i;
	pci_endpoint_test_release_irq(test);

	return ret;
}

/* One distinctive fill byte pattern per BAR, indexed by enum pci_barno. */
static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};

/*
 * Fill @size bytes of the BAR at @offset with the BAR's pattern, read it
 * back, and memcmp() the two bounce buffers. Returns 0 when they match.
 */
static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno,
					resource_size_t offset, void *write_buf,
					void *read_buf, int size)
{
	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(test->bar[barno] + offset, write_buf, size);

	memcpy_fromio(read_buf, test->bar[barno] + offset, size);

	return memcmp(write_buf, read_buf, size);
}

/*
 * PCITEST_BAR: write/read-back test over a whole BAR.
 * For the test register BAR only the first dword is touched so the
 * register block is not clobbered.  Returns 0, or -ENODATA if the BAR
 * does not exist, -ENOMEM on mapping/allocation failure, -EIO on
 * data mismatch.
 */
static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
				 enum pci_barno barno)
{
	resource_size_t bar_size, offset = 0;
	void *write_buf __free(kfree) = NULL;
	void *read_buf __free(kfree) = NULL;
	struct pci_dev *pdev = test->pdev;
	int buf_size;

	bar_size = pci_resource_len(pdev, barno);
	if (!bar_size)
		return -ENODATA;

	if (!test->bar[barno])
		return -ENOMEM;

	if (barno == test->test_reg_bar)
		bar_size = 0x4;

	/*
	 * Allocate a buffer of max size 1MB, and reuse that buffer while
	 * iterating over the whole BAR size (which might be much larger).
	 */
	buf_size = min(SZ_1M, bar_size);

	write_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!write_buf)
		return -ENOMEM;

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf)
		return -ENOMEM;

	while (offset < bar_size) {
		if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
						 read_buf, buf_size))
			return -EIO;
		offset += buf_size;
	}

	return 0;
}

/*
 * Pattern for the multi-BAR test: BAR identity in the top byte, low 24
 * bits of the offset in the rest, so a misdirected write is detectable.
 */
static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
{
	u32 val;

	/* Keep the BAR pattern in the top byte. */
	val = bar_test_pattern[barno] & 0xff000000;
	/* Store the (partial) offset in the remaining bytes. */
	val |= offset & 0x00ffffff;

	return val;
}

/* Fill one BAR with the offset-encoding pattern (no read-back here). */
static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
					     enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	int j, size;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		writel_relaxed(bar_test_pattern_with_offset(barno, j),
			       test->bar[barno] + j);
}

/* Verify one BAR still holds its own pattern; -EIO on first mismatch. */
static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
					   enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int j, size;
	u32 val;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4) {
		u32 expected = bar_test_pattern_with_offset(barno, j);

		val = readl_relaxed(test->bar[barno] + j);
		if (val != expected) {
			dev_err(dev,
				"BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
				barno, j, val, expected);
			return -EIO;
		}
	}

	return 0;
}

/*
 * PCITEST_BARS: write every mapped BAR first, then read all of them back,
 * to catch EP-side address-translation bugs where one BAR aliases another.
 */
static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
{
	enum pci_barno bar;
	int ret;

	/* Write all BARs in order (without reading). */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		if (test->bar[bar])
			pci_endpoint_test_bars_write_bar(test, bar);

	/*
	 * Read all BARs in order (without writing).
	 * If there is an address translation issue on the EP, writing one BAR
	 * might have overwritten another BAR. Ensure that this is not the case.
	 * (Reading back the BAR directly after writing can not detect this.)
	 */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar]) {
			ret = pci_endpoint_test_bars_read_bar(test, bar);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * PCITEST_INTX_IRQ: ask the endpoint to raise a legacy interrupt and wait
 * up to 1s for the handler to complete irq_raised.  -ETIMEDOUT otherwise.
 */
static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 PCITEST_IRQ_TYPE_INTX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_INTX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	return 0;
}

/*
 * PCITEST_MSI / PCITEST_MSIX: ask the endpoint to raise MSI/MSI-X vector
 * @msi_num (1-based) and verify the IRQ that actually arrived is the
 * Linux IRQ mapped to that vector.  -ETIMEDOUT or -EIO on mismatch.
 */
static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				     u16 msi_num, bool msix)
{
	struct pci_dev *pdev = test->pdev;
	u32 val;
	int ret;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? PCITEST_IRQ_TYPE_MSIX :
				 PCITEST_IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	ret = pci_irq_vector(pdev, msi_num - 1);
	if (ret < 0)
		return ret;

	if (ret != test->last_irq)
		return -EIO;

	return 0;
}

/*
 * Sanity-check a user-supplied transfer size: non-zero, and small enough
 * that size + alignment cannot overflow size_t (both buffers are
 * allocated with size + alignment bytes).
 */
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * PCITEST_COPY: have the endpoint copy @size random bytes from a host
 * source buffer to a host destination buffer (COMMAND_COPY), then compare
 * CRC32 of both.  Buffers are over-allocated by test->alignment and the
 * DMA address manually aligned when the EP requires it.
 * Returns 0 on success, -EIO on CRC mismatch, negative errno otherwise.
 */
static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_src_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_src_phys_addr;
	}

	/* Align the DMA address (and CPU view) if the EP demands it. */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = -ENOMEM;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_dst_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map destination buffer address\n");
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* No timeout here; the EP is trusted to raise the IRQ. */
	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 != src_crc32)
		ret = -EIO;

	/* Success path falls through the cleanup labels below. */
err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);
	return ret;
}

/*
 * PCITEST_WRITE: host fills a buffer with random data, publishes its CRC32
 * and DMA address, then issues COMMAND_READ — i.e. the *endpoint* reads
 * the buffer and is expected to report STATUS_READ_SUCCESS after checking
 * the checksum on its side.  Returns 0 on success, -EIO otherwise.
 */
static int pci_endpoint_test_write(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!(reg & STATUS_READ_SUCCESS))
		ret = -EIO;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);
	return ret;
}

/*
 * PCITEST_READ: the mirror of pci_endpoint_test_write().  The host
 * provides a destination buffer and issues COMMAND_WRITE — the endpoint
 * writes @size bytes and publishes their CRC32 in the CHECKSUM register,
 * which the host verifies against its own CRC of the received data.
 */
static int pci_endpoint_test_read(struct pci_endpoint_test *test,
				  unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		return -ENOMEM;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	/* Unmap before the CPU reads the buffer for the CRC check. */
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = -EIO;

err_phys_addr:
	kfree(orig_addr);
	return ret;
}

/* PCITEST_CLEAR_IRQ: tear down handlers and vectors completely. */
static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}

/*
 * PCITEST_SET_IRQTYPE: switch the device to the requested IRQ type.
 * PCITEST_IRQ_TYPE_AUTO picks the best type the endpoint advertises in
 * ep_caps (MSI preferred, then MSI-X, then INTx; MSI if no caps set).
 * A no-op when the requested type is already active.
 */
static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				     int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	if (req_irq_type < PCITEST_IRQ_TYPE_INTX ||
	    req_irq_type > PCITEST_IRQ_TYPE_AUTO) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	if (req_irq_type == PCITEST_IRQ_TYPE_AUTO) {
		if (test->ep_caps & CAP_MSI)
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
		else if (test->ep_caps & CAP_MSIX)
			req_irq_type = PCITEST_IRQ_TYPE_MSIX;
		else if (test->ep_caps & CAP_INTX)
			req_irq_type = PCITEST_IRQ_TYPE_INTX;
		else
			/* fallback to MSI if no caps defined */
			req_irq_type = PCITEST_IRQ_TYPE_MSI;
	}

	if (test->irq_type == req_irq_type)
		return 0;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
	if (ret)
		return ret;

	ret = pci_endpoint_test_request_irq(test);
	if (ret) {
		pci_endpoint_test_free_irq_vectors(test);
		return ret;
	}

	return 0;
}

/*
 * ioctl dispatcher for /dev/pci-endpoint-test.N.  All commands run under
 * test->mutex; the IRQ completion and last_irq are reset before each one.
 */
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		/* BAR_0 is not testable on AM654. */
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_BARS:
		ret = pci_endpoint_test_bars(test);
		break;
	case PCITEST_INTX_IRQ:
		ret = pci_endpoint_test_intx_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = test->irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

/*
 * Read the endpoint's capability bits; an EP that supports unaligned
 * access lets us drop the per-SoC DMA alignment requirement.
 */
static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	test->ep_caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
	dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", test->ep_caps);

	/* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
	if (test->ep_caps & CAP_UNALIGNED_ACCESS)
		test->alignment = 0;
}

/*
 * Probe: enable the device, allocate/request IRQs, map all memory BARs,
 * and register a /dev/pci-endpoint-test.N misc device.  Per-ID overrides
 * (test register BAR, alignment, IRQ type) come from ent->driver_data.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int ret;
	int id;
	char name[29];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = PCITEST_IRQ_TYPE_UNDEFINED;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		test->irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	/*
	 * 48-bit DMA mask; return value deliberately ignored — the streaming
	 * mappings below fail gracefully if this could not be satisfied.
	 * NOTE(review): 48 bits presumably matches the EPs' address reach;
	 * confirm against the endpoint controllers.
	 */
	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable PCI device\n");
		return ret;
	}

	ret = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (ret) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	ret = pci_endpoint_test_alloc_irq_vectors(test, test->irq_type);
	if (ret)
		goto err_disable_irq;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				/* Only fatal for the test register BAR (checked below). */
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		ret = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		ret = -ENOMEM;
		goto err_ida_remove;
	}

	ret = pci_endpoint_test_request_irq(test);
	if (ret)
		goto err_kfree_test_name;

	pci_endpoint_test_get_capabilities(test);

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	/* Separate copy: miscdev keeps its own name independent of test->name. */
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		ret = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	ret = misc_register(misc_device);
	if (ret) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return ret;
}

/*
 * Remove: undo probe in reverse.  The instance id is recovered by parsing
 * it back out of the misc device name ("pci-endpoint-test.%d").
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = PCITEST_IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = PCITEST_IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = PCITEST_IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
	.irq_type = PCITEST_IRQ_TYPE_MSI,
};

/*
 * If the controller's Vendor/Device ID are programmable, you may be able to
 * use one of the existing entries for testing instead of adding a new one.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");