// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for Xilinx Versal CPM DMA Bridge
 *
 * (C) Copyright 2019 - 2020, Xilinx, Inc.
 */

#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>

#include "../pci.h"
#include "pcie-xilinx-common.h"

/* Register definitions */
#define XILINX_CPM_PCIE_REG_IDR		0x00000E10
#define XILINX_CPM_PCIE_REG_IMR		0x00000E14
#define XILINX_CPM_PCIE_REG_PSCR	0x00000E1C
#define XILINX_CPM_PCIE_REG_RPSC	0x00000E20
#define XILINX_CPM_PCIE_REG_RPEFR	0x00000E2C
#define XILINX_CPM_PCIE_REG_IDRN	0x00000E38
#define XILINX_CPM_PCIE_REG_IDRN_MASK	0x00000E3C
#define XILINX_CPM_PCIE_MISC_IR_STATUS	0x00000340
#define XILINX_CPM_PCIE_MISC_IR_ENABLE	0x00000348
#define XILINX_CPM_PCIE0_MISC_IR_LOCAL	BIT(1)
#define XILINX_CPM_PCIE1_MISC_IR_LOCAL	BIT(2)

#define XILINX_CPM_PCIE0_IR_STATUS	0x000002A0
#define XILINX_CPM_PCIE1_IR_STATUS	0x000002B4
#define XILINX_CPM_PCIE0_IR_ENABLE	0x000002A8
#define XILINX_CPM_PCIE1_IR_ENABLE	0x000002BC
#define XILINX_CPM_PCIE_IR_LOCAL	BIT(0)

#define IMR(x) BIT(XILINX_PCIE_INTR_ ##x)

#define XILINX_CPM_PCIE_IMR_ALL_MASK		\
	(					\
		IMR(LINK_DOWN)		|	\
		IMR(HOT_RESET)		|	\
		IMR(CFG_PCIE_TIMEOUT)	|	\
		IMR(CFG_TIMEOUT)	|	\
		IMR(CORRECTABLE)	|	\
		IMR(NONFATAL)		|	\
		IMR(FATAL)		|	\
		IMR(CFG_ERR_POISON)	|	\
		IMR(PME_TO_ACK_RCVD)	|	\
		IMR(INTX)		|	\
		IMR(PM_PME_RCVD)	|	\
		IMR(SLV_UNSUPP)		|	\
		IMR(SLV_UNEXP)		|	\
		IMR(SLV_COMPL)		|	\
		IMR(SLV_ERRP)		|	\
		IMR(SLV_CMPABT)		|	\
		IMR(SLV_ILLBUR)		|	\
		IMR(MST_DECERR)		|	\
		IMR(MST_SLVERR)		|	\
		IMR(SLV_PCIE_TIMEOUT)		\
	)

#define XILINX_CPM_PCIE_IDR_ALL_MASK	0xFFFFFFFF
#define XILINX_CPM_PCIE_IDRN_MASK	GENMASK(19, 16)
#define XILINX_CPM_PCIE_IDRN_SHIFT	16

/* Root Port Error FIFO Read Register definitions */
#define XILINX_CPM_PCIE_RPEFR_ERR_VALID	BIT(18)
#define XILINX_CPM_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
#define XILINX_CPM_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF

/* Root Port Status/control Register definitions */
#define XILINX_CPM_PCIE_REG_RPSC_BEN	BIT(0)

/* Phy Status/Control Register definitions */
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP	BIT(11)

enum xilinx_cpm_version {
	CPM,
	CPM5,
	CPM5_HOST1,
	CPM5NC_HOST,
};

/**
 * struct xilinx_cpm_variant - CPM variant information
 * @version: CPM version
 * @ir_status: Offset for the error interrupt status register
 * @ir_enable: Offset for the CPM5 local error interrupt enable register
 * @ir_misc_value: A bitmask for the miscellaneous interrupt status
 */
struct xilinx_cpm_variant {
	enum xilinx_cpm_version version;
	u32 ir_status;
	u32 ir_enable;
	u32 ir_misc_value;
};

/**
 * struct xilinx_cpm_pcie - PCIe port information
 * @dev: Device pointer
 * @reg_base: Bridge Register Base
 * @cpm_base: CPM System Level Control and Status Register (SLCR) Base
 * @intx_domain: Legacy IRQ domain pointer
 * @cpm_domain: CPM IRQ domain pointer
 * @cfg: Holds mappings of config space window
 * @intx_irq: Legacy interrupt number
 * @irq: Error interrupt number
 * @lock: Lock protecting shared register access
 * @variant: Pointer to the matched CPM variant information
 */
struct xilinx_cpm_pcie {
	struct device *dev;
	void __iomem *reg_base;
	void __iomem *cpm_base;
	struct irq_domain *intx_domain;
	struct irq_domain *cpm_domain;
	struct pci_config_window *cfg;
	int intx_irq;
	int irq;
	raw_spinlock_t lock;
	const struct xilinx_cpm_variant *variant;
};
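/*
 * Accessors for the DMA bridge register space.  The relaxed MMIO
 * variants are used, so these helpers imply no memory barriers of
 * their own.
 */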
static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
{
	return readl_relaxed(port->reg_base + reg);
}

static void pcie_write(struct xilinx_cpm_pcie *port,
		       u32 val, u32 reg)
{
	writel_relaxed(val, port->reg_base + reg);
}

static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
{
	return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
		XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}

static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
{
	unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);

	if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) {
		dev_dbg(port->dev, "Requester ID %lu\n",
			val & XILINX_CPM_PCIE_RPEFR_REQ_ID);
		pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK,
			   XILINX_CPM_PCIE_REG_RPEFR);
	}
}

static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
	unsigned long flags;
	u32 mask;
	u32 val;

	mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT);
	raw_spin_lock_irqsave(&port->lock, flags);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK);
	pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip xilinx_cpm_leg_irq_chip = {
	.name = "INTx",
	.irq_mask = xilinx_cpm_mask_leg_irq,
	.irq_unmask = xilinx_cpm_unmask_leg_irq,
};

/**
 * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: HW interrupt number
 *
 * Return: Always returns 0.
 */
static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain,
				    unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ Domain operations */
static const struct irq_domain_ops intx_domain_ops = {
	.map = xilinx_cpm_pcie_intx_map,
};
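/*
 * Chained flow handler for the INTX event: decode the per-line status
 * from IDRN bits 19:16 and hand each asserted INTx off to the INTx
 * IRQ domain.
 */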
static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);

	val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK,
			pcie_read(port, XILINX_CPM_PCIE_REG_IDRN));

	for_each_set_bit(i, &val, PCI_NUM_INTX)
		generic_handle_domain_irq(port->intx_domain, i);

	chained_irq_exit(chip, desc);
}

static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val &= ~BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
	struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
	u32 val;

	raw_spin_lock(&port->lock);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	val |= BIT(d->hwirq);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR);
	raw_spin_unlock(&port->lock);
}

static struct irq_chip xilinx_cpm_event_irq_chip = {
	.name = "RC-Event",
	.irq_mask = xilinx_cpm_mask_event_irq,
	.irq_unmask = xilinx_cpm_unmask_event_irq,
};

static int xilinx_cpm_pcie_event_map(struct irq_domain *domain,
				     unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);
	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = xilinx_cpm_pcie_event_map,
};
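/*
 * Chained flow handler for the top-level event interrupt: service the
 * enabled bridge events latched in IDR, then acknowledge the
 * variant-specific local error status and the miscellaneous status in
 * the CPM SLCR block.
 */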
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
	struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	const struct xilinx_cpm_variant *variant = port->variant;
	unsigned long val;
	int i;

	chained_irq_enter(chip, desc);
	val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR);
	val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(port->cpm_domain, i);
	pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR);

	if (variant->ir_status) {
		val = readl_relaxed(port->cpm_base + variant->ir_status);
		if (val)
			writel_relaxed(val, port->cpm_base +
					    variant->ir_status);
	}

	/*
	 * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to
	 * CPM SLCR block.
	 */
	val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);
	if (val)
		writel_relaxed(val,
			       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS);

	chained_irq_exit(chip, desc);
}

#define _IC(x, s) \
	[XILINX_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char *sym;
	const char *str;
} intr_cause[32] = {
	_IC(LINK_DOWN,		"Link Down"),
	_IC(HOT_RESET,		"Hot reset"),
	_IC(CFG_TIMEOUT,	"ECAM access timeout"),
	_IC(CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
	_IC(SLV_UNSUPP,		"Slave unsupported request"),
	_IC(SLV_UNEXP,		"Slave unexpected completion"),
	_IC(SLV_COMPL,		"Slave completion timeout"),
	_IC(SLV_ERRP,		"Slave Error Poison"),
	_IC(SLV_CMPABT,		"Slave Completer Abort"),
	_IC(SLV_ILLBUR,		"Slave Illegal Burst"),
	_IC(MST_DECERR,		"Master decode error"),
	_IC(MST_SLVERR,		"Master slave error"),
	_IC(CFG_PCIE_TIMEOUT,	"PCIe ECAM access timeout"),
	_IC(CFG_ERR_POISON,	"ECAM poisoned completion received"),
	_IC(PME_TO_ACK_RCVD,	"PME_TO_ACK message received"),
	_IC(PM_PME_RCVD,	"PM_PME message received"),
	_IC(SLV_PCIE_TIMEOUT,	"PCIe completion timeout received"),
};
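/*
 * Per-event handler: for correctable/non-fatal/fatal error events the
 * Root Port error FIFO is drained first, then the decoded cause is
 * logged.
 */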
static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
	struct xilinx_cpm_pcie *port = dev_id;
	struct device *dev = port->dev;
	struct irq_data *d;

	d = irq_domain_get_irq_data(port->cpm_domain, irq);

	switch (d->hwirq) {
	case XILINX_PCIE_INTR_CORRECTABLE:
	case XILINX_PCIE_INTR_NONFATAL:
	case XILINX_PCIE_INTR_FATAL:
		cpm_pcie_clear_err_interrupts(port);
		fallthrough;

	default:
		if (intr_cause[d->hwirq].str)
			dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
		else
			dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq);
	}

	return IRQ_HANDLED;
}

static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
{
	if (port->intx_domain) {
		irq_domain_remove(port->intx_domain);
		port->intx_domain = NULL;
	}

	if (port->cpm_domain) {
		irq_domain_remove(port->cpm_domain);
		port->cpm_domain = NULL;
	}
}

/**
 * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain
 * @port: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
{
	struct device *dev = port->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;

	/* Setup INTx */
	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -EINVAL;
	}

	port->cpm_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), 32,
						    &event_domain_ops, port);
	if (!port->cpm_domain)
		goto out;

	irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS);

	port->intx_domain = irq_domain_create_linear(of_fwnode_handle(pcie_intc_node), PCI_NUM_INTX,
						     &intx_domain_ops, port);
	if (!port->intx_domain)
		goto out;

	irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED);

	of_node_put(pcie_intc_node);
	raw_spin_lock_init(&port->lock);

	return 0;
out:
	xilinx_cpm_free_irq_domains(port);
	of_node_put(pcie_intc_node);
	dev_err(dev, "Failed to allocate IRQ domains\n");

	return -ENOMEM;
}
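/*
 * Map and request one IRQ per named entry in intr_cause[], then install
 * the chained flow handlers for the INTx decode and the top-level
 * event interrupt.
 */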
static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	int i, irq;

	port->irq = platform_get_irq(pdev, 0);
	if (port->irq < 0)
		return port->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		int err;

		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(port->cpm_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map interrupt\n");
			return -ENXIO;
		}

		err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler,
				       0, intr_cause[i].sym, port);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d\n", irq);
			return err;
		}
	}

	port->intx_irq = irq_create_mapping(port->cpm_domain,
					    XILINX_PCIE_INTR_INTX);
	if (!port->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	/* Plug the INTx chained handler */
	irq_set_chained_handler_and_data(port->intx_irq,
					 xilinx_cpm_pcie_intx_flow, port);

	/* Plug the main event chained handler */
	irq_set_chained_handler_and_data(port->irq,
					 xilinx_cpm_pcie_event_flow, port);

	return 0;
}

/**
 * xilinx_cpm_pcie_init_port - Initialize hardware
 * @port: PCIe port information
 */
static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
	const struct xilinx_cpm_variant *variant = port->variant;

	if (variant->version == CPM5NC_HOST)
		return;

	if (cpm_pcie_link_up(port))
		dev_info(port->dev, "PCIe Link is UP\n");
	else
		dev_info(port->dev, "PCIe Link is DOWN\n");

	/* Disable all interrupts */
	pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IMR);

	/* Clear pending interrupts */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) &
		   XILINX_CPM_PCIE_IMR_ALL_MASK,
		   XILINX_CPM_PCIE_REG_IDR);

	/*
	 * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to
	 * CPM SLCR block.
	 */
	writel(variant->ir_misc_value,
	       port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE);

	if (variant->ir_enable) {
		writel(XILINX_CPM_PCIE_IR_LOCAL,
		       port->cpm_base + variant->ir_enable);
	}

	/* Set Bridge enable bit */
	pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) |
		   XILINX_CPM_PCIE_REG_RPSC_BEN,
		   XILINX_CPM_PCIE_REG_RPSC);
}

/**
 * xilinx_cpm_pcie_parse_dt - Parse Device tree
 * @port: PCIe port information
 * @bus_range: Bus resource
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
				    struct resource *bus_range)
{
	struct device *dev = port->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *res;

	port->cpm_base = devm_platform_ioremap_resource_byname(pdev,
							       "cpm_slcr");
	if (IS_ERR(port->cpm_base))
		return PTR_ERR(port->cpm_base);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
	if (!res)
		return -ENXIO;

	port->cfg = pci_ecam_create(dev, res, bus_range,
				    &pci_generic_ecam_ops);
	if (IS_ERR(port->cfg))
		return PTR_ERR(port->cfg);

	if (port->variant->version == CPM5 ||
	    port->variant->version == CPM5_HOST1) {
		port->reg_base = devm_platform_ioremap_resource_byname(pdev,
								       "cpm_csr");
		if (IS_ERR(port->reg_base)) {
			/* Don't leak the ECAM window created above */
			pci_ecam_free(port->cfg);
			return PTR_ERR(port->reg_base);
		}
	} else {
		port->reg_base = port->cfg->win;
	}

	return 0;
}

static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
{
	irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
	irq_set_chained_handler_and_data(port->irq, NULL, NULL);
}

/**
 * xilinx_cpm_pcie_probe - Probe function
 * @pdev: Platform device pointer
 *
 * Return: '0' on success and error value on failure
 */
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
	struct xilinx_cpm_pcie *port;
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	struct resource_entry *bus;
	int err;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
	if (!bridge)
		return -ENODEV;

	port = pci_host_bridge_priv(bridge);

	port->dev = dev;

	port->variant = of_device_get_match_data(dev);

	if (port->variant->version != CPM5NC_HOST) {
		err = xilinx_cpm_pcie_init_irq_domain(port);
		if (err)
			return err;
	}

	bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
	if (!bus) {
		err = -ENODEV;
		goto err_free_irq_domains;
	}

	err = xilinx_cpm_pcie_parse_dt(port, bus->res);
	if (err) {
		dev_err(dev, "Parsing DT failed\n");
		goto err_free_irq_domains;
	}

	xilinx_cpm_pcie_init_port(port);

	if (port->variant->version != CPM5NC_HOST) {
		err = xilinx_cpm_setup_irq(port);
		if (err) {
			dev_err(dev, "Failed to set up interrupts\n");
			goto err_setup_irq;
		}
	}

	bridge->sysdata = port->cfg;
	bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops;

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_host_bridge;

	return 0;

err_host_bridge:
	if (port->variant->version != CPM5NC_HOST)
		xilinx_cpm_free_interrupts(port);
err_setup_irq:
	pci_ecam_free(port->cfg);
err_free_irq_domains:
	if (port->variant->version != CPM5NC_HOST)
		xilinx_cpm_free_irq_domains(port);
	return err;
}
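/*
 * Per-variant data, selected via the compatible string in the match
 * table below.  As a rough illustrative sketch (an assumption, not
 * copied from the DT binding document), a matching node supplies the
 * resources this driver looks up by name:
 *
 *	pcie@... {
 *		compatible = "xlnx,versal-cpm5-host";
 *		reg-names = "cfg", "cpm_slcr", "cpm_csr"; // "cpm_csr": CPM5 only
 *		interrupts = <...>;                       // top-level event IRQ
 *		pcie_intc: interrupt-controller {         // first child: INTx decode
 *			...
 *		};
 *	};
 */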
static const struct xilinx_cpm_variant cpm_host = {
	.version = CPM,
	.ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
};

static const struct xilinx_cpm_variant cpm5_host = {
	.version = CPM5,
	.ir_misc_value = XILINX_CPM_PCIE0_MISC_IR_LOCAL,
	.ir_status = XILINX_CPM_PCIE0_IR_STATUS,
	.ir_enable = XILINX_CPM_PCIE0_IR_ENABLE,
};

static const struct xilinx_cpm_variant cpm5_host1 = {
	.version = CPM5_HOST1,
	.ir_misc_value = XILINX_CPM_PCIE1_MISC_IR_LOCAL,
	.ir_status = XILINX_CPM_PCIE1_IR_STATUS,
	.ir_enable = XILINX_CPM_PCIE1_IR_ENABLE,
};

static const struct xilinx_cpm_variant cpm5n_host = {
	.version = CPM5NC_HOST,
};

static const struct of_device_id xilinx_cpm_pcie_of_match[] = {
	{
		.compatible = "xlnx,versal-cpm-host-1.00",
		.data = &cpm_host,
	},
	{
		.compatible = "xlnx,versal-cpm5-host",
		.data = &cpm5_host,
	},
	{
		.compatible = "xlnx,versal-cpm5-host1",
		.data = &cpm5_host1,
	},
	{
		.compatible = "xlnx,versal-cpm5nc-host",
		.data = &cpm5n_host,
	},
	{}
};

static struct platform_driver xilinx_cpm_pcie_driver = {
	.driver = {
		.name = "xilinx-cpm-pcie",
		.of_match_table = xilinx_cpm_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = xilinx_cpm_pcie_probe,
};

builtin_platform_driver(xilinx_cpm_pcie_driver);