// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe host controller driver for AMD MDB PCIe Bridge
 *
 * Copyright (C) 2024-2025, Advanced Micro Devices, Inc.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
#include <linux/types.h>

#include "pcie-designware.h"

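/*
 * MISC TLP interrupt registers in the SLCR block: raw status, current mask,
 * and the write-one-to-enable / write-one-to-disable controls used by the
 * mask/unmask helpers below.
 */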
#define AMD_MDB_TLP_IR_STATUS_MISC		0x4C0
#define AMD_MDB_TLP_IR_MASK_MISC		0x4C4
#define AMD_MDB_TLP_IR_ENABLE_MISC		0x4C8
#define AMD_MDB_TLP_IR_DISABLE_MISC		0x4CC

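/*
 * The INTx sources occupy bits [23:16] of the MISC interrupt registers;
 * within that field, the ASSERT bit for INTx line 'x' sits at an even
 * offset (the odd offsets are assumed to be the matching DEASSERT bits).
 */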
#define AMD_MDB_TLP_PCIE_INTX_MASK	GENMASK(23, 16)

#define AMD_MDB_PCIE_INTR_INTX_ASSERT(x)	BIT((x) * 2)

/* MISC interrupt register bit definitions. */
#define AMD_MDB_PCIE_INTR_CMPL_TIMEOUT		15
#define AMD_MDB_PCIE_INTR_INTX			16
#define AMD_MDB_PCIE_INTR_PM_PME_RCVD		24
#define AMD_MDB_PCIE_INTR_PME_TO_ACK_RCVD	25
#define AMD_MDB_PCIE_INTR_MISC_CORRECTABLE	26
#define AMD_MDB_PCIE_INTR_NONFATAL		27
#define AMD_MDB_PCIE_INTR_FATAL			28

#define IMR(x) BIT(AMD_MDB_PCIE_INTR_ ##x)
#define AMD_MDB_PCIE_IMR_ALL_MASK			\
	(						\
		IMR(CMPL_TIMEOUT)	|		\
		IMR(PM_PME_RCVD)	|		\
		IMR(PME_TO_ACK_RCVD)	|		\
		IMR(MISC_CORRECTABLE)	|		\
		IMR(NONFATAL)		|		\
		IMR(FATAL)		|		\
		AMD_MDB_TLP_PCIE_INTX_MASK		\
	)

/**
 * struct amd_mdb_pcie - PCIe port information
 * @pci: DesignWare PCIe controller structure
 * @slcr: MDB System Level Control and Status Register (SLCR) base
 * @intx_domain: INTx IRQ domain pointer
 * @mdb_domain: MDB IRQ domain pointer
 * @intx_irq: INTx IRQ interrupt number
 */
struct amd_mdb_pcie {
	struct dw_pcie		pci;
	void __iomem		*slcr;
	struct irq_domain	*intx_domain;
	struct irq_domain	*mdb_domain;
	int			intx_irq;
};

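/* No controller-specific host-init callbacks are provided; the DWC core defaults are used. */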
static const struct dw_pcie_host_ops amd_mdb_pcie_host_ops = {
};

static void amd_mdb_intx_irq_mask(struct irq_data *data)
{
	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *port = &pci->pp;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
			 AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));

	/*
	 * Writing '1' to a bit in AMD_MDB_TLP_IR_DISABLE_MISC disables that
	 * interrupt, writing '0' has no effect.
	 */
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void amd_mdb_intx_irq_unmask(struct irq_data *data)
{
	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(data);
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *port = &pci->pp;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = FIELD_PREP(AMD_MDB_TLP_PCIE_INTX_MASK,
			 AMD_MDB_PCIE_INTR_INTX_ASSERT(data->hwirq));

	/*
	 * Writing '1' to a bit in AMD_MDB_TLP_IR_ENABLE_MISC enables that
	 * interrupt, writing '0' has no effect.
	 */
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip amd_mdb_intx_irq_chip = {
	.name		= "AMD MDB INTx",
	.irq_mask	= amd_mdb_intx_irq_mask,
	.irq_unmask	= amd_mdb_intx_irq_unmask,
};

/**
 * amd_mdb_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
 * @domain: IRQ domain
 * @irq: Virtual IRQ number
 * @hwirq: Hardware interrupt number
 *
 * Return: Always returns '0'.
 */
static int amd_mdb_pcie_intx_map(struct irq_domain *domain,
				 unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &amd_mdb_intx_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

/* INTx IRQ domain operations. */
static const struct irq_domain_ops amd_intx_domain_ops = {
	.map = amd_mdb_pcie_intx_map,
};

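/*
 * Demultiplex the INTx lines: read the asserted INTx bits from the MISC
 * status register and forward each one to the INTx IRQ domain.
 */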
static irqreturn_t dw_pcie_rp_intx(int irq, void *args)
{
	struct amd_mdb_pcie *pcie = args;
	unsigned long val;
	int i, int_status;

	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
	int_status = FIELD_GET(AMD_MDB_TLP_PCIE_INTX_MASK, val);

	for (i = 0; i < PCI_NUM_INTX; i++) {
		if (int_status & AMD_MDB_PCIE_INTR_INTX_ASSERT(i))
			generic_handle_domain_irq(pcie->intx_domain, i);
	}

	return IRQ_HANDLED;
}

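/*
 * Table of reportable miscellaneous interrupt causes, indexed by bit
 * position in the MISC interrupt registers. The symbolic name is used when
 * requesting the per-cause IRQ and the string when logging it; entries
 * without a string are skipped.
 */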
#define _IC(x, s)[AMD_MDB_PCIE_INTR_ ## x] = { __stringify(x), s }

static const struct {
	const char	*sym;
	const char	*str;
} intr_cause[32] = {
	_IC(CMPL_TIMEOUT,	"Completion timeout"),
	_IC(PM_PME_RCVD,	"PM_PME message received"),
	_IC(PME_TO_ACK_RCVD,	"PME_TO_ACK message received"),
	_IC(MISC_CORRECTABLE,	"Correctable error message"),
	_IC(NONFATAL,		"Non fatal error message"),
	_IC(FATAL,		"Fatal error message"),
};

static void amd_mdb_event_irq_mask(struct irq_data *d)
{
	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *port = &pci->pp;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = BIT(d->hwirq);
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static void amd_mdb_event_irq_unmask(struct irq_data *d)
{
	struct amd_mdb_pcie *pcie = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *port = &pci->pp;
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&port->lock, flags);
	val = BIT(d->hwirq);
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);
	raw_spin_unlock_irqrestore(&port->lock, flags);
}

static struct irq_chip amd_mdb_event_irq_chip = {
	.name		= "AMD MDB RC-Event",
	.irq_mask	= amd_mdb_event_irq_mask,
	.irq_unmask	= amd_mdb_event_irq_unmask,
};

static int amd_mdb_pcie_event_map(struct irq_domain *domain,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &amd_mdb_event_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops event_domain_ops = {
	.map = amd_mdb_pcie_event_map,
};

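/*
 * Top-level MISC interrupt handler: forward every pending, unmasked status
 * bit to the MDB event domain, then acknowledge the handled bits by writing
 * them back to the status register.
 */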
static irqreturn_t amd_mdb_pcie_event(int irq, void *args)
{
	struct amd_mdb_pcie *pcie = args;
	unsigned long val;
	int i;

	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
	val &= ~readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_MASK_MISC);
	for_each_set_bit(i, &val, 32)
		generic_handle_domain_irq(pcie->mdb_domain, i);
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);

	return IRQ_HANDLED;
}

static void amd_mdb_pcie_free_irq_domains(struct amd_mdb_pcie *pcie)
{
	if (pcie->intx_domain) {
		irq_domain_remove(pcie->intx_domain);
		pcie->intx_domain = NULL;
	}

	if (pcie->mdb_domain) {
		irq_domain_remove(pcie->mdb_domain);
		pcie->mdb_domain = NULL;
	}
}

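/*
 * Bring the port's MISC interrupts to a known state: disable everything,
 * clear anything already pending, then re-enable the full set.
 */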
static int amd_mdb_pcie_init_port(struct amd_mdb_pcie *pcie)
{
	unsigned long val;

	/* Disable all TLP interrupts. */
	writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
		       pcie->slcr + AMD_MDB_TLP_IR_DISABLE_MISC);

	/* Clear pending TLP interrupts. */
	val = readl_relaxed(pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);
	val &= AMD_MDB_PCIE_IMR_ALL_MASK;
	writel_relaxed(val, pcie->slcr + AMD_MDB_TLP_IR_STATUS_MISC);

	/* Enable all TLP interrupts. */
	writel_relaxed(AMD_MDB_PCIE_IMR_ALL_MASK,
		       pcie->slcr + AMD_MDB_TLP_IR_ENABLE_MISC);

	return 0;
}

/**
 * amd_mdb_pcie_init_irq_domains - Initialize the MDB event and INTx IRQ domains
 * @pcie: PCIe port information
 * @pdev: Platform device
 *
 * Return: '0' on success and error value on failure.
 */
static int amd_mdb_pcie_init_irq_domains(struct amd_mdb_pcie *pcie,
					 struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct device_node *pcie_intc_node;
	int err;

	pcie_intc_node = of_get_next_child(node, NULL);
	if (!pcie_intc_node) {
		dev_err(dev, "No PCIe Intc node found\n");
		return -ENODEV;
	}

	pcie->mdb_domain = irq_domain_add_linear(pcie_intc_node, 32,
						 &event_domain_ops, pcie);
	if (!pcie->mdb_domain) {
		err = -ENOMEM;
		dev_err(dev, "Failed to add MDB domain\n");
		goto out;
	}

	irq_domain_update_bus_token(pcie->mdb_domain, DOMAIN_BUS_NEXUS);

	pcie->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
						  &amd_intx_domain_ops, pcie);
	if (!pcie->intx_domain) {
		err = -ENOMEM;
		dev_err(dev, "Failed to add INTx domain\n");
		goto mdb_out;
	}

	of_node_put(pcie_intc_node);
	irq_domain_update_bus_token(pcie->intx_domain, DOMAIN_BUS_WIRED);

	raw_spin_lock_init(&pp->lock);

	return 0;
mdb_out:
	amd_mdb_pcie_free_irq_domains(pcie);
out:
	of_node_put(pcie_intc_node);
	return err;
}

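/*
 * Per-cause handler for the named entries in intr_cause; it currently only
 * logs the event (AER integration is planned, see the comment below).
 */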
static irqreturn_t amd_mdb_pcie_intr_handler(int irq, void *args)
{
	struct amd_mdb_pcie *pcie = args;
	struct device *dev;
	struct irq_data *d;

	dev = pcie->pci.dev;

	/*
	 * In the future, error reporting will be hooked to the AER subsystem.
	 * Currently, the driver prints a warning message to the user.
	 */
	d = irq_domain_get_irq_data(pcie->mdb_domain, irq);
	if (intr_cause[d->hwirq].str)
		dev_warn(dev, "%s\n", intr_cause[d->hwirq].str);
	else
		dev_warn_once(dev, "Unknown IRQ %ld\n", d->hwirq);

	return IRQ_HANDLED;
}

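/*
 * Wire up the interrupt hierarchy: request a handler for each named event
 * in the MDB domain, map the INTx summary interrupt, and install the
 * top-level event handler on the platform IRQ.
 */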
static int amd_mdb_setup_irq(struct amd_mdb_pcie *pcie,
			     struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int i, irq, err;

	amd_mdb_pcie_init_port(pcie);

	pp->irq = platform_get_irq(pdev, 0);
	if (pp->irq < 0)
		return pp->irq;

	for (i = 0; i < ARRAY_SIZE(intr_cause); i++) {
		if (!intr_cause[i].str)
			continue;

		irq = irq_create_mapping(pcie->mdb_domain, i);
		if (!irq) {
			dev_err(dev, "Failed to map MDB domain interrupt\n");
			return -ENOMEM;
		}

		err = devm_request_irq(dev, irq, amd_mdb_pcie_intr_handler,
				       IRQF_NO_THREAD, intr_cause[i].sym, pcie);
		if (err) {
			dev_err(dev, "Failed to request IRQ %d, err=%d\n",
				irq, err);
			return err;
		}
	}

	pcie->intx_irq = irq_create_mapping(pcie->mdb_domain,
					    AMD_MDB_PCIE_INTR_INTX);
	if (!pcie->intx_irq) {
		dev_err(dev, "Failed to map INTx interrupt\n");
		return -ENXIO;
	}

	err = devm_request_irq(dev, pcie->intx_irq, dw_pcie_rp_intx,
			       IRQF_NO_THREAD, NULL, pcie);
	if (err) {
		dev_err(dev, "Failed to request INTx IRQ %d, err=%d\n",
			pcie->intx_irq, err);
		return err;
	}

	/* Plug the main event handler. */
	err = devm_request_irq(dev, pp->irq, amd_mdb_pcie_event, IRQF_NO_THREAD,
			       "amd_mdb pcie_irq", pcie);
	if (err) {
		dev_err(dev, "Failed to request event IRQ %d, err=%d\n",
			pp->irq, err);
		return err;
	}

	return 0;
}

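/*
 * Map the SLCR region, create the IRQ domains, set up the interrupt
 * hierarchy, and hand the root port over to the DWC host core.
 */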
static int amd_mdb_add_pcie_port(struct amd_mdb_pcie *pcie,
				 struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	struct device *dev = &pdev->dev;
	int err;

	pcie->slcr = devm_platform_ioremap_resource_byname(pdev, "slcr");
	if (IS_ERR(pcie->slcr))
		return PTR_ERR(pcie->slcr);

	err = amd_mdb_pcie_init_irq_domains(pcie, pdev);
	if (err)
		return err;

	err = amd_mdb_setup_irq(pcie, pdev);
	if (err) {
		dev_err(dev, "Failed to set up interrupts, err=%d\n", err);
		goto out;
	}

	pp->ops = &amd_mdb_pcie_host_ops;

	err = dw_pcie_host_init(pp);
	if (err) {
		dev_err(dev, "Failed to initialize host, err=%d\n", err);
		goto out;
	}

	return 0;

out:
	amd_mdb_pcie_free_irq_domains(pcie);
	return err;
}

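/* Allocate the controller state, bind it to the platform device, and bring up the port. */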
static int amd_mdb_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct amd_mdb_pcie *pcie;
	struct dw_pcie *pci;

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = &pcie->pci;
	pci->dev = dev;

	platform_set_drvdata(pdev, pcie);

	return amd_mdb_add_pcie_port(pcie, pdev);
}

static const struct of_device_id amd_mdb_pcie_of_match[] = {
	{
		.compatible = "amd,versal2-mdb-host",
	},
	{},
};

static struct platform_driver amd_mdb_pcie_driver = {
	.driver = {
		.name	= "amd-mdb-pcie",
		.of_match_table	= amd_mdb_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = amd_mdb_pcie_probe,
};

builtin_platform_driver(amd_mdb_pcie_driver);