1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Copyright 2025 Aspeed Technology Inc.
4 */
5 #include <linux/bitfield.h>
6 #include <linux/clk.h>
7 #include <linux/interrupt.h>
8 #include <linux/irq.h>
9 #include <linux/irqdomain.h>
10 #include <linux/irqchip/chained_irq.h>
11 #include <linux/irqchip/irq-msi-lib.h>
12 #include <linux/kernel.h>
13 #include <linux/mfd/syscon.h>
14 #include <linux/module.h>
15 #include <linux/msi.h>
16 #include <linux/mutex.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_pci.h>
20 #include <linux/pci.h>
21 #include <linux/platform_device.h>
22 #include <linux/phy/pcie.h>
23 #include <linux/phy/phy.h>
24 #include <linux/regmap.h>
25 #include <linux/reset.h>
26
27 #include "../pci.h"
28
29 #define MAX_MSI_HOST_IRQS 64
30 #define ASPEED_RESET_RC_WAIT_MS 10
31
32 /* AST2600 AHBC Registers */
33 #define ASPEED_AHBC_KEY 0x00
34 #define ASPEED_AHBC_UNLOCK_KEY 0xaeed1a03
35 #define ASPEED_AHBC_UNLOCK 0x01
36 #define ASPEED_AHBC_ADDR_MAPPING 0x8c
37 #define ASPEED_PCIE_RC_MEMORY_EN BIT(5)
38
39 /* AST2600 H2X Controller Registers */
40 #define ASPEED_H2X_INT_STS 0x08
41 #define ASPEED_PCIE_TX_IDLE_CLEAR BIT(0)
42 #define ASPEED_PCIE_INTX_STS GENMASK(3, 0)
43 #define ASPEED_H2X_HOST_RX_DESC_DATA 0x0c
44 #define ASPEED_H2X_TX_DESC0 0x10
45 #define ASPEED_H2X_TX_DESC1 0x14
46 #define ASPEED_H2X_TX_DESC2 0x18
47 #define ASPEED_H2X_TX_DESC3 0x1c
48 #define ASPEED_H2X_TX_DESC_DATA 0x20
49 #define ASPEED_H2X_STS 0x24
50 #define ASPEED_PCIE_TX_IDLE BIT(31)
51 #define ASPEED_PCIE_STATUS_OF_TX GENMASK(25, 24)
52 #define ASPEED_PCIE_RC_H_TX_COMPLETE BIT(25)
53 #define ASPEED_PCIE_TRIGGER_TX BIT(0)
54 #define ASPEED_H2X_AHB_ADDR_CONFIG0 0x60
55 #define ASPEED_AHB_REMAP_LO_ADDR(x) (x & GENMASK(15, 4))
56 #define ASPEED_AHB_MASK_LO_ADDR(x) FIELD_PREP(GENMASK(31, 20), x)
57 #define ASPEED_H2X_AHB_ADDR_CONFIG1 0x64
58 #define ASPEED_AHB_REMAP_HI_ADDR(x) (x)
59 #define ASPEED_H2X_AHB_ADDR_CONFIG2 0x68
60 #define ASPEED_AHB_MASK_HI_ADDR(x) (x)
61 #define ASPEED_H2X_DEV_CTRL 0xc0
62 #define ASPEED_PCIE_RX_DMA_EN BIT(9)
63 #define ASPEED_PCIE_RX_LINEAR BIT(8)
64 #define ASPEED_PCIE_RX_MSI_SEL BIT(7)
65 #define ASPEED_PCIE_RX_MSI_EN BIT(6)
66 #define ASPEED_PCIE_UNLOCK_RX_BUFF BIT(4)
67 #define ASPEED_PCIE_WAIT_RX_TLP_CLR BIT(2)
68 #define ASPEED_PCIE_RC_RX_ENABLE BIT(1)
69 #define ASPEED_PCIE_RC_ENABLE BIT(0)
70 #define ASPEED_H2X_DEV_STS 0xc8
71 #define ASPEED_PCIE_RC_RX_DONE_ISR BIT(4)
72 #define ASPEED_H2X_DEV_RX_DESC_DATA 0xcc
73 #define ASPEED_H2X_DEV_RX_DESC1 0xd4
74 #define ASPEED_H2X_DEV_TX_TAG 0xfc
75 #define ASPEED_RC_TLP_TX_TAG_NUM 0x28
76
77 /* AST2700 H2X */
78 #define ASPEED_H2X_CTRL 0x00
79 #define ASPEED_H2X_BRIDGE_EN BIT(0)
80 #define ASPEED_H2X_BRIDGE_DIRECT_EN BIT(1)
81 #define ASPEED_H2X_CFGE_INT_STS 0x08
82 #define ASPEED_CFGE_TX_IDLE BIT(0)
83 #define ASPEED_CFGE_RX_BUSY BIT(1)
84 #define ASPEED_H2X_CFGI_TLP 0x20
85 #define ASPEED_CFGI_BYTE_EN_MASK GENMASK(19, 16)
86 #define ASPEED_CFGI_BYTE_EN(x) \
87 FIELD_PREP(ASPEED_CFGI_BYTE_EN_MASK, (x))
88 #define ASPEED_H2X_CFGI_WR_DATA 0x24
89 #define ASPEED_CFGI_WRITE BIT(20)
90 #define ASPEED_H2X_CFGI_CTRL 0x28
91 #define ASPEED_CFGI_TLP_FIRE BIT(0)
92 #define ASPEED_H2X_CFGI_RET_DATA 0x2c
93 #define ASPEED_H2X_CFGE_TLP_1ST 0x30
94 #define ASPEED_H2X_CFGE_TLP_NEXT 0x34
95 #define ASPEED_H2X_CFGE_CTRL 0x38
96 #define ASPEED_CFGE_TLP_FIRE BIT(0)
97 #define ASPEED_H2X_CFGE_RET_DATA 0x3c
98 #define ASPEED_H2X_REMAP_PREF_ADDR 0x70
99 #define ASPEED_REMAP_PREF_ADDR_63_32(x) (x)
100 #define ASPEED_H2X_REMAP_PCI_ADDR_HI 0x74
101 #define ASPEED_REMAP_PCI_ADDR_63_32(x) (((x) >> 32) & GENMASK(31, 0))
102 #define ASPEED_H2X_REMAP_PCI_ADDR_LO 0x78
103 #define ASPEED_REMAP_PCI_ADDR_31_12(x) ((x) & GENMASK(31, 12))
104
105 /* AST2700 SCU */
106 #define ASPEED_SCU_60 0x60
107 #define ASPEED_RC_E2M_PATH_EN BIT(0)
108 #define ASPEED_RC_H2XS_PATH_EN BIT(16)
109 #define ASPEED_RC_H2XD_PATH_EN BIT(17)
110 #define ASPEED_RC_H2XX_PATH_EN BIT(18)
111 #define ASPEED_RC_UPSTREAM_MEM_EN BIT(19)
112 #define ASPEED_SCU_64 0x64
113 #define ASPEED_RC0_DECODE_DMA_BASE(x) FIELD_PREP(GENMASK(7, 0), x)
114 #define ASPEED_RC0_DECODE_DMA_LIMIT(x) FIELD_PREP(GENMASK(15, 8), x)
115 #define ASPEED_RC1_DECODE_DMA_BASE(x) FIELD_PREP(GENMASK(23, 16), x)
116 #define ASPEED_RC1_DECODE_DMA_LIMIT(x) FIELD_PREP(GENMASK(31, 24), x)
117 #define ASPEED_SCU_70 0x70
118 #define ASPEED_DISABLE_EP_FUNC 0
119
120 /* Macro to combine Fmt and Type into the 8-bit field */
121 #define ASPEED_TLP_FMT_TYPE(fmt, type) ((((fmt) & 0x7) << 5) | ((type) & 0x1f))
122 #define ASPEED_TLP_COMMON_FIELDS GENMASK(31, 24)
123
124 /* Completion status */
125 #define CPL_STS(x) FIELD_GET(GENMASK(15, 13), (x))
126 /* TLP configuration type 0 and type 1 */
127 #define CFG0_READ_FMTTYPE \
128 FIELD_PREP(ASPEED_TLP_COMMON_FIELDS, \
129 ASPEED_TLP_FMT_TYPE(PCIE_TLP_FMT_3DW_NO_DATA, \
130 PCIE_TLP_TYPE_CFG0_RD))
131 #define CFG0_WRITE_FMTTYPE \
132 FIELD_PREP(ASPEED_TLP_COMMON_FIELDS, \
133 ASPEED_TLP_FMT_TYPE(PCIE_TLP_FMT_3DW_DATA, \
134 PCIE_TLP_TYPE_CFG0_WR))
135 #define CFG1_READ_FMTTYPE \
136 FIELD_PREP(ASPEED_TLP_COMMON_FIELDS, \
137 ASPEED_TLP_FMT_TYPE(PCIE_TLP_FMT_3DW_NO_DATA, \
138 PCIE_TLP_TYPE_CFG1_RD))
139 #define CFG1_WRITE_FMTTYPE \
140 FIELD_PREP(ASPEED_TLP_COMMON_FIELDS, \
141 ASPEED_TLP_FMT_TYPE(PCIE_TLP_FMT_3DW_DATA, \
142 PCIE_TLP_TYPE_CFG1_WR))
143 #define CFG_PAYLOAD_SIZE 0x01 /* 1 DWORD */
144 #define TLP_HEADER_BYTE_EN(x, y) ((GENMASK((x) - 1, 0) << ((y) % 4)))
145 #define TLP_GET_VALUE(x, y, z) \
146 (((x) >> ((((z) % 4)) * 8)) & GENMASK((8 * (y)) - 1, 0))
147 #define TLP_SET_VALUE(x, y, z) \
148 ((((x) & GENMASK((8 * (y)) - 1, 0)) << ((((z) % 4)) * 8)))
149 #define AST2600_TX_DESC1_VALUE 0x00002000
150 #define AST2700_TX_DESC1_VALUE 0x00401000
151
/**
 * struct aspeed_pcie_port - PCIe port information
 * @list: entry in the host's &aspeed_pcie.ports list
 * @pcie: pointer to PCIe host info
 * @clk: pointer to the port clock gate
 * @phy: pointer to PCIe PHY
 * @perst: pointer to the port "perst" (PERST#) reset control
 * @slot: port slot number (PCI_SLOT() of the port's devfn)
 */
struct aspeed_pcie_port {
	struct list_head list;
	struct aspeed_pcie *pcie;
	struct clk *clk;
	struct phy *phy;
	struct reset_control *perst;
	u32 slot;
};
169
/**
 * struct aspeed_pcie - PCIe RC information
 * @host: pointer to PCIe host bridge
 * @dev: pointer to device structure
 * @reg: PCIe host register base address
 * @ahbc: pointer to AHBC register map (set on AST2600 only)
 * @cfg: pointer to Aspeed PCIe configuration register map (AST2700 only)
 * @platform: platform specific information
 * @ports: list of PCIe ports
 * @tx_tag: current TLP TX tag, advanced after every config access
 * @root_bus_nr: bus number of the host bridge
 * @h2xrst: pointer to H2X reset control
 * @intx_domain: IRQ domain for INTx interrupts
 * @msi_domain: IRQ domain for MSI interrupts
 * @lock: mutex to protect MSI bitmap variable
 * @msi_irq_in_use: bitmap to track used MSI host IRQs
 * @clear_msi_twice: AST2700 workaround to clear MSI status twice
 */
struct aspeed_pcie {
	struct pci_host_bridge *host;
	struct device *dev;
	void __iomem *reg;
	struct regmap *ahbc;
	struct regmap *cfg;
	const struct aspeed_pcie_rc_platform *platform;
	struct list_head ports;

	u8 tx_tag;
	u8 root_bus_nr;

	struct reset_control *h2xrst;

	struct irq_domain *intx_domain;
	struct irq_domain *msi_domain;
	struct mutex lock;
	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_HOST_IRQS);

	bool clear_msi_twice; /* AST2700 workaround */
};
209
/**
 * struct aspeed_pcie_rc_platform - Platform information
 * @setup: initialization function
 * @pcie_map_ranges: function to map PCIe address ranges
 * @reg_intx_en: INTx enable register offset
 * @reg_intx_sts: INTx status register offset
 * @reg_msi_en: MSI enable register offset (two consecutive 32-bit words)
 * @reg_msi_sts: MSI status register offset (two consecutive 32-bit words)
 * @msi_address: HW fixed MSI address
 */
struct aspeed_pcie_rc_platform {
	int (*setup)(struct platform_device *pdev);
	void (*pcie_map_ranges)(struct aspeed_pcie *pcie, u64 pci_addr);
	int reg_intx_en;
	int reg_intx_sts;
	int reg_msi_en;
	int reg_msi_sts;
	u32 msi_address;
};
229
static void aspeed_pcie_intx_irq_ack(struct irq_data *d)
{
	struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d);
	int intx_en = pcie->platform->reg_intx_en;
	u32 en;

	/*
	 * NOTE(review): "ack" sets the per-line enable bit, making it
	 * identical to aspeed_pcie_intx_irq_unmask(). Since
	 * handle_level_irq() masks then acks, this effectively re-enables
	 * the line while the handler runs. Presumably the hardware latches
	 * status elsewhere, or a write to reg_intx_sts was intended here —
	 * confirm against the H2X datasheet.
	 */
	en = readl(pcie->reg + intx_en);
	en |= BIT(d->hwirq);
	writel(en, pcie->reg + intx_en);
}
240
aspeed_pcie_intx_irq_mask(struct irq_data * d)241 static void aspeed_pcie_intx_irq_mask(struct irq_data *d)
242 {
243 struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d);
244 int intx_en = pcie->platform->reg_intx_en;
245 u32 en;
246
247 en = readl(pcie->reg + intx_en);
248 en &= ~BIT(d->hwirq);
249 writel(en, pcie->reg + intx_en);
250 }
251
aspeed_pcie_intx_irq_unmask(struct irq_data * d)252 static void aspeed_pcie_intx_irq_unmask(struct irq_data *d)
253 {
254 struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d);
255 int intx_en = pcie->platform->reg_intx_en;
256 u32 en;
257
258 en = readl(pcie->reg + intx_en);
259 en |= BIT(d->hwirq);
260 writel(en, pcie->reg + intx_en);
261 }
262
/* irq_chip shared by all four INTx lines; ops gate the per-line enable bit. */
static struct irq_chip aspeed_intx_irq_chip = {
	.name = "INTx",
	.irq_ack = aspeed_pcie_intx_irq_ack,
	.irq_mask = aspeed_pcie_intx_irq_mask,
	.irq_unmask = aspeed_pcie_intx_irq_unmask,
};
269
/* Bind a virtual IRQ to an INTx hwirq: level triggered, shared chip. */
static int aspeed_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &aspeed_intx_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}

static const struct irq_domain_ops aspeed_intx_domain_ops = {
	.map = aspeed_pcie_intx_map,
};
283
aspeed_pcie_intr_handler(int irq,void * dev_id)284 static irqreturn_t aspeed_pcie_intr_handler(int irq, void *dev_id)
285 {
286 struct aspeed_pcie *pcie = dev_id;
287 const struct aspeed_pcie_rc_platform *platform = pcie->platform;
288 unsigned long status;
289 unsigned long intx;
290 u32 bit;
291 int i;
292
293 intx = FIELD_GET(ASPEED_PCIE_INTX_STS,
294 readl(pcie->reg + platform->reg_intx_sts));
295 for_each_set_bit(bit, &intx, PCI_NUM_INTX)
296 generic_handle_domain_irq(pcie->intx_domain, bit);
297
298 for (i = 0; i < 2; i++) {
299 int msi_sts_reg = platform->reg_msi_sts + (i * 4);
300
301 status = readl(pcie->reg + msi_sts_reg);
302 writel(status, pcie->reg + msi_sts_reg);
303
304 /*
305 * AST2700 workaround:
306 * The MSI status needs to clear one more time.
307 */
308 if (pcie->clear_msi_twice)
309 writel(status, pcie->reg + msi_sts_reg);
310
311 for_each_set_bit(bit, &status, 32) {
312 bit += (i * 32);
313 generic_handle_domain_irq(pcie->msi_domain, bit);
314 }
315 }
316
317 return IRQ_HANDLED;
318 }
319
aspeed_pcie_get_bdf_offset(struct pci_bus * bus,unsigned int devfn,int where)320 static u32 aspeed_pcie_get_bdf_offset(struct pci_bus *bus, unsigned int devfn,
321 int where)
322 {
323 return ((bus->number) << 24) | (PCI_SLOT(devfn) << 19) |
324 (PCI_FUNC(devfn) << 16) | (where & ~3);
325 }
326
/*
 * Issue one configuration TLP on the AST2600 H2X and collect the
 * completion. Handles both reads and writes; on any failure the value is
 * set to the PCI error response and PCIBIOS_SET_FAILED is returned.
 * The register sequence below is order dependent.
 */
static int aspeed_ast2600_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val, u32 fmt_type,
			       bool write)
{
	struct aspeed_pcie *pcie = bus->sysdata;
	u32 bdf_offset, cfg_val, isr;
	int ret;

	bdf_offset = aspeed_pcie_get_bdf_offset(bus, devfn, where);

	/* Driver may set unlock RX buffer before triggering next TX config */
	cfg_val = readl(pcie->reg + ASPEED_H2X_DEV_CTRL);
	writel(ASPEED_PCIE_UNLOCK_RX_BUFF | cfg_val,
	       pcie->reg + ASPEED_H2X_DEV_CTRL);

	/* TX descriptor 0: TLP Fmt/Type plus the one-DWORD payload length. */
	cfg_val = fmt_type | CFG_PAYLOAD_SIZE;
	writel(cfg_val, pcie->reg + ASPEED_H2X_TX_DESC0);

	/* TX descriptor 1: fixed bits, TX tag and first-DW byte enables. */
	cfg_val = AST2600_TX_DESC1_VALUE |
		  FIELD_PREP(GENMASK(11, 8), pcie->tx_tag) |
		  TLP_HEADER_BYTE_EN(size, where);
	writel(cfg_val, pcie->reg + ASPEED_H2X_TX_DESC1);

	/* TX descriptor 2: target bus/device/function and register offset. */
	writel(bdf_offset, pcie->reg + ASPEED_H2X_TX_DESC2);
	writel(0, pcie->reg + ASPEED_H2X_TX_DESC3);
	if (write)
		writel(TLP_SET_VALUE(*val, size, where),
		       pcie->reg + ASPEED_H2X_TX_DESC_DATA);

	/* Fire the config TLP and wait for the TX engine to go idle. */
	cfg_val = readl(pcie->reg + ASPEED_H2X_STS);
	cfg_val |= ASPEED_PCIE_TRIGGER_TX;
	writel(cfg_val, pcie->reg + ASPEED_H2X_STS);

	ret = readl_poll_timeout(pcie->reg + ASPEED_H2X_STS, cfg_val,
				 (cfg_val & ASPEED_PCIE_TX_IDLE), 0, 50);
	if (ret) {
		dev_err(pcie->dev,
			"%02x:%02x.%d CR tx timeout sts: 0x%08x\n",
			bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), cfg_val);
		ret = PCIBIOS_SET_FAILED;
		PCI_SET_ERROR_RESPONSE(val);
		goto out;
	}

	/* Acknowledge TX-idle before inspecting the completion status. */
	cfg_val = readl(pcie->reg + ASPEED_H2X_INT_STS);
	cfg_val |= ASPEED_PCIE_TX_IDLE_CLEAR;
	writel(cfg_val, pcie->reg + ASPEED_H2X_INT_STS);

	cfg_val = readl(pcie->reg + ASPEED_H2X_STS);
	switch (cfg_val & ASPEED_PCIE_STATUS_OF_TX) {
	case ASPEED_PCIE_RC_H_TX_COMPLETE:
		/* Completion routed via the RC path: wait for RX done. */
		ret = readl_poll_timeout(pcie->reg + ASPEED_H2X_DEV_STS, isr,
					 (isr & ASPEED_PCIE_RC_RX_DONE_ISR), 0,
					 50);
		if (ret) {
			dev_err(pcie->dev,
				"%02x:%02x.%d CR rx timeout sts: 0x%08x\n",
				bus->number, PCI_SLOT(devfn),
				PCI_FUNC(devfn), isr);
			ret = PCIBIOS_SET_FAILED;
			PCI_SET_ERROR_RESPONSE(val);
			goto out;
		}
		if (!write) {
			/* Read data is only valid on a successful status. */
			cfg_val = readl(pcie->reg + ASPEED_H2X_DEV_RX_DESC1);
			if (CPL_STS(cfg_val) != PCIE_CPL_STS_SUCCESS) {
				ret = PCIBIOS_SET_FAILED;
				PCI_SET_ERROR_RESPONSE(val);
				goto out;
			} else {
				*val = readl(pcie->reg +
					     ASPEED_H2X_DEV_RX_DESC_DATA);
			}
		}
		break;
	case ASPEED_PCIE_STATUS_OF_TX:
		/* Both status bits set: the transfer failed outright. */
		ret = PCIBIOS_SET_FAILED;
		PCI_SET_ERROR_RESPONSE(val);
		goto out;
	default:
		/* Completion landed in the host RX descriptor data. */
		*val = readl(pcie->reg + ASPEED_H2X_HOST_RX_DESC_DATA);
		break;
	}

	/* Release the RX buffer so the next config cycle can proceed. */
	cfg_val = readl(pcie->reg + ASPEED_H2X_DEV_CTRL);
	cfg_val |= ASPEED_PCIE_UNLOCK_RX_BUFF;
	writel(cfg_val, pcie->reg + ASPEED_H2X_DEV_CTRL);

	/* Extract the requested bytes from the returned DWORD. */
	*val = TLP_GET_VALUE(*val, size, where);

	ret = PCIBIOS_SUCCESSFUL;
out:
	/* Clear latched device status and advance the 3-bit TX tag. */
	cfg_val = readl(pcie->reg + ASPEED_H2X_DEV_STS);
	writel(cfg_val, pcie->reg + ASPEED_H2X_DEV_STS);
	pcie->tx_tag = (pcie->tx_tag + 1) % 0x8;
	return ret;
}
424
static int aspeed_ast2600_rd_conf(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	/* The single AST2600 Root Port sits at device 8 on the root bus. */
	if (PCI_SLOT(devfn) == 8)
		return aspeed_ast2600_conf(bus, devfn, where, size, val,
					   CFG0_READ_FMTTYPE, false);

	return PCIBIOS_DEVICE_NOT_FOUND;
}
437
static int aspeed_ast2600_child_rd_conf(struct pci_bus *bus, unsigned int devfn,
					int where, int size, u32 *val)
{
	/* Devices behind the Root Port are reached with Type 1 reads. */
	const u32 fmttype = CFG1_READ_FMTTYPE;

	return aspeed_ast2600_conf(bus, devfn, where, size, val, fmttype,
				   false);
}
444
static int aspeed_ast2600_wr_conf(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 val)
{
	/* The single AST2600 Root Port sits at device 8 on the root bus. */
	if (PCI_SLOT(devfn) == 8)
		return aspeed_ast2600_conf(bus, devfn, where, size, &val,
					   CFG0_WRITE_FMTTYPE, true);

	return PCIBIOS_DEVICE_NOT_FOUND;
}
457
static int aspeed_ast2600_child_wr_conf(struct pci_bus *bus, unsigned int devfn,
					int where, int size, u32 val)
{
	/* Devices behind the Root Port are reached with Type 1 writes. */
	u32 data = val;

	return aspeed_ast2600_conf(bus, devfn, where, size, &data,
				   CFG1_WRITE_FMTTYPE, true);
}
464
/*
 * AST2700 internal (CFGI) access to the Root Port's own config space.
 * The register offset and per-byte enables are programmed directly; the
 * callers have already restricted access to devfn 0, so @devfn is unused.
 */
static int aspeed_ast2700_config(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 *val, bool write)
{
	struct aspeed_pcie *pcie = bus->sysdata;
	u32 cfg_val;

	cfg_val = ASPEED_CFGI_BYTE_EN(TLP_HEADER_BYTE_EN(size, where)) |
		  (where & ~3);
	if (write)
		cfg_val |= ASPEED_CFGI_WRITE;
	writel(cfg_val, pcie->reg + ASPEED_H2X_CFGI_TLP);

	/* Write data is staged unconditionally; reads simply ignore it. */
	writel(TLP_SET_VALUE(*val, size, where),
	       pcie->reg + ASPEED_H2X_CFGI_WR_DATA);
	writel(ASPEED_CFGI_TLP_FIRE, pcie->reg + ASPEED_H2X_CFGI_CTRL);
	*val = readl(pcie->reg + ASPEED_H2X_CFGI_RET_DATA);
	*val = TLP_GET_VALUE(*val, size, where);

	return PCIBIOS_SUCCESSFUL;
}
485
/*
 * AST2700 external (CFGE) config access for devices below the Root Port.
 * Builds a config TLP, fires it, and waits for TX idle then the RX
 * completion. On any failure the value is set to the PCI error response.
 */
static int aspeed_ast2700_child_config(struct pci_bus *bus, unsigned int devfn,
				       int where, int size, u32 *val,
				       bool write)
{
	struct aspeed_pcie *pcie = bus->sysdata;
	u32 bdf_offset, status, cfg_val;
	int ret;

	bdf_offset = aspeed_pcie_get_bdf_offset(bus, devfn, where);

	/*
	 * The bus directly below the root bus gets Type 0 config TLPs;
	 * anything deeper gets Type 1.
	 */
	cfg_val = CFG_PAYLOAD_SIZE;
	if (write)
		cfg_val |= (bus->number == (pcie->root_bus_nr + 1)) ?
				   CFG0_WRITE_FMTTYPE :
				   CFG1_WRITE_FMTTYPE;
	else
		cfg_val |= (bus->number == (pcie->root_bus_nr + 1)) ?
				   CFG0_READ_FMTTYPE :
				   CFG1_READ_FMTTYPE;
	writel(cfg_val, pcie->reg + ASPEED_H2X_CFGE_TLP_1ST);

	/*
	 * The remaining header DWORDs are pushed through the same
	 * CFGE_TLP_NEXT register in sequence: descriptor word, BDF/offset,
	 * then (for writes) the payload.
	 */
	cfg_val = AST2700_TX_DESC1_VALUE |
		  FIELD_PREP(GENMASK(11, 8), pcie->tx_tag) |
		  TLP_HEADER_BYTE_EN(size, where);
	writel(cfg_val, pcie->reg + ASPEED_H2X_CFGE_TLP_NEXT);

	writel(bdf_offset, pcie->reg + ASPEED_H2X_CFGE_TLP_NEXT);
	if (write)
		writel(TLP_SET_VALUE(*val, size, where),
		       pcie->reg + ASPEED_H2X_CFGE_TLP_NEXT);
	/* Clear stale TX-idle/RX-busy state, then fire the TLP. */
	writel(ASPEED_CFGE_TX_IDLE | ASPEED_CFGE_RX_BUSY,
	       pcie->reg + ASPEED_H2X_CFGE_INT_STS);
	writel(ASPEED_CFGE_TLP_FIRE, pcie->reg + ASPEED_H2X_CFGE_CTRL);

	ret = readl_poll_timeout(pcie->reg + ASPEED_H2X_CFGE_INT_STS, status,
				 (status & ASPEED_CFGE_TX_IDLE), 0, 50);
	if (ret) {
		dev_err(pcie->dev,
			"%02x:%02x.%d CR tx timeout sts: 0x%08x\n",
			bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), status);
		ret = PCIBIOS_SET_FAILED;
		PCI_SET_ERROR_RESPONSE(val);
		goto out;
	}

	/* Wait for the completion to arrive (flagged by RX busy). */
	ret = readl_poll_timeout(pcie->reg + ASPEED_H2X_CFGE_INT_STS, status,
				 (status & ASPEED_CFGE_RX_BUSY), 0, 50);
	if (ret) {
		dev_err(pcie->dev,
			"%02x:%02x.%d CR rx timeout sts: 0x%08x\n",
			bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), status);
		ret = PCIBIOS_SET_FAILED;
		PCI_SET_ERROR_RESPONSE(val);
		goto out;
	}
	*val = readl(pcie->reg + ASPEED_H2X_CFGE_RET_DATA);
	*val = TLP_GET_VALUE(*val, size, where);

	ret = PCIBIOS_SUCCESSFUL;
out:
	writel(status, pcie->reg + ASPEED_H2X_CFGE_INT_STS);
	/*
	 * NOTE(review): the tag wraps modulo 0xf (15 values) here while the
	 * AST2600 path wraps modulo 0x8; if a 4-bit tag space (16 values)
	 * was intended this should be % 0x10 — confirm against datasheet.
	 */
	pcie->tx_tag = (pcie->tx_tag + 1) % 0xf;
	return ret;
}
550
static int aspeed_ast2700_rd_conf(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	/* Only devfn 0 (the single Root Port) exists on the root bus. */
	if (devfn == 0)
		return aspeed_ast2700_config(bus, devfn, where, size, val,
					     false);

	return PCIBIOS_DEVICE_NOT_FOUND;
}
562
static int aspeed_ast2700_child_rd_conf(struct pci_bus *bus, unsigned int devfn,
					int where, int size, u32 *val)
{
	/* Read (write == false) through the external CFGE path. */
	const bool is_write = false;

	return aspeed_ast2700_child_config(bus, devfn, where, size, val,
					   is_write);
}
568
static int aspeed_ast2700_wr_conf(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 val)
{
	/* Only devfn 0 (the single Root Port) exists on the root bus. */
	if (devfn == 0)
		return aspeed_ast2700_config(bus, devfn, where, size, &val,
					     true);

	return PCIBIOS_DEVICE_NOT_FOUND;
}
580
static int aspeed_ast2700_child_wr_conf(struct pci_bus *bus, unsigned int devfn,
					int where, int size, u32 val)
{
	/* Pass the write data by reference through the CFGE path. */
	u32 data = val;

	return aspeed_ast2700_child_config(bus, devfn, where, size, &data,
					   true);
}
586
/* Root-bus accessors: AST2600 uses Type 0 config TLPs for its Root Port. */
static struct pci_ops aspeed_ast2600_pcie_ops = {
	.read = aspeed_ast2600_rd_conf,
	.write = aspeed_ast2600_wr_conf,
};

/* Child-bus accessors: Type 1 config TLPs for devices behind the port. */
static struct pci_ops aspeed_ast2600_pcie_child_ops = {
	.read = aspeed_ast2600_child_rd_conf,
	.write = aspeed_ast2600_child_wr_conf,
};

/* Root-bus accessors: AST2700 reaches its Root Port via the CFGI path. */
static struct pci_ops aspeed_ast2700_pcie_ops = {
	.read = aspeed_ast2700_rd_conf,
	.write = aspeed_ast2700_wr_conf,
};

/* Child-bus accessors: Type 0/1 config TLPs via the CFGE path. */
static struct pci_ops aspeed_ast2700_pcie_child_ops = {
	.read = aspeed_ast2700_child_rd_conf,
	.write = aspeed_ast2700_child_wr_conf,
};
606
aspeed_irq_compose_msi_msg(struct irq_data * data,struct msi_msg * msg)607 static void aspeed_irq_compose_msi_msg(struct irq_data *data,
608 struct msi_msg *msg)
609 {
610 struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(data);
611
612 msg->address_hi = 0;
613 msg->address_lo = pcie->platform->msi_address;
614 msg->data = data->hwirq;
615 }
616
/* Bottom-level MSI chip; it only composes the MSI message. */
static struct irq_chip aspeed_msi_bottom_irq_chip = {
	.name = "ASPEED MSI",
	.irq_compose_msi_msg = aspeed_irq_compose_msi_msg,
};
621
aspeed_irq_msi_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * args)622 static int aspeed_irq_msi_domain_alloc(struct irq_domain *domain,
623 unsigned int virq, unsigned int nr_irqs,
624 void *args)
625 {
626 struct aspeed_pcie *pcie = domain->host_data;
627 int bit;
628 int i;
629
630 guard(mutex)(&pcie->lock);
631
632 bit = bitmap_find_free_region(pcie->msi_irq_in_use, MAX_MSI_HOST_IRQS,
633 get_count_order(nr_irqs));
634
635 if (bit < 0)
636 return -ENOSPC;
637
638 for (i = 0; i < nr_irqs; i++) {
639 irq_domain_set_info(domain, virq + i, bit + i,
640 &aspeed_msi_bottom_irq_chip,
641 domain->host_data, handle_simple_irq, NULL,
642 NULL);
643 }
644
645 return 0;
646 }
647
/* Return an allocated hwirq block to the bitmap. */
static void aspeed_irq_msi_domain_free(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
	struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(data);

	/* Serialize against aspeed_irq_msi_domain_alloc(). */
	guard(mutex)(&pcie->lock);

	/* The hwirq of @virq is the first bit of the allocated region. */
	bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
			      get_count_order(nr_irqs));
}
659
static const struct irq_domain_ops aspeed_msi_domain_ops = {
	.alloc = aspeed_irq_msi_domain_alloc,
	.free = aspeed_irq_msi_domain_free,
};

/* Flags required of / supported for child MSI domains. */
#define ASPEED_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \
				   MSI_FLAG_USE_DEF_CHIP_OPS | \
				   MSI_FLAG_NO_AFFINITY)

#define ASPEED_MSI_FLAGS_SUPPORTED (MSI_GENERIC_FLAGS_MASK | \
				    MSI_FLAG_MULTI_PCI_MSI | \
				    MSI_FLAG_PCI_MSIX)

/* Parent-domain description consumed by msi_create_parent_irq_domain(). */
static const struct msi_parent_ops aspeed_msi_parent_ops = {
	.required_flags = ASPEED_MSI_FLAGS_REQUIRED,
	.supported_flags = ASPEED_MSI_FLAGS_SUPPORTED,
	.bus_select_token = DOMAIN_BUS_PCI_MSI,
	.chip_flags = MSI_CHIP_FLAG_SET_ACK,
	.prefix = "ASPEED-",
	.init_dev_msi_info = msi_lib_init_dev_msi_info,
};
681
aspeed_pcie_msi_init(struct aspeed_pcie * pcie)682 static int aspeed_pcie_msi_init(struct aspeed_pcie *pcie)
683 {
684 writel(~0, pcie->reg + pcie->platform->reg_msi_en);
685 writel(~0, pcie->reg + pcie->platform->reg_msi_en + 0x04);
686 writel(~0, pcie->reg + pcie->platform->reg_msi_sts);
687 writel(~0, pcie->reg + pcie->platform->reg_msi_sts + 0x04);
688
689 struct irq_domain_info info = {
690 .fwnode = dev_fwnode(pcie->dev),
691 .ops = &aspeed_msi_domain_ops,
692 .host_data = pcie,
693 .size = MAX_MSI_HOST_IRQS,
694 };
695
696 pcie->msi_domain = msi_create_parent_irq_domain(&info,
697 &aspeed_msi_parent_ops);
698 if (!pcie->msi_domain)
699 return dev_err_probe(pcie->dev, -ENOMEM,
700 "failed to create MSI domain\n");
701
702 return 0;
703 }
704
aspeed_pcie_msi_free(struct aspeed_pcie * pcie)705 static void aspeed_pcie_msi_free(struct aspeed_pcie *pcie)
706 {
707 if (pcie->msi_domain) {
708 irq_domain_remove(pcie->msi_domain);
709 pcie->msi_domain = NULL;
710 }
711 }
712
aspeed_pcie_irq_domain_free(void * d)713 static void aspeed_pcie_irq_domain_free(void *d)
714 {
715 struct aspeed_pcie *pcie = d;
716
717 if (pcie->intx_domain) {
718 irq_domain_remove(pcie->intx_domain);
719 pcie->intx_domain = NULL;
720 }
721 aspeed_pcie_msi_free(pcie);
722 }
723
/* Create the INTx and MSI IRQ domains and quiesce the INTx sources. */
static int aspeed_pcie_init_irq_domain(struct aspeed_pcie *pcie)
{
	int ret;

	/* One linear domain covering the four INTx lines. */
	pcie->intx_domain = irq_domain_add_linear(pcie->dev->of_node,
						  PCI_NUM_INTX,
						  &aspeed_intx_domain_ops,
						  pcie);
	if (!pcie->intx_domain) {
		ret = dev_err_probe(pcie->dev, -ENOMEM,
				    "failed to get INTx IRQ domain\n");
		goto err;
	}

	/* All INTx sources disabled, stale status cleared. */
	writel(0, pcie->reg + pcie->platform->reg_intx_en);
	writel(~0, pcie->reg + pcie->platform->reg_intx_sts);

	ret = aspeed_pcie_msi_init(pcie);
	if (ret)
		goto err;

	return 0;
err:
	/* Safe on partial setup: the teardown helpers check for NULL. */
	aspeed_pcie_irq_domain_free(pcie);
	return ret;
}
750
aspeed_pcie_port_init(struct aspeed_pcie_port * port)751 static int aspeed_pcie_port_init(struct aspeed_pcie_port *port)
752 {
753 struct aspeed_pcie *pcie = port->pcie;
754 struct device *dev = pcie->dev;
755 int ret;
756
757 ret = clk_prepare_enable(port->clk);
758 if (ret)
759 return dev_err_probe(dev, ret,
760 "failed to set clock for slot (%d)\n",
761 port->slot);
762
763 ret = phy_init(port->phy);
764 if (ret)
765 return dev_err_probe(dev, ret,
766 "failed to init phy pcie for slot (%d)\n",
767 port->slot);
768
769 ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
770 if (ret)
771 return dev_err_probe(dev, ret,
772 "failed to set phy mode for slot (%d)\n",
773 port->slot);
774
775 reset_control_deassert(port->perst);
776 msleep(PCIE_RESET_CONFIG_WAIT_MS);
777
778 return 0;
779 }
780
aspeed_host_reset(struct aspeed_pcie * pcie)781 static void aspeed_host_reset(struct aspeed_pcie *pcie)
782 {
783 reset_control_assert(pcie->h2xrst);
784 mdelay(ASPEED_RESET_RC_WAIT_MS);
785 reset_control_deassert(pcie->h2xrst);
786 }
787
aspeed_pcie_map_ranges(struct aspeed_pcie * pcie)788 static void aspeed_pcie_map_ranges(struct aspeed_pcie *pcie)
789 {
790 struct pci_host_bridge *bridge = pcie->host;
791 struct resource_entry *window;
792
793 resource_list_for_each_entry(window, &bridge->windows) {
794 u64 pci_addr;
795
796 if (resource_type(window->res) != IORESOURCE_MEM)
797 continue;
798
799 pci_addr = window->res->start - window->offset;
800 pcie->platform->pcie_map_ranges(pcie, pci_addr);
801 break;
802 }
803 }
804
/* Program the AST2600 AHB-to-PCI address remap/mask registers. */
static void aspeed_ast2600_pcie_map_ranges(struct aspeed_pcie *pcie,
					   u64 pci_addr)
{
	u32 pci_addr_lo = pci_addr & GENMASK(31, 0);
	u32 pci_addr_hi = (pci_addr >> 32) & GENMASK(31, 0);

	/*
	 * The low remap register takes bits [31:16] of the PCI address in
	 * its [15:0] field, hence the shift. The 0xe00 mask value —
	 * presumably sized to the AST2600 PCIe MMIO window — selects which
	 * upper address bits participate; confirm against the datasheet.
	 */
	pci_addr_lo >>= 16;
	writel(ASPEED_AHB_REMAP_LO_ADDR(pci_addr_lo) |
	       ASPEED_AHB_MASK_LO_ADDR(0xe00),
	       pcie->reg + ASPEED_H2X_AHB_ADDR_CONFIG0);
	writel(ASPEED_AHB_REMAP_HI_ADDR(pci_addr_hi),
	       pcie->reg + ASPEED_H2X_AHB_ADDR_CONFIG1);
	writel(ASPEED_AHB_MASK_HI_ADDR(~0),
	       pcie->reg + ASPEED_H2X_AHB_ADDR_CONFIG2);
}
820
/* AST2600 platform setup: AHBC routing, H2X bridge and RC enables. */
static int aspeed_ast2600_setup(struct platform_device *pdev)
{
	struct aspeed_pcie *pcie = platform_get_drvdata(pdev);
	struct device *dev = pcie->dev;

	pcie->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
						     "aspeed,ahbc");
	if (IS_ERR(pcie->ahbc))
		return dev_err_probe(dev, PTR_ERR(pcie->ahbc),
				     "failed to map ahbc base\n");

	aspeed_host_reset(pcie);

	/*
	 * Unlock the AHBC with the magic key, route RC memory through the
	 * AHBC address map, then write the non-magic value — presumably
	 * re-locking the AHBC; confirm against the datasheet.
	 */
	regmap_write(pcie->ahbc, ASPEED_AHBC_KEY, ASPEED_AHBC_UNLOCK_KEY);
	regmap_update_bits(pcie->ahbc, ASPEED_AHBC_ADDR_MAPPING,
			   ASPEED_PCIE_RC_MEMORY_EN, ASPEED_PCIE_RC_MEMORY_EN);
	regmap_write(pcie->ahbc, ASPEED_AHBC_KEY, ASPEED_AHBC_UNLOCK);

	writel(ASPEED_H2X_BRIDGE_EN, pcie->reg + ASPEED_H2X_CTRL);

	/* Enable the RC and its RX path: DMA, linear mode, MSI decode. */
	writel(ASPEED_PCIE_RX_DMA_EN | ASPEED_PCIE_RX_LINEAR |
	       ASPEED_PCIE_RX_MSI_SEL | ASPEED_PCIE_RX_MSI_EN |
	       ASPEED_PCIE_WAIT_RX_TLP_CLR | ASPEED_PCIE_RC_RX_ENABLE |
	       ASPEED_PCIE_RC_ENABLE,
	       pcie->reg + ASPEED_H2X_DEV_CTRL);

	writel(ASPEED_RC_TLP_TX_TAG_NUM, pcie->reg + ASPEED_H2X_DEV_TX_TAG);

	pcie->host->ops = &aspeed_ast2600_pcie_ops;
	pcie->host->child_ops = &aspeed_ast2600_pcie_child_ops;

	return 0;
}
854
/* Program the AST2700 CPU-to-PCI address remap, low word first. */
static void aspeed_ast2700_pcie_map_ranges(struct aspeed_pcie *pcie,
					   u64 pci_addr)
{
	u32 lo = ASPEED_REMAP_PCI_ADDR_31_12(pci_addr);
	u32 hi = ASPEED_REMAP_PCI_ADDR_63_32(pci_addr);

	writel(lo, pcie->reg + ASPEED_H2X_REMAP_PCI_ADDR_LO);
	writel(hi, pcie->reg + ASPEED_H2X_REMAP_PCI_ADDR_HI);
}
863
/* AST2700 platform setup: SCU datapaths, H2X bridge and remap prep. */
static int aspeed_ast2700_setup(struct platform_device *pdev)
{
	struct aspeed_pcie *pcie = platform_get_drvdata(pdev);
	struct device *dev = pcie->dev;

	pcie->cfg = syscon_regmap_lookup_by_phandle(dev->of_node,
						    "aspeed,pciecfg");
	if (IS_ERR(pcie->cfg))
		return dev_err_probe(dev, PTR_ERR(pcie->cfg),
				     "failed to map pciecfg base\n");

	/* Open every RC datapath in the SCU and allow upstream memory. */
	regmap_update_bits(pcie->cfg, ASPEED_SCU_60,
			   ASPEED_RC_E2M_PATH_EN | ASPEED_RC_H2XS_PATH_EN |
			   ASPEED_RC_H2XD_PATH_EN | ASPEED_RC_H2XX_PATH_EN |
			   ASPEED_RC_UPSTREAM_MEM_EN,
			   ASPEED_RC_E2M_PATH_EN | ASPEED_RC_H2XS_PATH_EN |
			   ASPEED_RC_H2XD_PATH_EN | ASPEED_RC_H2XX_PATH_EN |
			   ASPEED_RC_UPSTREAM_MEM_EN);
	/* Full 0x00-0xff DMA decode window for both root complexes. */
	regmap_write(pcie->cfg, ASPEED_SCU_64,
		     ASPEED_RC0_DECODE_DMA_BASE(0) |
		     ASPEED_RC0_DECODE_DMA_LIMIT(0xff) |
		     ASPEED_RC1_DECODE_DMA_BASE(0) |
		     ASPEED_RC1_DECODE_DMA_LIMIT(0xff));
	regmap_write(pcie->cfg, ASPEED_SCU_70, ASPEED_DISABLE_EP_FUNC);

	aspeed_host_reset(pcie);

	/* Toggle the bridge off, then on with the direct path enabled. */
	writel(0, pcie->reg + ASPEED_H2X_CTRL);
	writel(ASPEED_H2X_BRIDGE_EN | ASPEED_H2X_BRIDGE_DIRECT_EN,
	       pcie->reg + ASPEED_H2X_CTRL);

	/* Prepare for 64-bit BAR pref */
	writel(ASPEED_REMAP_PREF_ADDR_63_32(0x3),
	       pcie->reg + ASPEED_H2X_REMAP_PREF_ADDR);

	pcie->host->ops = &aspeed_ast2700_pcie_ops;
	pcie->host->child_ops = &aspeed_ast2700_pcie_child_ops;
	/* HW quirk: MSI status must be cleared twice in the IRQ handler. */
	pcie->clear_msi_twice = true;

	return 0;
}
905
/* devm action: drop the per-port "perst" reset control reference. */
static void aspeed_pcie_reset_release(void *d)
{
	struct reset_control *perst = d;

	/*
	 * reset_control_put() tolerates NULL and error pointers, so no
	 * explicit guard is required.
	 */
	reset_control_put(perst);
}
915
/*
 * Allocate and initialize one port from its DT child node: clock, PHY
 * and "perst" reset, then bring the port up.
 */
static int aspeed_pcie_parse_port(struct aspeed_pcie *pcie,
				  struct device_node *node,
				  int slot)
{
	struct aspeed_pcie_port *port;
	struct device *dev = pcie->dev;
	int ret;

	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	/* Per-port clock gate described in the port's DT node. */
	port->clk = devm_get_clk_from_child(dev, node, NULL);
	if (IS_ERR(port->clk))
		return dev_err_probe(dev, PTR_ERR(port->clk),
				     "failed to get pcie%d clock\n", slot);

	port->phy = devm_of_phy_get(dev, node, NULL);
	if (IS_ERR(port->phy))
		return dev_err_probe(dev, PTR_ERR(port->phy),
				     "failed to get phy pcie%d\n", slot);

	/*
	 * The "perst" reset is not devm-managed itself, so register a devm
	 * action to put it on probe failure or driver removal.
	 */
	port->perst = of_reset_control_get_exclusive(node, "perst");
	if (IS_ERR(port->perst))
		return dev_err_probe(dev, PTR_ERR(port->perst),
				     "failed to get pcie%d reset control\n",
				     slot);
	ret = devm_add_action_or_reset(dev, aspeed_pcie_reset_release,
				       port->perst);
	if (ret)
		return ret;
	/* Hold the port in reset until aspeed_pcie_port_init() releases it. */
	reset_control_assert(port->perst);

	port->slot = slot;
	port->pcie = pcie;

	/* NOTE: the port stays on the list even if init below fails; probe
	 * aborts in that case, so the stale entry is never used.
	 */
	INIT_LIST_HEAD(&port->list);
	list_add_tail(&port->list, &pcie->ports);

	ret = aspeed_pcie_port_init(port);
	if (ret)
		return ret;

	return 0;
}
961
aspeed_pcie_parse_dt(struct aspeed_pcie * pcie)962 static int aspeed_pcie_parse_dt(struct aspeed_pcie *pcie)
963 {
964 struct device *dev = pcie->dev;
965 struct device_node *node = dev->of_node;
966 int ret;
967
968 for_each_available_child_of_node_scoped(node, child) {
969 int slot;
970 const char *type;
971
972 ret = of_property_read_string(child, "device_type", &type);
973 if (ret || strcmp(type, "pci"))
974 continue;
975
976 ret = of_pci_get_devfn(child);
977 if (ret < 0)
978 return dev_err_probe(dev, ret,
979 "failed to parse devfn\n");
980
981 slot = PCI_SLOT(ret);
982
983 ret = aspeed_pcie_parse_port(pcie, child, slot);
984 if (ret)
985 return ret;
986 }
987
988 if (list_empty(&pcie->ports))
989 return dev_err_probe(dev, -ENODEV,
990 "No PCIe port found in DT\n");
991
992 return 0;
993 }
994
aspeed_pcie_probe(struct platform_device * pdev)995 static int aspeed_pcie_probe(struct platform_device *pdev)
996 {
997 struct device *dev = &pdev->dev;
998 struct pci_host_bridge *host;
999 struct aspeed_pcie *pcie;
1000 struct resource_entry *entry;
1001 const struct aspeed_pcie_rc_platform *md;
1002 int irq, ret;
1003
1004 md = of_device_get_match_data(dev);
1005 if (!md)
1006 return -ENODEV;
1007
1008 host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
1009 if (!host)
1010 return -ENOMEM;
1011
1012 pcie = pci_host_bridge_priv(host);
1013 pcie->dev = dev;
1014 pcie->tx_tag = 0;
1015 platform_set_drvdata(pdev, pcie);
1016
1017 pcie->platform = md;
1018 pcie->host = host;
1019 INIT_LIST_HEAD(&pcie->ports);
1020
1021 /* Get root bus num for cfg command to decide tlp type 0 or type 1 */
1022 entry = resource_list_first_type(&host->windows, IORESOURCE_BUS);
1023 if (entry)
1024 pcie->root_bus_nr = entry->res->start;
1025
1026 pcie->reg = devm_platform_ioremap_resource(pdev, 0);
1027 if (IS_ERR(pcie->reg))
1028 return PTR_ERR(pcie->reg);
1029
1030 pcie->h2xrst = devm_reset_control_get_exclusive(dev, "h2x");
1031 if (IS_ERR(pcie->h2xrst))
1032 return dev_err_probe(dev, PTR_ERR(pcie->h2xrst),
1033 "failed to get h2x reset\n");
1034
1035 ret = devm_mutex_init(dev, &pcie->lock);
1036 if (ret)
1037 return dev_err_probe(dev, ret, "failed to init mutex\n");
1038
1039 ret = pcie->platform->setup(pdev);
1040 if (ret)
1041 return dev_err_probe(dev, ret, "failed to setup PCIe RC\n");
1042
1043 aspeed_pcie_map_ranges(pcie);
1044
1045 ret = aspeed_pcie_parse_dt(pcie);
1046 if (ret)
1047 return ret;
1048
1049 host->sysdata = pcie;
1050
1051 ret = aspeed_pcie_init_irq_domain(pcie);
1052 if (ret)
1053 return ret;
1054
1055 irq = platform_get_irq(pdev, 0);
1056 if (irq < 0)
1057 return irq;
1058
1059 ret = devm_add_action_or_reset(dev, aspeed_pcie_irq_domain_free, pcie);
1060 if (ret)
1061 return ret;
1062
1063 ret = devm_request_irq(dev, irq, aspeed_pcie_intr_handler, IRQF_SHARED,
1064 dev_name(dev), pcie);
1065 if (ret)
1066 return ret;
1067
1068 return pci_host_probe(host);
1069 }
1070
/*
 * AST2600 platform data: SoC-specific setup/mapping hooks plus the
 * register offsets for INTx/MSI enable and status.
 */
static const struct aspeed_pcie_rc_platform pcie_rc_ast2600 = {
	.setup = aspeed_ast2600_setup,
	.pcie_map_ranges = aspeed_ast2600_pcie_map_ranges,
	.reg_intx_en = 0xc4,	/* INTx enable register offset */
	.reg_intx_sts = 0xc8,	/* INTx status register offset */
	.reg_msi_en = 0xe0,	/* MSI enable register offset */
	.reg_msi_sts = 0xe8,	/* MSI status register offset */
	.msi_address = 0x1e77005c,	/* MSI target address programmed into EPs */
};
1080
/*
 * AST2700 platform data: same hook set as the AST2600 but with the
 * newer SoC's register layout and MSI target address.
 */
static const struct aspeed_pcie_rc_platform pcie_rc_ast2700 = {
	.setup = aspeed_ast2700_setup,
	.pcie_map_ranges = aspeed_ast2700_pcie_map_ranges,
	.reg_intx_en = 0x40,	/* INTx enable register offset */
	.reg_intx_sts = 0x48,	/* INTx status register offset */
	.reg_msi_en = 0x50,	/* MSI enable register offset */
	.reg_msi_sts = 0x58,	/* MSI status register offset */
	.msi_address = 0x000000f0,	/* MSI target address programmed into EPs */
};
1090
/* DT match table; .data selects the per-SoC platform hooks above. */
static const struct of_device_id aspeed_pcie_of_match[] = {
	{ .compatible = "aspeed,ast2600-pcie", .data = &pcie_rc_ast2600 },
	{ .compatible = "aspeed,ast2700-pcie", .data = &pcie_rc_ast2700 },
	{}
};
1096
static struct platform_driver aspeed_pcie_driver = {
	.driver = {
		.name = "aspeed-pcie",
		.of_match_table = aspeed_pcie_of_match,
		/* Host controller: unbinding at runtime is not supported. */
		.suppress_bind_attrs = true,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe = aspeed_pcie_probe,
};
1106
/* Built-in only (no .remove); MODULE_* macros document authorship/license. */
builtin_platform_driver(aspeed_pcie_driver);

MODULE_AUTHOR("Jacky Chou <jacky_chou@aspeedtech.com>");
MODULE_DESCRIPTION("ASPEED PCIe Root Complex");
MODULE_LICENSE("GPL");
1112