1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Synopsys DesignWare PCIe host controller driver
4 *
5 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
6 * https://www.samsung.com
7 *
8 * Author: Jingoo Han <jg1.han@samsung.com>
9 */
10
11 #include <linux/align.h>
12 #include <linux/bitops.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dma/edma.h>
16 #include <linux/gpio/consumer.h>
17 #include <linux/ioport.h>
18 #include <linux/of.h>
19 #include <linux/of_address.h>
20 #include <linux/pcie-dwc.h>
21 #include <linux/platform_device.h>
22 #include <linux/sizes.h>
23 #include <linux/types.h>
24
25 #include "../../pci.h"
26 #include "pcie-designware.h"
27
/* Application-side clock names, indexed by enum dw_pcie_app_clk */
static const char * const dw_pcie_app_clks[DW_PCIE_NUM_APP_CLKS] = {
	[DW_PCIE_DBI_CLK] = "dbi",
	[DW_PCIE_MSTR_CLK] = "mstr",
	[DW_PCIE_SLV_CLK] = "slv",
};

/* Core-side clock names, indexed by enum dw_pcie_core_clk */
static const char * const dw_pcie_core_clks[DW_PCIE_NUM_CORE_CLKS] = {
	[DW_PCIE_PIPE_CLK] = "pipe",
	[DW_PCIE_CORE_CLK] = "core",
	[DW_PCIE_AUX_CLK] = "aux",
	[DW_PCIE_REF_CLK] = "ref",
};

/* Application-side reset line names, indexed by enum dw_pcie_app_rst */
static const char * const dw_pcie_app_rsts[DW_PCIE_NUM_APP_RSTS] = {
	[DW_PCIE_DBI_RST] = "dbi",
	[DW_PCIE_MSTR_RST] = "mstr",
	[DW_PCIE_SLV_RST] = "slv",
};

/* Core-side reset line names, indexed by enum dw_pcie_core_rst */
static const char * const dw_pcie_core_rsts[DW_PCIE_NUM_CORE_RSTS] = {
	[DW_PCIE_NON_STICKY_RST] = "non-sticky",
	[DW_PCIE_STICKY_RST] = "sticky",
	[DW_PCIE_CORE_RST] = "core",
	[DW_PCIE_PIPE_RST] = "pipe",
	[DW_PCIE_PHY_RST] = "phy",
	[DW_PCIE_HOT_RST] = "hot",
	[DW_PCIE_PWR_RST] = "pwr",
};

/* Vendor-specific PTM capability IDs; list is terminated by a zero entry */
static const struct dwc_pcie_vsec_id dwc_pcie_ptm_vsec_ids[] = {
	{ .vendor_id = PCI_VENDOR_ID_QCOM, /* EP */
	  .vsec_id = 0x03, .vsec_rev = 0x1 },
	{ .vendor_id = PCI_VENDOR_ID_QCOM, /* RC */
	  .vsec_id = 0x04, .vsec_rev = 0x1 },
	{ }
};
64
dw_pcie_get_clocks(struct dw_pcie * pci)65 static int dw_pcie_get_clocks(struct dw_pcie *pci)
66 {
67 int i, ret;
68
69 for (i = 0; i < DW_PCIE_NUM_APP_CLKS; i++)
70 pci->app_clks[i].id = dw_pcie_app_clks[i];
71
72 for (i = 0; i < DW_PCIE_NUM_CORE_CLKS; i++)
73 pci->core_clks[i].id = dw_pcie_core_clks[i];
74
75 ret = devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_APP_CLKS,
76 pci->app_clks);
77 if (ret)
78 return ret;
79
80 return devm_clk_bulk_get_optional(pci->dev, DW_PCIE_NUM_CORE_CLKS,
81 pci->core_clks);
82 }
83
dw_pcie_get_resets(struct dw_pcie * pci)84 static int dw_pcie_get_resets(struct dw_pcie *pci)
85 {
86 int i, ret;
87
88 for (i = 0; i < DW_PCIE_NUM_APP_RSTS; i++)
89 pci->app_rsts[i].id = dw_pcie_app_rsts[i];
90
91 for (i = 0; i < DW_PCIE_NUM_CORE_RSTS; i++)
92 pci->core_rsts[i].id = dw_pcie_core_rsts[i];
93
94 ret = devm_reset_control_bulk_get_optional_shared(pci->dev,
95 DW_PCIE_NUM_APP_RSTS,
96 pci->app_rsts);
97 if (ret)
98 return ret;
99
100 ret = devm_reset_control_bulk_get_optional_exclusive(pci->dev,
101 DW_PCIE_NUM_CORE_RSTS,
102 pci->core_rsts);
103 if (ret)
104 return ret;
105
106 pci->pe_rst = devm_gpiod_get_optional(pci->dev, "reset", GPIOD_OUT_HIGH);
107 if (IS_ERR(pci->pe_rst))
108 return PTR_ERR(pci->pe_rst);
109
110 return 0;
111 }
112
/*
 * dw_pcie_get_resources - Acquire the memory resources, clocks and resets
 * @pci: DWC instance
 *
 * Maps the "dbi", "dbi2", "atu", "dma" and "elbi" platform resources (all
 * optional except "dbi"; each is skipped when the glue driver pre-assigned
 * the base), optionally grabs the clocks/resets, and caches link properties
 * from the device tree.
 *
 * Returns: 0 on success, negative errno otherwise.
 */
int dw_pcie_get_resources(struct dw_pcie *pci)
{
	struct platform_device *pdev = to_platform_device(pci->dev);
	struct device_node *np = dev_of_node(pci->dev);
	struct resource *res;
	int ret;

	/* Glue drivers may have mapped DBI themselves already */
	if (!pci->dbi_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
		pci->dbi_base = devm_pci_remap_cfg_resource(pci->dev, res);
		if (IS_ERR(pci->dbi_base))
			return PTR_ERR(pci->dbi_base);
		pci->dbi_phys_addr = res->start;
	}

	/* DBI2 is mainly useful for the endpoint controller */
	if (!pci->dbi_base2) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
		if (res) {
			pci->dbi_base2 = devm_pci_remap_cfg_resource(pci->dev, res);
			if (IS_ERR(pci->dbi_base2))
				return PTR_ERR(pci->dbi_base2);
		} else {
			/* Traditional default: DBI2 shadows DBI at +4K */
			pci->dbi_base2 = pci->dbi_base + SZ_4K;
		}
	}

	/* For non-unrolled iATU/eDMA platforms this range will be ignored */
	if (!pci->atu_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
		if (res) {
			pci->atu_size = resource_size(res);
			pci->atu_base = devm_ioremap_resource(pci->dev, res);
			if (IS_ERR(pci->atu_base))
				return PTR_ERR(pci->atu_base);
			pci->atu_phys_addr = res->start;
		} else {
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
		}
	}

	/* Set a default value suitable for at most 8 in and 8 out windows */
	if (!pci->atu_size)
		pci->atu_size = SZ_4K;

	/* eDMA region can be mapped to a custom base address */
	if (!pci->edma.reg_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
		if (res) {
			pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
			if (IS_ERR(pci->edma.reg_base))
				return PTR_ERR(pci->edma.reg_base);
		} else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
			/* Fall back to the ATU space only if it can hold it */
			pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
		}
	}

	/* ELBI is an optional resource */
	if (!pci->elbi_base) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
		if (res) {
			pci->elbi_base = devm_ioremap_resource(pci->dev, res);
			if (IS_ERR(pci->elbi_base))
				return PTR_ERR(pci->elbi_base);
		}
	}

	/* LLDD is supposed to manually switch the clocks and resets state */
	if (dw_pcie_cap_is(pci, REQ_RES)) {
		ret = dw_pcie_get_clocks(pci);
		if (ret)
			return ret;

		ret = dw_pcie_get_resets(pci);
		if (ret)
			return ret;
	}

	/* DT "max-link-speed" is only consulted if the glue didn't set it */
	if (pci->max_link_speed < 1)
		pci->max_link_speed = of_pci_get_max_link_speed(np);

	of_property_read_u32(np, "num-lanes", &pci->num_lanes);

	if (of_property_read_bool(np, "snps,enable-cdm-check"))
		dw_pcie_cap_set(pci, CDM_CHECK);

	return 0;
}
201
dw_pcie_version_detect(struct dw_pcie * pci)202 void dw_pcie_version_detect(struct dw_pcie *pci)
203 {
204 u32 ver;
205
206 /* The content of the CSR is zero on DWC PCIe older than v4.70a */
207 ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_NUMBER);
208 if (!ver)
209 return;
210
211 if (pci->version && pci->version != ver)
212 dev_warn(pci->dev, "Versions don't match (%08x != %08x)\n",
213 pci->version, ver);
214 else
215 pci->version = ver;
216
217 ver = dw_pcie_readl_dbi(pci, PCIE_VERSION_TYPE);
218
219 if (pci->type && pci->type != ver)
220 dev_warn(pci->dev, "Types don't match (%08x != %08x)\n",
221 pci->type, ver);
222 else
223 pci->type = ver;
224 }
225
/*
 * Locate a classic PCI capability (e.g. PCI_CAP_ID_EXP) in the DBI config
 * space; returns its offset or 0 when absent.
 */
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap)
{
	return PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
				 NULL, pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_capability);
232
/*
 * Locate a PCIe extended capability in the DBI config space; returns its
 * offset or 0 when absent.
 */
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap)
{
	return PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, NULL, pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ext_capability);
238
/*
 * Hide a classic PCI capability from the config space by unlinking it from
 * the capability list in the writable DBI shadow registers.
 */
void dw_pcie_remove_capability(struct dw_pcie *pci, u8 cap)
{
	u8 cap_pos, prev_pos, next_pos;
	u16 header;

	cap_pos = PCI_FIND_NEXT_CAP(dw_pcie_read_cfg, PCI_CAPABILITY_LIST, cap,
				    &prev_pos, pci);
	if (!cap_pos)
		return;

	/* The "next capability" pointer is the upper byte of the header */
	header = dw_pcie_readw_dbi(pci, cap_pos);
	next_pos = (header & 0xff00) >> 8;

	dw_pcie_dbi_ro_wr_en(pci);
	if (prev_pos == PCI_CAPABILITY_LIST)
		dw_pcie_writeb_dbi(pci, PCI_CAPABILITY_LIST, next_pos);
	else
		dw_pcie_writeb_dbi(pci, prev_pos + 1, next_pos);
	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_remove_capability);
260
/*
 * Hide a PCIe extended capability from the config space by unlinking it
 * from the extended capability chain in the writable DBI shadow registers.
 */
void dw_pcie_remove_ext_capability(struct dw_pcie *pci, u8 cap)
{
	int cap_pos, next_pos, pre_pos;
	u32 pre_header, header;

	cap_pos = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, 0, cap, &pre_pos, pci);
	if (!cap_pos)
		return;

	header = dw_pcie_readl_dbi(pci, cap_pos);

	/*
	 * If the first cap at offset PCI_CFG_SPACE_SIZE is removed,
	 * only set its capid to zero as it cannot be skipped.
	 */
	if (cap_pos == PCI_CFG_SPACE_SIZE) {
		dw_pcie_dbi_ro_wr_en(pci);
		/* Keep version/next fields, clear only the 16-bit cap ID */
		dw_pcie_writel_dbi(pci, cap_pos, header & 0xffff0000);
		dw_pcie_dbi_ro_wr_dis(pci);
		return;
	}

	pre_header = dw_pcie_readl_dbi(pci, pre_pos);
	next_pos = PCI_EXT_CAP_NEXT(header);

	dw_pcie_dbi_ro_wr_en(pci);
	/* Keep cap ID + version (bits 19:0), splice in the new next pointer */
	dw_pcie_writel_dbi(pci, pre_pos,
			   (pre_header & 0xfffff) | (next_pos << 20));
	dw_pcie_dbi_ro_wr_dis(pci);
}
EXPORT_SYMBOL_GPL(dw_pcie_remove_ext_capability);
292
/*
 * Walk the VSEC extended capabilities and return the offset of the one
 * whose header matches @vsec_id, or 0 if none matches (or the device's
 * vendor ID differs from @vendor_id).
 */
static u16 __dw_pcie_find_vsec_capability(struct dw_pcie *pci, u16 vendor_id,
					  u16 vsec_id)
{
	u16 vsec = 0;
	u32 header;

	/* VSEC IDs are only meaningful for the matching vendor */
	if (dw_pcie_readw_dbi(pci, PCI_VENDOR_ID) != vendor_id)
		return 0;

	for (;;) {
		vsec = PCI_FIND_NEXT_EXT_CAP(dw_pcie_read_cfg, vsec,
					     PCI_EXT_CAP_ID_VNDR, NULL, pci);
		if (!vsec)
			break;

		header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
		if (PCI_VNDR_HEADER_ID(header) == vsec_id)
			return vsec;
	}

	return 0;
}
311
dw_pcie_find_vsec_capability(struct dw_pcie * pci,const struct dwc_pcie_vsec_id * vsec_ids)312 static u16 dw_pcie_find_vsec_capability(struct dw_pcie *pci,
313 const struct dwc_pcie_vsec_id *vsec_ids)
314 {
315 const struct dwc_pcie_vsec_id *vid;
316 u16 vsec;
317 u32 header;
318
319 for (vid = vsec_ids; vid->vendor_id; vid++) {
320 vsec = __dw_pcie_find_vsec_capability(pci, vid->vendor_id,
321 vid->vsec_id);
322 if (vsec) {
323 header = dw_pcie_readl_dbi(pci, vsec + PCI_VNDR_HEADER);
324 if (PCI_VNDR_HEADER_REV(header) == vid->vsec_rev)
325 return vsec;
326 }
327 }
328
329 return 0;
330 }
331
/* Find the RAS DES (debug/error/statistics) VSEC; 0 when absent. */
u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci)
{
	return dw_pcie_find_vsec_capability(pci, dwc_pcie_rasdes_vsec_ids);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_rasdes_capability);
337
/* Find the vendor-specific PTM VSEC; 0 when absent. */
u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci)
{
	return dw_pcie_find_vsec_capability(pci, dwc_pcie_ptm_vsec_ids);
}
EXPORT_SYMBOL_GPL(dw_pcie_find_ptm_capability);
343
/*
 * Size-dispatched MMIO read helper. *@val is set to 0 and
 * PCIBIOS_BAD_REGISTER_NUMBER is returned on a misaligned address or an
 * unsupported access size.
 */
int dw_pcie_read(void __iomem *addr, int size, u32 *val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size)) {
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	switch (size) {
	case 4:
		*val = readl(addr);
		break;
	case 2:
		*val = readw(addr);
		break;
	case 1:
		*val = readb(addr);
		break;
	default:
		*val = 0;
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_read);
365
/*
 * Size-dispatched MMIO write helper. Returns PCIBIOS_BAD_REGISTER_NUMBER
 * on a misaligned address or an unsupported access size.
 */
int dw_pcie_write(void __iomem *addr, int size, u32 val)
{
	if (!IS_ALIGNED((uintptr_t)addr, size))
		return PCIBIOS_BAD_REGISTER_NUMBER;

	switch (size) {
	case 4:
		writel(val, addr);
		break;
	case 2:
		writew(val, addr);
		break;
	case 1:
		writeb(val, addr);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(dw_pcie_write);
383
/*
 * Read @size bytes from DBI register @reg, honoring a platform read_dbi
 * override when present. Returns 0 on access failure (after logging).
 */
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size)
{
	u32 val = 0;

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->dbi_base, reg, size);

	if (dw_pcie_read(pci->dbi_base + reg, size, &val))
		dev_err(pci->dev, "Read DBI address failed\n");

	return val;
}
EXPORT_SYMBOL_GPL(dw_pcie_read_dbi);
399
/*
 * Write @size bytes of @val to DBI register @reg, honoring a platform
 * write_dbi override when present. Failures are only logged.
 */
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	if (pci->ops && pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, pci->dbi_base, reg, size, val);
		return;
	}

	if (dw_pcie_write(pci->dbi_base + reg, size, val))
		dev_err(pci->dev, "Write DBI address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi);
414
/*
 * Write @size bytes of @val to shadow (DBI2) register @reg, honoring a
 * platform write_dbi2 override when present. Failures are only logged.
 */
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val)
{
	int ret;

	if (pci->ops && pci->ops->write_dbi2) {
		pci->ops->write_dbi2(pci, pci->dbi_base2, reg, size, val);
		return;
	}

	ret = dw_pcie_write(pci->dbi_base2 + reg, size, val);
	if (ret)
		/*
		 * Name DBI2 explicitly (and match the sibling helpers'
		 * capitalization) so the failing register space is
		 * identifiable in the log.
		 */
		dev_err(pci->dev, "Write DBI2 address failed\n");
}
EXPORT_SYMBOL_GPL(dw_pcie_write_dbi2);
429
/*
 * Return the register base for iATU window (@dir, @index). Unrolled iATUs
 * expose each window at a fixed offset from atu_base; viewport-based ones
 * first select the window via the PCIE_ATU_VIEWPORT register.
 */
static inline void __iomem *dw_pcie_select_atu(struct dw_pcie *pci, u32 dir,
					       u32 index)
{
	if (dw_pcie_cap_is(pci, IATU_UNROLL))
		return pci->atu_base + PCIE_ATU_UNROLL_BASE(dir, index);

	dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, dir | index);
	return pci->atu_base;
}
439
/*
 * 32-bit read from iATU window (@dir, @index) register @reg, honoring a
 * platform read_dbi override. Returns 0 on access failure (after logging).
 */
static u32 dw_pcie_readl_atu(struct dw_pcie *pci, u32 dir, u32 index, u32 reg)
{
	void __iomem *base = dw_pcie_select_atu(pci, dir, index);
	u32 val = 0;

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, base, reg, 4);

	if (dw_pcie_read(base + reg, 4, &val))
		dev_err(pci->dev, "Read ATU address failed\n");

	return val;
}
457
/*
 * 32-bit write to iATU window (@dir, @index) register @reg, honoring a
 * platform write_dbi override. Failures are only logged.
 */
static void dw_pcie_writel_atu(struct dw_pcie *pci, u32 dir, u32 index,
			       u32 reg, u32 val)
{
	void __iomem *base = dw_pcie_select_atu(pci, dir, index);

	if (pci->ops && pci->ops->write_dbi) {
		pci->ops->write_dbi(pci, base, reg, 4, val);
		return;
	}

	if (dw_pcie_write(base + reg, 4, val))
		dev_err(pci->dev, "Write ATU address failed\n");
}
475
/* Outbound-direction shorthand for dw_pcie_readl_atu() */
static inline u32 dw_pcie_readl_atu_ob(struct dw_pcie *pci, u32 index, u32 reg)
{
	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg);
}
480
/* Outbound-direction shorthand for dw_pcie_writel_atu() */
static inline void dw_pcie_writel_atu_ob(struct dw_pcie *pci, u32 index, u32 reg,
					 u32 val)
{
	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_OB, index, reg, val);
}
486
/* Work around the DWC v4.90a TD-bit/ECRC quirk by always setting TD. */
static inline u32 dw_pcie_enable_ecrc(u32 val)
{
	/*
	 * DesignWare core version 4.90A has a design issue where the 'TD'
	 * bit in the Control register-1 of the ATU outbound region acts
	 * like an override for the ECRC setting, i.e., the presence of TLP
	 * Digest (ECRC) in the outgoing TLPs is solely determined by this
	 * bit. This is contrary to the PCIe spec which says that the
	 * enablement of the ECRC is solely determined by the AER
	 * registers.
	 *
	 * Because of this, even when the ECRC is enabled through AER
	 * registers, the transactions going through ATU won't have TLP
	 * Digest as there is no way the PCI core AER code could program
	 * the TD bit which is specific to the DesignWare core.
	 *
	 * The best way to handle this scenario is to program the TD bit
	 * always. It affects only the traffic from root port to downstream
	 * devices.
	 *
	 * At this point,
	 * When ECRC is enabled in AER registers, everything works normally
	 * When ECRC is NOT enabled in AER registers, then,
	 * on Root Port:- TLP Digest (DWord size) gets appended to each packet
	 *                even through it is not required. Since downstream
	 *                TLPs are mostly for configuration accesses and BAR
	 *                accesses, they are not in critical path and won't
	 *                have much negative effect on the performance.
	 * on End Point:- TLP Digest is received for some/all the packets coming
	 *                from the root port. TLP Digest is ignored because,
	 *                as per the PCIe Spec r5.0 v1.0 section 2.2.3
	 *                "TLP Digest Rules", when an endpoint receives TLP
	 *                Digest when its ECRC check functionality is disabled
	 *                in AER registers, received TLP Digest is just ignored.
	 * Since there is no issue or error reported either side, best way to
	 * handle the scenario is to program TD bit by default.
	 */

	return val | PCIE_ATU_TD;
}
527
/*
 * dw_pcie_prog_outbound_atu - Program one outbound iATU window
 * @pci: DWC instance
 * @atu: window description (index, type, routing, addresses, size, ...)
 *
 * Returns: 0 on success, -ENOSPC if @atu->index exceeds the detected window
 * count, -EINVAL for a misaligned or zero-sized region, -ETIMEDOUT if the
 * window never reports itself enabled.
 */
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
			      const struct dw_pcie_ob_atu_cfg *atu)
{
	u64 parent_bus_addr = atu->parent_bus_addr;
	u32 retries, val;
	u64 limit_addr;

	if (atu->index >= pci->num_ob_windows)
		return -ENOSPC;

	limit_addr = parent_bus_addr + atu->size - 1;

	/* Base/limit must share the bits above region_limit and be aligned */
	if ((limit_addr & ~pci->region_limit) != (parent_bus_addr & ~pci->region_limit) ||
	    !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
	    !IS_ALIGNED(atu->pci_addr, pci->region_align) || !atu->size) {
		return -EINVAL;
	}

	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_BASE,
			      lower_32_bits(parent_bus_addr));
	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_BASE,
			      upper_32_bits(parent_bus_addr));

	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LIMIT,
			      lower_32_bits(limit_addr));
	/* The upper-limit register only exists on core v4.60a and later */
	if (dw_pcie_ver_is_ge(pci, 460A))
		dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_LIMIT,
				      upper_32_bits(limit_addr));

	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_LOWER_TARGET,
			      lower_32_bits(atu->pci_addr));
	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_UPPER_TARGET,
			      upper_32_bits(atu->pci_addr));

	val = atu->type | atu->routing | PCIE_ATU_FUNC_NUM(atu->func_no);
	/* Windows crossing a 4GB boundary need the region-size extension */
	if (upper_32_bits(limit_addr) > upper_32_bits(parent_bus_addr) &&
	    dw_pcie_ver_is_ge(pci, 460A))
		val |= PCIE_ATU_INCREASE_REGION_SIZE;
	if (dw_pcie_ver_is(pci, 490A))
		val = dw_pcie_enable_ecrc(val);
	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL1, val);

	val = PCIE_ATU_ENABLE | atu->ctrl2;
	if (atu->type == PCIE_ATU_TYPE_MSG) {
		/* The data-less messages only for now */
		val |= PCIE_ATU_INHIBIT_PAYLOAD | atu->code;
	}
	dw_pcie_writel_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2, val);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu_ob(pci, atu->index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	dev_err(pci->dev, "Outbound iATU is not being enabled\n");

	return -ETIMEDOUT;
}
593
/* Inbound-direction shorthand for dw_pcie_readl_atu() */
static inline u32 dw_pcie_readl_atu_ib(struct dw_pcie *pci, u32 index, u32 reg)
{
	return dw_pcie_readl_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg);
}
598
/* Inbound-direction shorthand for dw_pcie_writel_atu() */
static inline void dw_pcie_writel_atu_ib(struct dw_pcie *pci, u32 index, u32 reg,
					 u32 val)
{
	dw_pcie_writel_atu(pci, PCIE_ATU_REGION_DIR_IB, index, reg, val);
}
604
/*
 * dw_pcie_prog_inbound_atu - Program one address-matched inbound iATU window
 * @pci: DWC instance
 * @index: inbound window index
 * @type: TLP type for the region
 * @parent_bus_addr: local (parent bus) address incoming accesses map to
 * @pci_addr: PCI base address matched by the window
 * @size: window size in bytes
 *
 * Returns: 0 on success, -ENOSPC if @index exceeds the detected window
 * count, -EINVAL for a misaligned or zero-sized region, -ETIMEDOUT if the
 * window never reports itself enabled.
 */
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
			     u64 parent_bus_addr, u64 pci_addr, u64 size)
{
	u64 limit_addr = pci_addr + size - 1;
	u32 retries, val;

	if (index >= pci->num_ib_windows)
		return -ENOSPC;

	/* Base/limit must share the bits above region_limit and be aligned */
	if ((limit_addr & ~pci->region_limit) != (pci_addr & ~pci->region_limit) ||
	    !IS_ALIGNED(parent_bus_addr, pci->region_align) ||
	    !IS_ALIGNED(pci_addr, pci->region_align) || !size) {
		return -EINVAL;
	}

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_BASE,
			      lower_32_bits(pci_addr));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_BASE,
			      upper_32_bits(pci_addr));

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LIMIT,
			      lower_32_bits(limit_addr));
	/* The upper-limit register only exists on core v4.60a and later */
	if (dw_pcie_ver_is_ge(pci, 460A))
		dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_LIMIT,
				      upper_32_bits(limit_addr));

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
			      lower_32_bits(parent_bus_addr));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
			      upper_32_bits(parent_bus_addr));

	val = type;
	/* Windows crossing a 4GB boundary need the region-size extension */
	if (upper_32_bits(limit_addr) > upper_32_bits(pci_addr) &&
	    dw_pcie_ver_is_ge(pci, 460A))
		val |= PCIE_ATU_INCREASE_REGION_SIZE;
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, val);
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2, PCIE_ATU_ENABLE);

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -ETIMEDOUT;
}
659
/*
 * dw_pcie_prog_ep_inbound_atu - Program a BAR-matched inbound iATU window
 * @pci: DWC instance
 * @func_no: PCI function the window belongs to
 * @index: inbound window index
 * @type: TLP type for the region
 * @parent_bus_addr: local (parent bus) address BAR accesses are routed to
 * @bar: BAR number to match
 * @size: BAR size, used for alignment validation only
 *
 * Returns: 0 on success, -EINVAL on misalignment, -ETIMEDOUT if the window
 * never reports itself enabled.
 */
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
				int type, u64 parent_bus_addr, u8 bar, size_t size)
{
	u32 retries, val;

	if (!IS_ALIGNED(parent_bus_addr, pci->region_align) ||
	    !IS_ALIGNED(parent_bus_addr, size))
		return -EINVAL;

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_LOWER_TARGET,
			      lower_32_bits(parent_bus_addr));
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_UPPER_TARGET,
			      upper_32_bits(parent_bus_addr));

	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL1, type |
			      PCIE_ATU_FUNC_NUM(func_no));
	/* BAR-match mode: accesses hitting @bar of @func_no use this window */
	dw_pcie_writel_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2,
			      PCIE_ATU_ENABLE | PCIE_ATU_FUNC_NUM_MATCH_EN |
			      PCIE_ATU_BAR_MODE_ENABLE | (bar << 8));

	/*
	 * Make sure ATU enable takes effect before any subsequent config
	 * and I/O accesses.
	 */
	for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) {
		val = dw_pcie_readl_atu_ib(pci, index, PCIE_ATU_REGION_CTRL2);
		if (val & PCIE_ATU_ENABLE)
			return 0;

		mdelay(LINK_WAIT_IATU);
	}

	dev_err(pci->dev, "Inbound iATU is not being enabled\n");

	return -ETIMEDOUT;
}
696
/* Tear down one iATU window in @dir by clearing its enable bit */
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index)
{
	dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0);
}
701
/*
 * dw_pcie_ltssm_status_string - Human-readable name for an LTSSM state
 * @ltssm: LTSSM state value
 *
 * Returns: the enum name with the "DW_PCIE_LTSSM_" prefix stripped;
 * "UNKNOWN" for unrecognized values.
 */
const char *dw_pcie_ltssm_status_string(enum dw_pcie_ltssm ltssm)
{
	const char *str;

	switch (ltssm) {
/* Expands each enumerator into: case n: str = #n; break */
#define DW_PCIE_LTSSM_NAME(n) case n: str = #n; break
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_QUIET);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_ACT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_ACTIVE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_COMPLIANCE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_POLL_CONFIG);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_PRE_DETECT_QUIET);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DETECT_WAIT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_START);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LINKWD_ACEPT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_WAI);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_LANENUM_ACEPT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_COMPLETE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_CFG_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_LOCK);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_SPEED);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_RCVRCFG);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L0S);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L123_SEND_EIDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L2_WAKE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_ENTRY);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED_IDLE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_DISABLED);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ENTRY);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_ACTIVE);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET_ENTRY);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_HOT_RESET);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ0);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ1);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ2);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_RCVRY_EQ3);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_1);
	DW_PCIE_LTSSM_NAME(DW_PCIE_LTSSM_L1_2);
	default:
		/* Carries the prefix too, so the strip below stays valid */
		str = "DW_PCIE_LTSSM_UNKNOWN";
		break;
	}

	/* Drop the common "DW_PCIE_LTSSM_" prefix from the returned name */
	return str + strlen("DW_PCIE_LTSSM_");
}
753
/**
 * dw_pcie_wait_for_link - Wait for the PCIe link to be up
 * @pci: DWC instance
 *
 * Returns: 0 if link is up, -ENODEV if device is not found, -EIO if the device
 * is found but not active and -ETIMEDOUT if the link fails to come up for other
 * reasons.
 */
int dw_pcie_wait_for_link(struct dw_pcie *pci)
{
	u32 offset, val, ltssm;
	int retries;

	/* Check if the link is up or not */
	for (retries = 0; retries < PCIE_LINK_WAIT_MAX_RETRIES; retries++) {
		if (dw_pcie_link_up(pci))
			break;

		msleep(PCIE_LINK_WAIT_SLEEP_MS);
	}

	if (retries >= PCIE_LINK_WAIT_MAX_RETRIES) {
		/*
		 * If the link is in Detect.Quiet or Detect.Active state, it
		 * indicates that no device is detected.
		 */
		ltssm = dw_pcie_get_ltssm(pci);
		if (ltssm == DW_PCIE_LTSSM_DETECT_QUIET ||
		    ltssm == DW_PCIE_LTSSM_DETECT_ACT) {
			dev_info(pci->dev, "Device not found\n");
			return -ENODEV;

		/*
		 * If the link is in POLL.{Active/Compliance} state, then the
		 * device is found to be connected to the bus, but it is not
		 * active i.e., the device firmware might not yet initialized.
		 */
		} else if (ltssm == DW_PCIE_LTSSM_POLL_ACTIVE ||
			   ltssm == DW_PCIE_LTSSM_POLL_COMPLIANCE) {
			dev_info(pci->dev, "Device found, but not active\n");
			return -EIO;
		}

		dev_err(pci->dev, "Link failed to come up. LTSSM: %s\n",
			dw_pcie_ltssm_status_string(ltssm));
		return -ETIMEDOUT;
	}

	/*
	 * As per PCIe r6.0, sec 6.6.1, a Downstream Port that supports Link
	 * speeds greater than 5.0 GT/s, software must wait a minimum of 100 ms
	 * after Link training completes before sending a Configuration Request.
	 */
	if (pci->max_link_speed > 2)
		msleep(PCIE_RESET_CONFIG_WAIT_MS);

	/* Report the negotiated speed/width from the Link Status register */
	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA);

	dev_info(pci->dev, "PCIe Gen.%u x%u link up\n",
		 FIELD_GET(PCI_EXP_LNKSTA_CLS, val),
		 FIELD_GET(PCI_EXP_LNKSTA_NLW, val));

	return 0;
}
EXPORT_SYMBOL_GPL(dw_pcie_wait_for_link);
820
dw_pcie_link_up(struct dw_pcie * pci)821 bool dw_pcie_link_up(struct dw_pcie *pci)
822 {
823 u32 val;
824
825 if (pci->ops && pci->ops->link_up)
826 return pci->ops->link_up(pci);
827
828 val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1);
829 return ((val & PCIE_PORT_DEBUG1_LINK_UP) &&
830 (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING)));
831 }
832 EXPORT_SYMBOL_GPL(dw_pcie_link_up);
833
dw_pcie_upconfig_setup(struct dw_pcie * pci)834 void dw_pcie_upconfig_setup(struct dw_pcie *pci)
835 {
836 u32 val;
837
838 val = dw_pcie_readl_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL);
839 val |= PORT_MLTI_UPCFG_SUPPORT;
840 dw_pcie_writel_dbi(pci, PCIE_PORT_MULTI_LANE_CTRL, val);
841 }
842 EXPORT_SYMBOL_GPL(dw_pcie_upconfig_setup);
843
/*
 * Program the target link speed (LNKCTL2) and clamp the advertised maximum
 * (LNKCAP) to pci->max_link_speed; when the platform imposed no limit, only
 * cache the hardware default.
 */
static void dw_pcie_link_set_max_speed(struct dw_pcie *pci)
{
	u32 cap, ctrl2, link_speed;
	u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	cap = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);

	/*
	 * Even if the platform doesn't want to limit the maximum link speed,
	 * just cache the hardware default value so that the vendor drivers can
	 * use it to do any link specific configuration.
	 */
	if (pci->max_link_speed < 1) {
		pci->max_link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
		return;
	}

	ctrl2 = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCTL2);
	ctrl2 &= ~PCI_EXP_LNKCTL2_TLS;

	switch (pcie_link_speed[pci->max_link_speed]) {
	case PCIE_SPEED_2_5GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_2_5GT;
		break;
	case PCIE_SPEED_5_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_5_0GT;
		break;
	case PCIE_SPEED_8_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_8_0GT;
		break;
	case PCIE_SPEED_16_0GT:
		link_speed = PCI_EXP_LNKCTL2_TLS_16_0GT;
		break;
	default:
		/* Use hardware capability */
		link_speed = FIELD_GET(PCI_EXP_LNKCAP_SLS, cap);
		ctrl2 &= ~PCI_EXP_LNKCTL2_HASD;
		break;
	}

	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCTL2, ctrl2 | link_speed);

	/* Clamp the advertised supported speeds to the requested maximum */
	cap &= ~((u32)PCI_EXP_LNKCAP_SLS);
	dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, cap | link_speed);

}
890
dw_pcie_link_get_max_link_width(struct dw_pcie * pci)891 int dw_pcie_link_get_max_link_width(struct dw_pcie *pci)
892 {
893 u8 cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
894 u32 lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
895
896 return FIELD_GET(PCI_EXP_LNKCAP_MLW, lnkcap);
897 }
898
/*
 * Configure the link width: port link control mode, the link-width speed
 * control register, and the advertised maximum width in LNKCAP.
 */
static void dw_pcie_link_set_max_link_width(struct dw_pcie *pci, u32 num_lanes)
{
	u32 lnkcap, lwsc, plc;
	u8 cap;

	/* Zero means: leave the hardware default untouched */
	if (!num_lanes)
		return;

	/* Set the number of lanes */
	plc = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	plc &= ~PORT_LINK_FAST_LINK_MODE;
	plc &= ~PORT_LINK_MODE_MASK;

	/* Set link width speed control register */
	lwsc = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
	lwsc &= ~PORT_LOGIC_LINK_WIDTH_MASK;
	lwsc |= PORT_LOGIC_LINK_WIDTH_1_LANES;
	switch (num_lanes) {
	case 1:
		plc |= PORT_LINK_MODE_1_LANES;
		break;
	case 2:
		plc |= PORT_LINK_MODE_2_LANES;
		break;
	case 4:
		plc |= PORT_LINK_MODE_4_LANES;
		break;
	case 8:
		plc |= PORT_LINK_MODE_8_LANES;
		break;
	case 16:
		plc |= PORT_LINK_MODE_16_LANES;
		break;
	default:
		dev_err(pci->dev, "num-lanes %u: invalid value\n", num_lanes);
		return;
	}
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, plc);
	dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, lwsc);

	/* Reflect the lane count in the advertised maximum link width */
	cap = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	lnkcap = dw_pcie_readl_dbi(pci, cap + PCI_EXP_LNKCAP);
	lnkcap &= ~PCI_EXP_LNKCAP_MLW;
	lnkcap |= FIELD_PREP(PCI_EXP_LNKCAP_MLW, num_lanes);
	dw_pcie_writel_dbi(pci, cap + PCI_EXP_LNKCAP, lnkcap);
}
945
/*
 * dw_pcie_iatu_detect - Probe the iATU geometry of the controller
 * @pci: DWC instance
 *
 * Detects whether the iATU is unrolled or viewport-based, counts the usable
 * outbound/inbound windows by probing the LOWER_TARGET registers with a test
 * pattern, and derives the region alignment and addressing limit from the
 * writable LIMIT bits.
 */
void dw_pcie_iatu_detect(struct dw_pcie *pci)
{
	int max_region, ob, ib;
	u32 val, min, dir;
	u64 max;

	/* The viewport register reads as all-ones when the iATU is unrolled */
	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xFFFFFFFF) {
		dw_pcie_cap_set(pci, IATU_UNROLL);

		/* Each unrolled window occupies 512 bytes; HW caps at 256 */
		max_region = min((int)pci->atu_size / 512, 256);
	} else {
		pci->atu_base = pci->dbi_base + PCIE_ATU_VIEWPORT_BASE;
		pci->atu_size = PCIE_ATU_VIEWPORT_SIZE;

		/* Unimplemented index bits read back as zero */
		dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, 0xFF);
		max_region = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT) + 1;
	}

	/* Windows beyond the implemented count won't latch the pattern */
	for (ob = 0; ob < max_region; ob++) {
		dw_pcie_writel_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_atu_ob(pci, ob, PCIE_ATU_LOWER_TARGET);
		if (val != 0x11110000)
			break;
	}

	for (ib = 0; ib < max_region; ib++) {
		dw_pcie_writel_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET, 0x11110000);
		val = dw_pcie_readl_atu_ib(pci, ib, PCIE_ATU_LOWER_TARGET);
		if (val != 0x11110000)
			break;
	}

	/* Use any working window to probe alignment/limit characteristics */
	if (ob) {
		dir = PCIE_ATU_REGION_DIR_OB;
	} else if (ib) {
		dir = PCIE_ATU_REGION_DIR_IB;
	} else {
		dev_err(pci->dev, "No iATU regions found\n");
		return;
	}

	/* Low LIMIT bits that stay set reveal the minimum region alignment */
	dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_LIMIT, 0x0);
	min = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_LIMIT);

	if (dw_pcie_ver_is_ge(pci, 460A)) {
		dw_pcie_writel_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT, 0xFFFFFFFF);
		max = dw_pcie_readl_atu(pci, dir, 0, PCIE_ATU_UPPER_LIMIT);
	} else {
		max = 0;
	}

	pci->num_ob_windows = ob;
	pci->num_ib_windows = ib;
	pci->region_align = 1 << fls(min);
	pci->region_limit = (max << 32) | (SZ_4G - 1);

	dev_info(pci->dev, "iATU: unroll %s, %u ob, %u ib, align %uK, limit %lluG\n",
		 dw_pcie_cap_is(pci, IATU_UNROLL) ? "T" : "F",
		 pci->num_ob_windows, pci->num_ib_windows,
		 pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
1008
/*
 * Read a 32-bit eDMA CSR. A glue driver's custom dbi accessor takes
 * precedence over the generic MMIO read helper.
 */
static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
{
	u32 value = 0;

	if (pci->ops && pci->ops->read_dbi)
		return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);

	if (dw_pcie_read(pci->edma.reg_base + reg, 4, &value))
		dev_err(pci->dev, "Read DMA address failed\n");

	return value;
}
1023
dw_pcie_edma_irq_vector(struct device * dev,unsigned int nr)1024 static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
1025 {
1026 struct platform_device *pdev = to_platform_device(dev);
1027 char name[6];
1028 int ret;
1029
1030 if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
1031 return -EINVAL;
1032
1033 ret = platform_get_irq_byname_optional(pdev, "dma");
1034 if (ret > 0)
1035 return ret;
1036
1037 snprintf(name, sizeof(name), "dma%u", nr);
1038
1039 return platform_get_irq_byname_optional(pdev, name);
1040 }
1041
/* Default eDMA platform callbacks, used unless the glue driver sets its own */
static struct dw_edma_plat_ops dw_pcie_edma_ops = {
	.irq_vector = dw_pcie_edma_irq_vector,
};
1045
dw_pcie_edma_init_data(struct dw_pcie * pci)1046 static void dw_pcie_edma_init_data(struct dw_pcie *pci)
1047 {
1048 pci->edma.dev = pci->dev;
1049
1050 if (!pci->edma.ops)
1051 pci->edma.ops = &dw_pcie_edma_ops;
1052
1053 pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
1054 }
1055
/*
 * Determine the eDMA register mapping format (legacy viewport vs.
 * unrolled) and locate the eDMA CSR base address.
 *
 * Return: 0 on success, -ENODEV if no usable eDMA block was found.
 */
static int dw_pcie_edma_find_mf(struct dw_pcie *pci)
{
	u32 val;

	/*
	 * Bail out finding the mapping format if it is already set by the glue
	 * driver. Also ensure that the edma.reg_base is pointing to a valid
	 * memory region.
	 */
	if (pci->edma.mf != EDMA_MF_EDMA_LEGACY)
		return pci->edma.reg_base ? 0 : -ENODEV;

	/*
	 * Indirect eDMA CSRs access has been completely removed since v5.40a
	 * thus no space is now reserved for the eDMA channels viewport and
	 * former DMA CTRL register is no longer fixed to FFs.
	 */
	if (dw_pcie_ver_is_ge(pci, 540A))
		val = 0xFFFFFFFF;
	else
		val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);

	if (val == 0xFFFFFFFF && pci->edma.reg_base) {
		/* No legacy viewport: unrolled map at the glue-provided base */
		pci->edma.mf = EDMA_MF_EDMA_UNROLL;
	} else if (val != 0xFFFFFFFF) {
		/* Legacy viewport present right inside the DBI space */
		pci->edma.mf = EDMA_MF_EDMA_LEGACY;

		pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
	} else {
		return -ENODEV;
	}

	return 0;
}
1090
dw_pcie_edma_find_channels(struct dw_pcie * pci)1091 static int dw_pcie_edma_find_channels(struct dw_pcie *pci)
1092 {
1093 u32 val;
1094
1095 /*
1096 * Autodetect the read/write channels count only for non-HDMA platforms.
1097 * HDMA platforms with native CSR mapping doesn't support autodetect,
1098 * so the glue drivers should've passed the valid count already. If not,
1099 * the below sanity check will catch it.
1100 */
1101 if (pci->edma.mf != EDMA_MF_HDMA_NATIVE) {
1102 val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
1103
1104 pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
1105 pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
1106 }
1107
1108 /* Sanity check the channels count if the mapping was incorrect */
1109 if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
1110 !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
1111 return -EINVAL;
1112
1113 return 0;
1114 }
1115
/*
 * Characterise the eDMA controller: apply defaults, detect the CSR
 * mapping format, then the channel counts.
 */
static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
{
	int ret;

	dw_pcie_edma_init_data(pci);

	ret = dw_pcie_edma_find_mf(pci);
	if (!ret)
		ret = dw_pcie_edma_find_channels(pci);

	return ret;
}
1128
/*
 * Verify the eDMA interrupt layout. Three schemes are accepted: the
 * glue driver pre-set a per-channel vector count, the platform offers
 * one combined "dma" IRQ, or one "dmaN" IRQ exists per channel.
 *
 * Return: 0 if a consistent IRQ setup was found, -EINVAL otherwise.
 */
static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
{
	struct platform_device *pdev = to_platform_device(pci->dev);
	u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
	char name[15];
	int ret;

	/* Glue driver claimed multiple vectors: must match the channel count */
	if (pci->edma.nr_irqs > 1)
		return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;

	/* A single combined IRQ is shared by all channels */
	ret = platform_get_irq_byname_optional(pdev, "dma");
	if (ret > 0) {
		pci->edma.nr_irqs = 1;
		return 0;
	}

	/* Otherwise require one "dmaN" IRQ per channel, counting as we go */
	for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
		snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);

		ret = platform_get_irq_byname_optional(pdev, name);
		if (ret <= 0)
			return -EINVAL;
	}

	return 0;
}
1155
dw_pcie_edma_ll_alloc(struct dw_pcie * pci)1156 static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
1157 {
1158 struct dw_edma_region *ll;
1159 dma_addr_t paddr;
1160 int i;
1161
1162 for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
1163 ll = &pci->edma.ll_region_wr[i];
1164 ll->sz = DMA_LLP_MEM_SIZE;
1165 ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
1166 &paddr, GFP_KERNEL);
1167 if (!ll->vaddr.mem)
1168 return -ENOMEM;
1169
1170 ll->paddr = paddr;
1171 }
1172
1173 for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
1174 ll = &pci->edma.ll_region_rd[i];
1175 ll->sz = DMA_LLP_MEM_SIZE;
1176 ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
1177 &paddr, GFP_KERNEL);
1178 if (!ll->vaddr.mem)
1179 return -ENOMEM;
1180
1181 ll->paddr = paddr;
1182 }
1183
1184 return 0;
1185 }
1186
dw_pcie_edma_detect(struct dw_pcie * pci)1187 int dw_pcie_edma_detect(struct dw_pcie *pci)
1188 {
1189 int ret;
1190
1191 /* Don't fail if no eDMA was found (for the backward compatibility) */
1192 ret = dw_pcie_edma_find_chip(pci);
1193 if (ret)
1194 return 0;
1195
1196 /* Don't fail on the IRQs verification (for the backward compatibility) */
1197 ret = dw_pcie_edma_irq_verify(pci);
1198 if (ret) {
1199 dev_err(pci->dev, "Invalid eDMA IRQs found\n");
1200 return 0;
1201 }
1202
1203 ret = dw_pcie_edma_ll_alloc(pci);
1204 if (ret) {
1205 dev_err(pci->dev, "Couldn't allocate LLP memory\n");
1206 return ret;
1207 }
1208
1209 /* Don't fail if the DW eDMA driver can't find the device */
1210 ret = dw_edma_probe(&pci->edma);
1211 if (ret && ret != -ENODEV) {
1212 dev_err(pci->dev, "Couldn't register eDMA device\n");
1213 return ret;
1214 }
1215
1216 dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
1217 pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
1218 pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
1219
1220 return 0;
1221 }
1222
/* Tear down the eDMA device registered by dw_pcie_edma_detect(). */
void dw_pcie_edma_remove(struct dw_pcie *pci)
{
	dw_edma_remove(&pci->edma);
}
1227
dw_pcie_hide_unsupported_l1ss(struct dw_pcie * pci)1228 void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci)
1229 {
1230 u16 l1ss;
1231 u32 l1ss_cap;
1232
1233 if (pci->l1ss_support)
1234 return;
1235
1236 l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
1237 if (!l1ss)
1238 return;
1239
1240 /*
1241 * Unless the driver claims "l1ss_support", don't advertise L1 PM
1242 * Substates because they require CLKREQ# and possibly other
1243 * device-specific configuration.
1244 */
1245 l1ss_cap = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
1246 l1ss_cap &= ~(PCI_L1SS_CAP_PCIPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_1 |
1247 PCI_L1SS_CAP_PCIPM_L1_2 | PCI_L1SS_CAP_ASPM_L1_2 |
1248 PCI_L1SS_CAP_L1_PM_SS);
1249 dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, l1ss_cap);
1250 }
1251
/*
 * dw_pcie_setup - Apply the common DWC link configuration.
 *
 * Programs the target link speed, the Gen1 and Gen2+ N_FTS counts,
 * optionally enables the CDM register-integrity check, enables link
 * establishment and finally sets the link width.
 */
void dw_pcie_setup(struct dw_pcie *pci)
{
	u32 val;

	dw_pcie_link_set_max_speed(pci);

	/* Configure Gen1 N_FTS */
	if (pci->n_fts[0]) {
		val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
		val &= ~(PORT_AFR_N_FTS_MASK | PORT_AFR_CC_N_FTS_MASK);
		val |= PORT_AFR_N_FTS(pci->n_fts[0]);
		val |= PORT_AFR_CC_N_FTS(pci->n_fts[0]);
		dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
	}

	/* Configure Gen2+ N_FTS */
	if (pci->n_fts[1]) {
		val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
		val &= ~PORT_LOGIC_N_FTS_MASK;
		val |= pci->n_fts[1];
		dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
	}

	/* Start the continuous CDM register check if the platform supports it */
	if (dw_pcie_cap_is(pci, CDM_CHECK)) {
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		val |= PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS |
		       PCIE_PL_CHK_REG_CHK_REG_START;
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
	}

	/* Leave fast link (simulation) mode and enable DLL link establishment */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL);
	val &= ~PORT_LINK_FAST_LINK_MODE;
	val |= PORT_LINK_DLL_LINK_EN;
	dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val);

	dw_pcie_link_set_max_link_width(pci, pci->num_lanes);
}
1289
/*
 * dw_pcie_parent_bus_offset - Compute the CPU-to-parent-bus address offset.
 * @pci: DWC PCI controller
 * @reg_name: "reg-names" devicetree entry to look up
 * @cpu_phys_addr: CPU physical address of the same resource
 *
 * Compares the devicetree-translated parent bus address of @reg_name
 * with @cpu_phys_addr (and with a glue driver's cpu_addr_fixup(), if
 * any) to derive the offset callers must subtract when programming
 * controller address registers.
 *
 * Return: cpu_phys_addr - parent_bus_addr, or 0 when the devicetree
 * lacks the entry or no translation applies.
 *
 * Fixes vs. original: "&reg_addr" had been mangled into the "registered"
 * sign (HTML-entity corruption), and the dev_info() below had a stray
 * '\n' embedded mid-sentence, splitting one log line in two.
 */
resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
					  const char *reg_name,
					  resource_size_t cpu_phys_addr)
{
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	int index;
	u64 reg_addr, fixup_addr;
	u64 (*fixup)(struct dw_pcie *pcie, u64 cpu_addr);

	/* Look up reg_name address on parent bus */
	index = of_property_match_string(np, "reg-names", reg_name);

	if (index < 0) {
		dev_err(dev, "No %s in devicetree \"reg\" property\n", reg_name);
		return 0;
	}

	of_property_read_reg(np, index, &reg_addr, NULL);

	fixup = pci->ops ? pci->ops->cpu_addr_fixup : NULL;
	if (fixup) {
		fixup_addr = fixup(pci, cpu_phys_addr);
		if (reg_addr == fixup_addr) {
			/* Devicetree already describes the translation */
			dev_info(dev, "%s reg[%d] %#010llx == %#010llx == fixup(cpu %#010llx); %ps is redundant with this devicetree\n",
				 reg_name, index, reg_addr, fixup_addr,
				 (unsigned long long) cpu_phys_addr, fixup);
		} else {
			/* Trust the fixup over a broken devicetree */
			dev_warn(dev, "%s reg[%d] %#010llx != %#010llx == fixup(cpu %#010llx); devicetree is broken\n",
				 reg_name, index, reg_addr, fixup_addr,
				 (unsigned long long) cpu_phys_addr);
			reg_addr = fixup_addr;
		}

		return cpu_phys_addr - reg_addr;
	}

	if (pci->use_parent_dt_ranges) {

		/*
		 * This platform once had a fixup, presumably because it
		 * translates between CPU and PCI controller addresses.
		 * Log a note if devicetree didn't describe a translation.
		 */
		if (reg_addr == cpu_phys_addr)
			dev_info(dev, "%s reg[%d] %#010llx == cpu %#010llx; no fixup was ever needed for this devicetree\n",
				 reg_name, index, reg_addr,
				 (unsigned long long) cpu_phys_addr);
	} else {
		if (reg_addr != cpu_phys_addr) {
			dev_warn(dev, "%s reg[%d] %#010llx != cpu %#010llx; no fixup and devicetree \"ranges\" is broken, assuming no translation\n",
				 reg_name, index, reg_addr,
				 (unsigned long long) cpu_phys_addr);
			return 0;
		}
	}

	return cpu_phys_addr - reg_addr;
}
1349