xref: /linux/drivers/pci/controller/dwc/pcie-qcom.c (revision 40286d6379aacfcc053253ef78dc78b09addffda)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Qualcomm PCIe root complex driver
4  *
5  * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
6  * Copyright 2015 Linaro Limited.
7  *
8  * Author: Stanimir Varbanov <svarbanov@mm-sol.com>
9  */
10 
11 #include <linux/clk.h>
12 #include <linux/crc8.h>
13 #include <linux/debugfs.h>
14 #include <linux/delay.h>
15 #include <linux/gpio/consumer.h>
16 #include <linux/interconnect.h>
17 #include <linux/interrupt.h>
18 #include <linux/io.h>
19 #include <linux/iopoll.h>
20 #include <linux/kernel.h>
21 #include <linux/limits.h>
22 #include <linux/init.h>
23 #include <linux/of.h>
24 #include <linux/of_pci.h>
25 #include <linux/pci.h>
26 #include <linux/pci-ecam.h>
27 #include <linux/pci-pwrctrl.h>
28 #include <linux/pm_opp.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/platform_device.h>
31 #include <linux/phy/pcie.h>
32 #include <linux/phy/phy.h>
33 #include <linux/regulator/consumer.h>
34 #include <linux/reset.h>
35 #include <linux/slab.h>
36 #include <linux/types.h>
37 #include <linux/units.h>
38 
39 #include "../../pci.h"
40 #include "../pci-host-common.h"
41 #include "pcie-designware.h"
42 #include "pcie-qcom-common.h"
43 
44 /* PARF registers */
45 #define PARF_SYS_CTRL				0x00
46 #define PARF_PM_CTRL				0x20
47 #define PARF_PCS_DEEMPH				0x34
48 #define PARF_PCS_SWING				0x38
49 #define PARF_PHY_CTRL				0x40
50 #define PARF_PHY_REFCLK				0x4c
51 #define PARF_CONFIG_BITS			0x50
52 #define PARF_DBI_BASE_ADDR			0x168
53 #define PARF_SLV_ADDR_SPACE_SIZE		0x16c
54 #define PARF_MHI_CLOCK_RESET_CTRL		0x174
55 #define PARF_AXI_MSTR_WR_ADDR_HALT		0x178
56 #define PARF_AXI_MSTR_WR_ADDR_HALT_V2		0x1a8
57 #define PARF_Q2A_FLUSH				0x1ac
58 #define PARF_LTSSM				0x1b0
59 #define PARF_SID_OFFSET				0x234
60 #define PARF_BDF_TRANSLATE_CFG			0x24c
61 #define PARF_DBI_BASE_ADDR_V2			0x350
62 #define PARF_DBI_BASE_ADDR_V2_HI		0x354
63 #define PARF_SLV_ADDR_SPACE_SIZE_V2		0x358
64 #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI		0x35c
65 #define PARF_NO_SNOOP_OVERRIDE			0x3d4
66 #define PARF_ATU_BASE_ADDR			0x634
67 #define PARF_ATU_BASE_ADDR_HI			0x638
68 #define PARF_DEVICE_TYPE			0x1000
69 #define PARF_BDF_TO_SID_TABLE_N			0x2000
70 #define PARF_BDF_TO_SID_CFG			0x2c00
71 
72 /* ELBI registers */
73 #define ELBI_SYS_CTRL				0x04
74 
75 /* DBI registers */
76 #define AXI_MSTR_RESP_COMP_CTRL0		0x818
77 #define AXI_MSTR_RESP_COMP_CTRL1		0x81c
78 
79 /* MHI registers */
80 #define PARF_DEBUG_CNT_PM_LINKST_IN_L2		0xc04
81 #define PARF_DEBUG_CNT_PM_LINKST_IN_L1		0xc0c
82 #define PARF_DEBUG_CNT_PM_LINKST_IN_L0S		0xc10
83 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1	0xc84
84 #define PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2	0xc88
85 
86 /* PARF_SYS_CTRL register fields */
87 #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN	BIT(29)
88 #define MST_WAKEUP_EN				BIT(13)
89 #define SLV_WAKEUP_EN				BIT(12)
90 #define MSTR_ACLK_CGC_DIS			BIT(10)
91 #define SLV_ACLK_CGC_DIS			BIT(9)
92 #define CORE_CLK_CGC_DIS			BIT(6)
93 #define AUX_PWR_DET				BIT(4)
94 #define L23_CLK_RMV_DIS				BIT(2)
95 #define L1_CLK_RMV_DIS				BIT(1)
96 
97 /* PARF_PM_CTRL register fields */
98 #define REQ_NOT_ENTR_L1				BIT(5)
99 
100 /* PARF_PCS_DEEMPH register fields */
101 #define PCS_DEEMPH_TX_DEEMPH_GEN1(x)		FIELD_PREP(GENMASK(21, 16), x)
102 #define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)	FIELD_PREP(GENMASK(13, 8), x)
103 #define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)	FIELD_PREP(GENMASK(5, 0), x)
104 
105 /* PARF_PCS_SWING register fields */
106 #define PCS_SWING_TX_SWING_FULL(x)		FIELD_PREP(GENMASK(14, 8), x)
107 #define PCS_SWING_TX_SWING_LOW(x)		FIELD_PREP(GENMASK(6, 0), x)
108 
109 /* PARF_PHY_CTRL register fields */
110 #define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK	GENMASK(20, 16)
111 #define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)		FIELD_PREP(PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK, x)
112 #define PHY_TEST_PWR_DOWN			BIT(0)
113 
114 /* PARF_PHY_REFCLK register fields */
115 #define PHY_REFCLK_SSP_EN			BIT(16)
116 #define PHY_REFCLK_USE_PAD			BIT(12)
117 
118 /* PARF_CONFIG_BITS register fields */
119 #define PHY_RX0_EQ(x)				FIELD_PREP(GENMASK(26, 24), x)
120 
121 /* PARF_SLV_ADDR_SPACE_SIZE register value */
122 #define SLV_ADDR_SPACE_SZ			0x80000000
123 
124 /* PARF_MHI_CLOCK_RESET_CTRL register fields */
125 #define AHB_CLK_EN				BIT(0)
126 #define MSTR_AXI_CLK_EN				BIT(1)
127 #define BYPASS					BIT(4)
128 
129 /* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
130 #define EN					BIT(31)
131 
132 /* PARF_LTSSM register fields */
133 #define LTSSM_EN				BIT(8)
134 
135 /* PARF_NO_SNOOP_OVERRIDE register fields */
136 #define WR_NO_SNOOP_OVERRIDE_EN			BIT(1)
137 #define RD_NO_SNOOP_OVERRIDE_EN			BIT(3)
138 
139 /* PARF_DEVICE_TYPE register fields */
140 #define DEVICE_TYPE_RC				0x4
141 
142 /* PARF_BDF_TO_SID_CFG fields */
143 #define BDF_TO_SID_BYPASS			BIT(0)
144 
145 /* ELBI_SYS_CTRL register fields */
146 #define ELBI_SYS_CTRL_LT_ENABLE			BIT(0)
147 
148 /* AXI_MSTR_RESP_COMP_CTRL0 register fields */
149 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K	0x4
150 #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K	0x5
151 
152 /* AXI_MSTR_RESP_COMP_CTRL1 register fields */
153 #define CFG_BRIDGE_SB_INIT			BIT(0)
154 
155 /* PCI_EXP_SLTCAP register fields */
156 #define PCIE_CAP_SLOT_POWER_LIMIT_VAL		FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250)
157 #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE		FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1)
158 #define PCIE_CAP_SLOT_VAL			(PCI_EXP_SLTCAP_ABP | \
159 						PCI_EXP_SLTCAP_PCP | \
160 						PCI_EXP_SLTCAP_MRLSP | \
161 						PCI_EXP_SLTCAP_AIP | \
162 						PCI_EXP_SLTCAP_PIP | \
163 						PCI_EXP_SLTCAP_HPS | \
164 						PCI_EXP_SLTCAP_EIP | \
165 						PCIE_CAP_SLOT_POWER_LIMIT_VAL | \
166 						PCIE_CAP_SLOT_POWER_LIMIT_SCALE)
167 
168 #define PERST_DELAY_US				1000
169 
170 #define QCOM_PCIE_CRC8_POLYNOMIAL		(BIT(2) | BIT(1) | BIT(0))
171 
172 #define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
173 		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_get_link_speed(speed)))
174 
/* Resources for the IP rev. 1.0.0 wrapper */
struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data *clks;	/* all clocks, from devm_clk_bulk_get_all() */
	int num_clks;			/* entry count of @clks */
	struct reset_control *core;	/* "core" reset line */
	struct regulator *vdda;		/* "vdda" supply */
};
181 
#define QCOM_PCIE_2_1_0_MAX_RESETS		6
#define QCOM_PCIE_2_1_0_MAX_SUPPLY		3
/* Resources for the IP rev. 2.1.0 wrapper */
struct qcom_pcie_resources_2_1_0 {
	struct clk_bulk_data *clks;	/* all clocks, from devm_clk_bulk_get_all() */
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_1_0_MAX_RESETS];
	int num_resets;			/* 5 on APQ8064 ("ext" skipped), else 6 */
	struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
};
191 
#define QCOM_PCIE_2_3_2_MAX_SUPPLY		2
/* Resources for the IP rev. 2.3.2 wrapper */
struct qcom_pcie_resources_2_3_2 {
	struct clk_bulk_data *clks;	/* all clocks, from devm_clk_bulk_get_all() */
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY];	/* "vdda", "vddpe-3v3" */
};
198 
#define QCOM_PCIE_2_3_3_MAX_RESETS		7
/* Resources for the IP rev. 2.3.3 wrapper */
struct qcom_pcie_resources_2_3_3 {
	struct clk_bulk_data *clks;	/* all clocks, from devm_clk_bulk_get_all() */
	int num_clks;
	struct reset_control_bulk_data rst[QCOM_PCIE_2_3_3_MAX_RESETS];	/* always all 7 are used */
};
205 
#define QCOM_PCIE_2_4_0_MAX_RESETS		12
/* Resources for the IP rev. 2.4.0 wrapper */
struct qcom_pcie_resources_2_4_0 {
	struct clk_bulk_data *clks;	/* all clocks, from devm_clk_bulk_get_all() */
	int num_clks;
	struct reset_control_bulk_data resets[QCOM_PCIE_2_4_0_MAX_RESETS];
	int num_resets;			/* 12 on IPQ4019, else only the first 6 */
};
213 
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES		2
/* Resources for the IP rev. 2.7.0 wrapper */
struct qcom_pcie_resources_2_7_0 {
	struct clk_bulk_data *clks;	/* all clocks, from devm_clk_bulk_get_all() */
	int num_clks;
	struct regulator_bulk_data supplies[QCOM_PCIE_2_7_0_MAX_SUPPLIES];	/* "vdda", "vddpe-3v3" */
	struct reset_control *rst;	/* whole reset array, from devm_reset_control_array_get_exclusive() */
};
221 
/* Resources for the IP rev. 2.9.0 wrapper */
struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data *clks;	/* all clocks, from devm_clk_bulk_get_all() */
	int num_clks;
	struct reset_control *rst;
};
227 
/* Per-IP-revision resources; only the member matching the SoC's ops is used */
union qcom_pcie_resources {
	struct qcom_pcie_resources_1_0_0 v1_0_0;
	struct qcom_pcie_resources_2_1_0 v2_1_0;
	struct qcom_pcie_resources_2_3_2 v2_3_2;
	struct qcom_pcie_resources_2_3_3 v2_3_3;
	struct qcom_pcie_resources_2_4_0 v2_4_0;
	struct qcom_pcie_resources_2_7_0 v2_7_0;
	struct qcom_pcie_resources_2_9_0 v2_9_0;
};
237 
238 struct qcom_pcie;
239 
/**
 * struct qcom_pcie_ops - IP revision specific callbacks
 * @get_resources: acquire clocks, resets and regulators for this IP rev.
 * @init: power up the controller (regulators/clocks/resets)
 * @post_init: program PARF/DBI registers once the controller is powered
 * @host_post_init: optional hook run after host bring-up
 * @deinit: undo @init
 * @ltssm_enable: optional; start the Link Training state machine
 *                (called from qcom_pcie_start_link())
 * @config_sid: optional; program BDF-to-SID translation
 */
struct qcom_pcie_ops {
	int (*get_resources)(struct qcom_pcie *pcie);
	int (*init)(struct qcom_pcie *pcie);
	int (*post_init)(struct qcom_pcie *pcie);
	void (*host_post_init)(struct qcom_pcie *pcie);
	void (*deinit)(struct qcom_pcie *pcie);
	void (*ltssm_enable)(struct qcom_pcie *pcie);
	int (*config_sid)(struct qcom_pcie *pcie);
};
249 
/**
 * struct qcom_pcie_cfg - Per SoC config struct
 * @ops: qcom PCIe ops structure
 * @override_no_snoop: Override NO_SNOOP attribute in TLP to enable cache
 * snooping
 * @firmware_managed: Set if the Root Complex is firmware managed
 * @no_l0s: Set to clear the ASPM L0s support bit in the Link Capabilities
 * register (see qcom_pcie_clear_aspm_l0s())
 */
struct qcom_pcie_cfg {
	const struct qcom_pcie_ops *ops;
	bool override_no_snoop;
	bool firmware_managed;
	bool no_l0s;
};
263 
/* One PERST# GPIO; linked into struct qcom_pcie_port::perst */
struct qcom_pcie_perst {
	struct list_head list;
	struct gpio_desc *desc;
};
268 
/* A controller port: its PHY plus the PERST# GPIOs it drives */
struct qcom_pcie_port {
	struct list_head list;		/* linked into struct qcom_pcie::ports */
	struct phy *phy;
	struct list_head perst;		/* list of struct qcom_pcie_perst */
};
274 
/* Driver state for one root complex instance */
struct qcom_pcie {
	struct dw_pcie *pci;
	void __iomem *parf;			/* DT parf */
	void __iomem *mhi;			/* MHI regs (PARF_DEBUG_CNT_*); presumably optional - verify */
	union qcom_pcie_resources res;		/* IP rev. specific resources */
	struct icc_path *icc_mem;		/* NOTE(review): presumably PCIe<->memory interconnect path */
	struct icc_path *icc_cpu;		/* NOTE(review): presumably CPU<->PCIe interconnect path */
	const struct qcom_pcie_cfg *cfg;	/* per-SoC configuration */
	struct dentry *debugfs;
	struct list_head ports;			/* list of struct qcom_pcie_port */
	bool suspended;
	bool use_pm_opp;
};
288 
289 #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
290 
291 static void __qcom_pcie_perst_assert(struct qcom_pcie *pcie, bool assert)
292 {
293 	struct qcom_pcie_perst *perst;
294 	struct qcom_pcie_port *port;
295 	int val = assert ? 1 : 0;
296 
297 	list_for_each_entry(port, &pcie->ports, list) {
298 		list_for_each_entry(perst, &port->perst, list)
299 			gpiod_set_value_cansleep(perst->desc, val);
300 	}
301 
302 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
303 }
304 
/* Assert PERST# on every port */
static void qcom_pcie_perst_assert(struct qcom_pcie *pcie)
{
	__qcom_pcie_perst_assert(pcie, true);
}
309 
/* Release PERST# on every port after honouring the T_PVPERL minimum */
static void qcom_pcie_perst_deassert(struct qcom_pcie *pcie)
{
	/* Ensure that PERST# has been asserted for at least 100 ms */
	msleep(PCIE_T_PVPERL_MS);
	__qcom_pcie_perst_assert(pcie, false);
}
316 
/*
 * Start link training: program equalization (and 16 GT/s lane margining
 * when the link may train that fast), then kick the LTSSM. Always returns 0.
 */
static int qcom_pcie_start_link(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_pcie_common_set_equalization(pci);

	/* Lane margining only applies when the max speed is 16 GT/s */
	if (pcie_get_link_speed(pci->max_link_speed) == PCIE_SPEED_16_0GT)
		qcom_pcie_common_set_16gt_lane_margining(pci);

	/* Enable Link Training state machine */
	if (pcie->cfg->ops->ltssm_enable)
		pcie->cfg->ops->ltssm_enable(pcie);

	return 0;
}
332 
/*
 * Clear the ASPM L0s support bit in the Link Capabilities register when the
 * per-SoC config (@no_l0s) says L0s must not be advertised. No-op otherwise.
 */
static void qcom_pcie_clear_aspm_l0s(struct dw_pcie *pci)
{
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	u16 offset;
	u32 val;

	if (!pcie->cfg->no_l0s)
		return;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);

	/* LNKCAP is read-only; temporarily open the DBI for writes */
	dw_pcie_dbi_ro_wr_en(pci);

	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPM_L0S;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}
352 
/* Set "No Command Completed Support" in the Slot Capabilities register */
static void qcom_pcie_set_slot_nccs(struct dw_pcie *pci)
{
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	/* SLTCAP is read-only; temporarily open the DBI for writes */
	dw_pcie_dbi_ro_wr_en(pci);

	/*
	 * Qcom PCIe Root Ports do not support generating command completion
	 * notifications for the Hot-Plug commands. So set the NCCS field to
	 * avoid waiting for the completions.
	 */
	val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP);
	val |= PCI_EXP_SLTCAP_NCCS;
	writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	dw_pcie_dbi_ro_wr_dis(pci);
}
371 
/*
 * Tell PARF where the DBI space lives (single 32-bit base register variant,
 * used by the older IP revisions). No-op when dbi_phys_addr is unset.
 */
static void qcom_pcie_configure_dbi_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * PARF_DBI_BASE_ADDR register is in CPU domain and require to
		 * be programmed with CPU physical address.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
							PARF_DBI_BASE_ADDR);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
						PARF_SLV_ADDR_SPACE_SIZE);
	}
}
387 
/*
 * Program the 64-bit (lo/hi split) V2 DBI and ATU base registers in PARF.
 * No-op when dbi_phys_addr is unset; the ATU base is only written when known.
 */
static void qcom_pcie_configure_dbi_atu_base(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;

	if (pci->dbi_phys_addr) {
		/*
		 * PARF_DBI_BASE_ADDR_V2 and PARF_ATU_BASE_ADDR registers are
		 * in CPU domain and require to be programmed with CPU
		 * physical addresses.
		 */
		writel(lower_32_bits(pci->dbi_phys_addr), pcie->parf +
							PARF_DBI_BASE_ADDR_V2);
		writel(upper_32_bits(pci->dbi_phys_addr), pcie->parf +
						PARF_DBI_BASE_ADDR_V2_HI);

		if (pci->atu_phys_addr) {
			writel(lower_32_bits(pci->atu_phys_addr), pcie->parf +
							PARF_ATU_BASE_ADDR);
			writel(upper_32_bits(pci->atu_phys_addr), pcie->parf +
							PARF_ATU_BASE_ADDR_HI);
		}

		/* 64-bit slave address space size, split lo/hi (0x80000000 << 32) */
		writel(0x0, pcie->parf + PARF_SLV_ADDR_SPACE_SIZE_V2);
		writel(SLV_ADDR_SPACE_SZ, pcie->parf +
					PARF_SLV_ADDR_SPACE_SIZE_V2_HI);
	}
}
415 
/* IP rev. 2.1.0: start link training via the ELBI register block */
static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u32 val;

	if (!pci->elbi_base) {
		dev_err(pci->dev, "ELBI is not present\n");
		return;
	}
	/* enable link training */
	val = readl(pci->elbi_base + ELBI_SYS_CTRL);
	val |= ELBI_SYS_CTRL_LT_ENABLE;
	writel(val, pci->elbi_base + ELBI_SYS_CTRL);
}
430 
/* Acquire regulators, clocks and resets for the IP rev. 2.1.0 wrapper */
static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_apq = of_device_is_compatible(dev->of_node, "qcom,pcie-apq8064");
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vdda_phy";
	res->supplies[2].supply = "vdda_refclk";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "pci";
	res->resets[1].id = "axi";
	res->resets[2].id = "ahb";
	res->resets[3].id = "por";
	res->resets[4].id = "phy";
	res->resets[5].id = "ext";

	/*
	 * The "ext" reset is not requested on APQ8064 (the historical
	 * comment here said APQ8016 - verify against the DT bindings).
	 */
	res->num_resets = is_apq ? 5 : 6;
	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}
468 
/* Power down the IP rev. 2.1.0 controller (reverse of init/post_init) */
static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	reset_control_bulk_assert(res->num_resets, res->resets);

	/* Power down the PHY (sets PHY_TEST_PWR_DOWN, BIT(0)) */
	writel(1, pcie->parf + PARF_PHY_CTRL);

	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
480 
/*
 * Bring up power for the IP rev. 2.1.0 controller: assert resets to get a
 * clean state, enable regulators, then release the resets.
 */
static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	/* Reset the interface, as the bootloader may have left it in an undefined state */
	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}
510 
511 static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
512 {
513 	struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
514 	struct dw_pcie *pci = pcie->pci;
515 	struct device *dev = pci->dev;
516 	struct device_node *node = dev->of_node;
517 	u32 val;
518 	int ret;
519 
520 	/* enable PCIe clocks and resets */
521 	val = readl(pcie->parf + PARF_PHY_CTRL);
522 	val &= ~PHY_TEST_PWR_DOWN;
523 	writel(val, pcie->parf + PARF_PHY_CTRL);
524 
525 	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
526 	if (ret)
527 		return ret;
528 
529 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064") ||
530 	    of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) {
531 		writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
532 			       PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
533 			       PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
534 		       pcie->parf + PARF_PCS_DEEMPH);
535 		writel(PCS_SWING_TX_SWING_FULL(120) |
536 			       PCS_SWING_TX_SWING_LOW(120),
537 		       pcie->parf + PARF_PCS_SWING);
538 		writel(PHY_RX0_EQ(4), pcie->parf + PARF_CONFIG_BITS);
539 	}
540 
541 	if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
542 		/* set TX termination offset */
543 		val = readl(pcie->parf + PARF_PHY_CTRL);
544 		val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
545 		val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
546 		writel(val, pcie->parf + PARF_PHY_CTRL);
547 	}
548 
549 	/* enable external reference clock */
550 	val = readl(pcie->parf + PARF_PHY_REFCLK);
551 	/* USE_PAD is required only for ipq806x */
552 	if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
553 		val &= ~PHY_REFCLK_USE_PAD;
554 	val |= PHY_REFCLK_SSP_EN;
555 	writel(val, pcie->parf + PARF_PHY_REFCLK);
556 
557 	/* wait for clock acquisition */
558 	usleep_range(1000, 1500);
559 
560 	/* Set the Max TLP size to 2K, instead of using default of 4K */
561 	writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K,
562 	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL0);
563 	writel(CFG_BRIDGE_SB_INIT,
564 	       pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1);
565 
566 	qcom_pcie_set_slot_nccs(pcie->pci);
567 
568 	return 0;
569 }
570 
/* Acquire the "vdda" supply, all clocks and the "core" reset for rev. 1.0.0 */
static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->vdda = devm_regulator_get(dev, "vdda");
	if (IS_ERR(res->vdda))
		return PTR_ERR(res->vdda);

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->core = devm_reset_control_get_exclusive(dev, "core");
	return PTR_ERR_OR_ZERO(res->core);
}
590 
/* Power down the rev. 1.0.0 controller: reset, stop clocks, drop vdda */
static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;

	reset_control_assert(res->core);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_disable(res->vdda);
}
599 
/*
 * Power up the rev. 1.0.0 controller: release the core reset, enable the
 * clocks, then the vdda supply. Unwinds in reverse order on failure.
 */
static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_deassert(res->core);
	if (ret) {
		dev_err(dev, "cannot deassert core reset\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_reset;
	}

	ret = regulator_enable(res->vdda);
	if (ret) {
		dev_err(dev, "cannot enable vdda regulator\n");
		goto err_disable_clks;
	}

	return 0;

err_disable_clks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_assert_reset:
	reset_control_assert(res->core);

	return ret;
}
634 
/* Rev. 1.0.0 PARF setup: DBI base, MSI write-halt enable, slot NCCS */
static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie)
{
	qcom_pcie_configure_dbi_base(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/* Halt AXI master writes so MSI writes are ordered correctly - verify intent */
		u32 val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);

		val |= EN;
		writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
	}

	qcom_pcie_set_slot_nccs(pcie->pci);

	return 0;
}
650 
651 static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie)
652 {
653 	u32 val;
654 
655 	/* enable link training */
656 	val = readl(pcie->parf + PARF_LTSSM);
657 	val |= LTSSM_EN;
658 	writel(val, pcie->parf + PARF_LTSSM);
659 }
660 
/* Acquire the two supplies and all clocks for the IP rev. 2.3.2 wrapper */
static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}
683 
/* Power down the rev. 2.3.2 controller: stop clocks, then regulators */
static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
}
691 
/* Power up the rev. 2.3.2 controller: regulators first, then clocks */
static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
		return ret;
	}

	return 0;
}
714 
/*
 * Rev. 2.3.2 PARF setup: wake the PHY, program the DBI base, disable the
 * PHY powerdown mux, bypass MHI clock/reset control and enable AXI master
 * write-address halting.
 */
static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie)
{
	u32 val;

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	qcom_pcie_set_slot_nccs(pcie->pci);

	return 0;
}
743 
/* Acquire clocks and resets for the IP rev. 2.4.0 wrapper */
static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	bool is_ipq = of_device_is_compatible(dev->of_node, "qcom,pcie-ipq4019");
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->resets[0].id = "axi_m";
	res->resets[1].id = "axi_s";
	res->resets[2].id = "axi_m_sticky";
	res->resets[3].id = "pipe_sticky";
	res->resets[4].id = "pwr";
	res->resets[5].id = "ahb";
	res->resets[6].id = "pipe";
	res->resets[7].id = "axi_m_vmid";
	res->resets[8].id = "axi_s_xpu";
	res->resets[9].id = "parf";
	res->resets[10].id = "phy";
	res->resets[11].id = "phy_ahb";

	/* Only IPQ4019 uses the last six resets; other SoCs take the first six */
	res->num_resets = is_ipq ? 12 : 6;

	ret = devm_reset_control_bulk_get_exclusive(dev, res->num_resets, res->resets);
	if (ret < 0)
		return ret;

	return 0;
}
779 
/* Power down the rev. 2.4.0 controller: assert resets, then stop clocks */
static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;

	reset_control_bulk_assert(res->num_resets, res->resets);
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
787 
/*
 * Power up the rev. 2.4.0 controller: pulse all resets (with settle delays)
 * and then enable the clocks; re-assert resets if clocks fail.
 */
static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	/* No completion indication for the resets; allow them time to settle */
	usleep_range(10000, 12000);

	ret = reset_control_bulk_deassert(res->num_resets, res->resets);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	usleep_range(10000, 12000);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		reset_control_bulk_assert(res->num_resets, res->resets);
		return ret;
	}

	return 0;
}
819 
/* Acquire clocks and the full set of seven resets for the rev. 2.3.3 wrapper */
static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	res->rst[0].id = "axi_m";
	res->rst[1].id = "axi_s";
	res->rst[2].id = "pipe";
	res->rst[3].id = "axi_m_sticky";
	res->rst[4].id = "sticky";
	res->rst[5].id = "ahb";
	res->rst[6].id = "sleep";

	ret = devm_reset_control_bulk_get_exclusive(dev, ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0)
		return ret;

	return 0;
}
847 
/* Power down the rev. 2.3.3 controller (clocks only; resets stay released) */
static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;

	clk_bulk_disable_unprepare(res->num_clks, res->clks);
}
854 
/*
 * Power up the rev. 2.3.3 controller: pulse all seven resets with settle
 * delays, then enable the clocks; re-assert resets if clocks fail.
 */
static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	ret = reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot assert resets\n");
		return ret;
	}

	usleep_range(2000, 2500);

	ret = reset_control_bulk_deassert(ARRAY_SIZE(res->rst), res->rst);
	if (ret < 0) {
		dev_err(dev, "cannot deassert resets\n");
		return ret;
	}

	/*
	 * Don't have a way to see if the reset has completed.
	 * Wait for some time.
	 */
	usleep_range(2000, 2500);

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret) {
		dev_err(dev, "cannot prepare/enable clocks\n");
		goto err_assert_resets;
	}

	return 0;

err_assert_resets:
	/*
	 * Not checking for failure, will anyway return
	 * the original failure in 'ret'.
	 */
	reset_control_bulk_assert(ARRAY_SIZE(res->rst), res->rst);

	return ret;
}
899 
/*
 * Rev. 2.3.3 PARF/DBI setup: wake the PHY, program the DBI/ATU bases,
 * configure SYS_CTRL clock gating/wakeup bits, then fix up the Slot and
 * Link Capabilities registers through a temporarily-writable DBI.
 */
static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;

	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS
		| SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);
	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND);

	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	/* Clear all ASPM support bits from the Link Capabilities register */
	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
		PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	return 0;
}
935 
/* Acquire the reset array, supplies and clocks for the rev. 2.7.0 wrapper */
static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	int ret;

	res->rst = devm_reset_control_array_get_exclusive(dev);
	if (IS_ERR(res->rst))
		return PTR_ERR(res->rst);

	res->supplies[0].supply = "vdda";
	res->supplies[1].supply = "vddpe-3v3";
	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies),
				      res->supplies);
	if (ret)
		return ret;

	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
	if (res->num_clks < 0) {
		dev_err(dev, "Failed to get clocks\n");
		return res->num_clks;
	}

	return 0;
}
962 
/*
 * Power up and configure the rev. 2.7.0 controller: enable regulators and
 * clocks, pulse the reset array, select RC mode, wake the PHY, program the
 * DBI/ATU bases and enable L1/L1SS entry. Unwinds on failure.
 */
static int qcom_pcie_init_2_7_0(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;
	u32 val;
	int ret;

	ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies);
	if (ret < 0) {
		dev_err(dev, "cannot enable regulators\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(res->num_clks, res->clks);
	if (ret < 0)
		goto err_disable_regulators;

	ret = reset_control_assert(res->rst);
	if (ret) {
		dev_err(dev, "reset assert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	usleep_range(1000, 1500);

	ret = reset_control_deassert(res->rst);
	if (ret) {
		dev_err(dev, "reset deassert failed (%d)\n", ret);
		goto err_disable_clocks;
	}

	/* Wait for reset to complete, required on SM8450 */
	usleep_range(1000, 1500);

	/* configure PCIe to RC mode */
	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);

	/* enable PCIe clocks and resets */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	/* MAC PHY_POWERDOWN MUX DISABLE  */
	val = readl(pcie->parf + PARF_SYS_CTRL);
	val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN;
	writel(val, pcie->parf + PARF_SYS_CTRL);

	val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	val |= BYPASS;
	writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);

	/* Enable L1 and L1SS */
	val = readl(pcie->parf + PARF_PM_CTRL);
	val &= ~REQ_NOT_ENTR_L1;
	writel(val, pcie->parf + PARF_PM_CTRL);

	pci->l1ss_support = true;

	val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);
	val |= EN;
	writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2);

	return 0;
err_disable_clocks:
	clk_bulk_disable_unprepare(res->num_clks, res->clks);
err_disable_regulators:
	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);

	return ret;
}
1036 
/*
 * Rev. 2.7.0 post-init: optionally force NO_SNOOP override for both read
 * and write TLPs (per-SoC quirk), then set slot NCCS.
 */
static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie)
{
	const struct qcom_pcie_cfg *pcie_cfg = pcie->cfg;

	if (pcie_cfg->override_no_snoop)
		writel(WR_NO_SNOOP_OVERRIDE_EN | RD_NO_SNOOP_OVERRIDE_EN,
				pcie->parf + PARF_NO_SNOOP_OVERRIDE);

	qcom_pcie_set_slot_nccs(pcie->pci);

	return 0;
}
1049 
/* pci_walk_bus() callback: enable all ASPM link states on one device */
static int qcom_pcie_enable_aspm(struct pci_dev *pdev, void *userdata)
{
	/*
	 * Downstream devices need to be in D0 state before enabling PCI PM
	 * substates.
	 */
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);

	return 0;
}
1061 
1062 static void qcom_pcie_host_post_init_2_7_0(struct qcom_pcie *pcie)
1063 {
1064 	struct dw_pcie_rp *pp = &pcie->pci->pp;
1065 
1066 	pci_walk_bus(pp->bridge->bus, qcom_pcie_enable_aspm, NULL);
1067 }
1068 
1069 static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie)
1070 {
1071 	struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0;
1072 
1073 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
1074 
1075 	regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
1076 }
1077 
/*
 * Program the PARF BDF-to-SID translation table from the "iommu-map" DT
 * property.
 *
 * Each hardware entry is one 32-bit word laid out as
 * BDF [31:16] | SID [15:8] | NEXT [7:0] and is indexed by a CRC8 hash of
 * the (big-endian) BDF. Collisions are resolved by chaining through the
 * NEXT field. Since the hash is a u8, the table has 256 slots.
 *
 * Returns 0 on success (including when no "iommu-map" is present) or
 * -ENOMEM on allocation failure.
 */
static int qcom_pcie_config_sid_1_9_0(struct qcom_pcie *pcie)
{
	/* iommu map structure */
	struct {
		u32 bdf;
		u32 phandle;
		u32 smmu_sid;
		u32 smmu_sid_len;
	} *map;
	void __iomem *bdf_to_sid_base = pcie->parf + PARF_BDF_TO_SID_TABLE_N;
	struct device *dev = pcie->pci->dev;
	u8 qcom_pcie_crc8_table[CRC8_TABLE_SIZE];
	int i, nr_map, size = 0;
	u32 smmu_sid_base;
	u32 val;

	/* Nothing to do if the DT does not describe a BDF-to-SID mapping */
	of_get_property(dev->of_node, "iommu-map", &size);
	if (!size)
		return 0;

	/* Enable BDF to SID translation by disabling bypass mode (default) */
	val = readl(pcie->parf + PARF_BDF_TO_SID_CFG);
	val &= ~BDF_TO_SID_BYPASS;
	writel(val, pcie->parf + PARF_BDF_TO_SID_CFG);

	map = kzalloc(size, GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	of_property_read_u32_array(dev->of_node, "iommu-map", (u32 *)map,
				   size / sizeof(u32));

	nr_map = size / (sizeof(*map));

	crc8_populate_msb(qcom_pcie_crc8_table, QCOM_PCIE_CRC8_POLYNOMIAL);

	/* Registers need to be zero out first */
	memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

	/* Extract the SMMU SID base from the first entry of iommu-map */
	smmu_sid_base = map[0].smmu_sid;

	/* Look for an available entry to hold the mapping */
	for (i = 0; i < nr_map; i++) {
		/* The hash covers only the low 16 bits (the BDF proper) */
		__be16 bdf_be = cpu_to_be16(map[i].bdf);
		u32 val;
		u8 hash;

		hash = crc8(qcom_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be), 0);

		val = readl(bdf_to_sid_base + hash * sizeof(u32));

		/* If the register is already populated, look for next available entry */
		while (val) {
			u8 current_hash = hash++;
			u8 next_mask = 0xff;

			/* If NEXT field is NULL then update it with next hash */
			if (!(val & next_mask)) {
				val |= (u32)hash;
				writel(val, bdf_to_sid_base + current_hash * sizeof(u32));
			}

			val = readl(bdf_to_sid_base + hash * sizeof(u32));
		}

		/* BDF [31:16] | SID [15:8] | NEXT [7:0] */
		val = map[i].bdf << 16 | (map[i].smmu_sid - smmu_sid_base) << 8 | 0;
		writel(val, bdf_to_sid_base + hash * sizeof(u32));
	}

	kfree(map);

	return 0;
}
1153 
1154 static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
1155 {
1156 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1157 	struct dw_pcie *pci = pcie->pci;
1158 	struct device *dev = pci->dev;
1159 
1160 	res->num_clks = devm_clk_bulk_get_all(dev, &res->clks);
1161 	if (res->num_clks < 0) {
1162 		dev_err(dev, "Failed to get clocks\n");
1163 		return res->num_clks;
1164 	}
1165 
1166 	res->rst = devm_reset_control_array_get_exclusive(dev);
1167 	if (IS_ERR(res->rst))
1168 		return PTR_ERR(res->rst);
1169 
1170 	return 0;
1171 }
1172 
1173 static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
1174 {
1175 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1176 
1177 	clk_bulk_disable_unprepare(res->num_clks, res->clks);
1178 }
1179 
1180 static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
1181 {
1182 	struct qcom_pcie_resources_2_9_0 *res = &pcie->res.v2_9_0;
1183 	struct device *dev = pcie->pci->dev;
1184 	int ret;
1185 
1186 	ret = reset_control_assert(res->rst);
1187 	if (ret) {
1188 		dev_err(dev, "reset assert failed (%d)\n", ret);
1189 		return ret;
1190 	}
1191 
1192 	/*
1193 	 * Delay periods before and after reset deassert are working values
1194 	 * from downstream Codeaurora kernel
1195 	 */
1196 	usleep_range(2000, 2500);
1197 
1198 	ret = reset_control_deassert(res->rst);
1199 	if (ret) {
1200 		dev_err(dev, "reset deassert failed (%d)\n", ret);
1201 		return ret;
1202 	}
1203 
1204 	usleep_range(2000, 2500);
1205 
1206 	return clk_bulk_prepare_enable(res->num_clks, res->clks);
1207 }
1208 
/*
 * One-time controller setup for IP rev 2.9.0: take the PHY out of test
 * power-down, select RC mode, program the clock/CGC controls and sanitize
 * a few config-space registers through the (normally read-only) DBI
 * window. Finally the BDF-to-SID table is cleared.
 */
static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie)
{
	struct dw_pcie *pci = pcie->pci;
	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	u32 val;
	int i;

	/* Release the PHY from test power-down */
	val = readl(pcie->parf + PARF_PHY_CTRL);
	val &= ~PHY_TEST_PWR_DOWN;
	writel(val, pcie->parf + PARF_PHY_CTRL);

	qcom_pcie_configure_dbi_atu_base(pcie);

	writel(DEVICE_TYPE_RC, pcie->parf + PARF_DEVICE_TYPE);
	writel(BYPASS | MSTR_AXI_CLK_EN | AHB_CLK_EN,
		pcie->parf + PARF_MHI_CLOCK_RESET_CTRL);
	writel(GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS |
		GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL,
		pci->dbi_base + GEN3_RELATED_OFF);

	writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS |
		SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS |
		AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS,
		pcie->parf + PARF_SYS_CTRL);

	writel(0, pcie->parf + PARF_Q2A_FLUSH);

	/* The registers below are read-only config space; unlock DBI writes */
	dw_pcie_dbi_ro_wr_en(pci);

	writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP);

	/* Advertise no ASPM support in Link Capabilities */
	val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP);
	val &= ~PCI_EXP_LNKCAP_ASPMS;
	writel(val, pci->dbi_base + offset + PCI_EXP_LNKCAP);

	writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset +
			PCI_EXP_DEVCTL2);

	dw_pcie_dbi_ro_wr_dis(pci);

	/* Zero all 256 BDF-to-SID translation table entries */
	for (i = 0; i < 256; i++)
		writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i));

	return 0;
}
1254 
1255 static bool qcom_pcie_link_up(struct dw_pcie *pci)
1256 {
1257 	u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
1258 	u16 val = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);
1259 
1260 	return val & PCI_EXP_LNKSTA_DLLLA;
1261 }
1262 
1263 static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
1264 {
1265 	struct qcom_pcie_port *port;
1266 
1267 	list_for_each_entry(port, &pcie->ports, list)
1268 		phy_power_off(port->phy);
1269 }
1270 
1271 static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
1272 {
1273 	struct qcom_pcie_port *port;
1274 	int ret;
1275 
1276 	list_for_each_entry(port, &pcie->ports, list) {
1277 		ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
1278 		if (ret)
1279 			return ret;
1280 
1281 		ret = phy_power_on(port->phy);
1282 		if (ret) {
1283 			qcom_pcie_phy_power_off(pcie);
1284 			return ret;
1285 		}
1286 	}
1287 
1288 	return 0;
1289 }
1290 
/*
 * DWC host .init hook: bring up per-SoC resources, PHYs and slot power,
 * configure the controller, then release PERST# so the link can train.
 *
 * Error unwinding mirrors the bring-up order via the goto ladder below.
 */
static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);
	int ret;

	/* Keep the endpoint in reset while the controller is configured */
	qcom_pcie_perst_assert(pcie);

	ret = pcie->cfg->ops->init(pcie);
	if (ret)
		return ret;

	ret = qcom_pcie_phy_power_on(pcie);
	if (ret)
		goto err_deinit;

	ret = pci_pwrctrl_create_devices(pci->dev);
	if (ret)
		goto err_disable_phy;

	ret = pci_pwrctrl_power_on_devices(pci->dev);
	if (ret)
		goto err_pwrctrl_destroy;

	/* Optional per-IP-revision configuration */
	if (pcie->cfg->ops->post_init) {
		ret = pcie->cfg->ops->post_init(pcie);
		if (ret)
			goto err_pwrctrl_power_off;
	}

	qcom_pcie_clear_aspm_l0s(pcie->pci);
	dw_pcie_remove_capability(pcie->pci, PCI_CAP_ID_MSIX);
	dw_pcie_remove_ext_capability(pcie->pci, PCI_EXT_CAP_ID_DPC);

	qcom_pcie_perst_deassert(pcie);

	if (pcie->cfg->ops->config_sid) {
		ret = pcie->cfg->ops->config_sid(pcie);
		if (ret)
			goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	qcom_pcie_perst_assert(pcie);
err_pwrctrl_power_off:
	pci_pwrctrl_power_off_devices(pci->dev);
err_pwrctrl_destroy:
	/*
	 * NOTE(review): on -EPROBE_DEFER the pwrctrl devices are deliberately
	 * kept -- presumably so the deferred re-probe can reuse them; confirm.
	 */
	if (ret != -EPROBE_DEFER)
		pci_pwrctrl_destroy_devices(pci->dev);
err_disable_phy:
	qcom_pcie_phy_power_off(pcie);
err_deinit:
	pcie->cfg->ops->deinit(pcie);

	return ret;
}
1349 
/*
 * DWC host .deinit hook: assert PERST#, power off slot devices and PHYs,
 * then release per-SoC resources -- the reverse of qcom_pcie_host_init().
 */
static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct qcom_pcie *pcie = to_qcom_pcie(pci);

	qcom_pcie_perst_assert(pcie);

	/*
	 * No need to destroy pwrctrl devices as this function only gets called
	 * during system suspend as of now.
	 */
	pci_pwrctrl_power_off_devices(pci->dev);
	qcom_pcie_phy_power_off(pcie);
	pcie->cfg->ops->deinit(pcie);
}
1365 
1366 static void qcom_pcie_host_post_init(struct dw_pcie_rp *pp)
1367 {
1368 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
1369 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
1370 
1371 	if (pcie->cfg->ops->host_post_init)
1372 		pcie->cfg->ops->host_post_init(pcie);
1373 }
1374 
/* DWC host hooks shared by all non-firmware-managed platforms */
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
	.init		= qcom_pcie_host_init,
	.deinit		= qcom_pcie_host_deinit,
	.post_init	= qcom_pcie_host_post_init,
};

/* Qcom IP rev.: 2.1.0	Synopsys IP rev.: 4.01a */
static const struct qcom_pcie_ops ops_2_1_0 = {
	.get_resources = qcom_pcie_get_resources_2_1_0,
	.init = qcom_pcie_init_2_1_0,
	.post_init = qcom_pcie_post_init_2_1_0,
	.deinit = qcom_pcie_deinit_2_1_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 1.0.0	Synopsys IP rev.: 4.11a */
static const struct qcom_pcie_ops ops_1_0_0 = {
	.get_resources = qcom_pcie_get_resources_1_0_0,
	.init = qcom_pcie_init_1_0_0,
	.post_init = qcom_pcie_post_init_1_0_0,
	.deinit = qcom_pcie_deinit_1_0_0,
	.ltssm_enable = qcom_pcie_2_1_0_ltssm_enable,
};

/* Qcom IP rev.: 2.3.2	Synopsys IP rev.: 4.21a */
static const struct qcom_pcie_ops ops_2_3_2 = {
	.get_resources = qcom_pcie_get_resources_2_3_2,
	.init = qcom_pcie_init_2_3_2,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_3_2,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.4.0	Synopsys IP rev.: 4.20a */
static const struct qcom_pcie_ops ops_2_4_0 = {
	.get_resources = qcom_pcie_get_resources_2_4_0,
	.init = qcom_pcie_init_2_4_0,
	.post_init = qcom_pcie_post_init_2_3_2,
	.deinit = qcom_pcie_deinit_2_4_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.3.3	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_3_3 = {
	.get_resources = qcom_pcie_get_resources_2_3_3,
	.init = qcom_pcie_init_2_3_3,
	.post_init = qcom_pcie_post_init_2_3_3,
	.deinit = qcom_pcie_deinit_2_3_3,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.7.0	Synopsys IP rev.: 4.30a */
static const struct qcom_pcie_ops ops_2_7_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 1.9.0 -- only revision with BDF-to-SID translation */
static const struct qcom_pcie_ops ops_1_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
	.config_sid = qcom_pcie_config_sid_1_9_0,
};

/* Qcom IP rev.: 1.21.0  Synopsys IP rev.: 5.60a */
static const struct qcom_pcie_ops ops_1_21_0 = {
	.get_resources = qcom_pcie_get_resources_2_7_0,
	.init = qcom_pcie_init_2_7_0,
	.post_init = qcom_pcie_post_init_2_7_0,
	.host_post_init = qcom_pcie_host_post_init_2_7_0,
	.deinit = qcom_pcie_deinit_2_7_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};

/* Qcom IP rev.: 2.9.0  Synopsys IP rev.: 5.00a */
static const struct qcom_pcie_ops ops_2_9_0 = {
	.get_resources = qcom_pcie_get_resources_2_9_0,
	.init = qcom_pcie_init_2_9_0,
	.post_init = qcom_pcie_post_init_2_9_0,
	.deinit = qcom_pcie_deinit_2_9_0,
	.ltssm_enable = qcom_pcie_2_3_2_ltssm_enable,
};
1464 
/* Per-compatible configuration data, referenced from qcom_pcie_match[] */
static const struct qcom_pcie_cfg cfg_1_0_0 = {
	.ops = &ops_1_0_0,
};

static const struct qcom_pcie_cfg cfg_1_9_0 = {
	.ops = &ops_1_9_0,
};

static const struct qcom_pcie_cfg cfg_1_34_0 = {
	.ops = &ops_1_9_0,
	.override_no_snoop = true,
};

static const struct qcom_pcie_cfg cfg_2_1_0 = {
	.ops = &ops_2_1_0,
};

static const struct qcom_pcie_cfg cfg_2_3_2 = {
	.ops = &ops_2_3_2,
	.no_l0s = true,
};

static const struct qcom_pcie_cfg cfg_2_3_3 = {
	.ops = &ops_2_3_3,
};

static const struct qcom_pcie_cfg cfg_2_4_0 = {
	.ops = &ops_2_4_0,
};

static const struct qcom_pcie_cfg cfg_2_7_0 = {
	.ops = &ops_2_7_0,
};

static const struct qcom_pcie_cfg cfg_2_9_0 = {
	.ops = &ops_2_9_0,
};

static const struct qcom_pcie_cfg cfg_sc8280xp = {
	.ops = &ops_1_21_0,
	.no_l0s = true,
};

/* Controller fully managed by firmware; probed via the ECAM path */
static const struct qcom_pcie_cfg cfg_fw_managed = {
	.firmware_managed = true,
};

static const struct dw_pcie_ops dw_pcie_ops = {
	.link_up = qcom_pcie_link_up,
	.start_link = qcom_pcie_start_link,
};
1516 
1517 static int qcom_pcie_icc_init(struct qcom_pcie *pcie)
1518 {
1519 	struct dw_pcie *pci = pcie->pci;
1520 	int ret;
1521 
1522 	pcie->icc_mem = devm_of_icc_get(pci->dev, "pcie-mem");
1523 	if (IS_ERR(pcie->icc_mem))
1524 		return PTR_ERR(pcie->icc_mem);
1525 
1526 	pcie->icc_cpu = devm_of_icc_get(pci->dev, "cpu-pcie");
1527 	if (IS_ERR(pcie->icc_cpu))
1528 		return PTR_ERR(pcie->icc_cpu);
1529 	/*
1530 	 * Some Qualcomm platforms require interconnect bandwidth constraints
1531 	 * to be set before enabling interconnect clocks.
1532 	 *
1533 	 * Set an initial peak bandwidth corresponding to single-lane Gen 1
1534 	 * for the pcie-mem path.
1535 	 */
1536 	ret = icc_set_bw(pcie->icc_mem, 0, QCOM_PCIE_LINK_SPEED_TO_BW(1));
1537 	if (ret) {
1538 		dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
1539 			ret);
1540 		return ret;
1541 	}
1542 
1543 	/*
1544 	 * Since the CPU-PCIe path is only used for activities like register
1545 	 * access of the host controller and endpoint Config/BAR space access,
1546 	 * HW team has recommended to use a minimal bandwidth of 1KBps just to
1547 	 * keep the path active.
1548 	 */
1549 	ret = icc_set_bw(pcie->icc_cpu, 0, kBps_to_icc(1));
1550 	if (ret) {
1551 		dev_err(pci->dev, "Failed to set bandwidth for CPU-PCIe interconnect path: %d\n",
1552 			ret);
1553 		icc_set_bw(pcie->icc_mem, 0, 0);
1554 		return ret;
1555 	}
1556 
1557 	return 0;
1558 }
1559 
/*
 * Scale the interconnect bandwidth vote (or the OPP vote, when an OPP
 * table is in use) to match the currently negotiated link speed and
 * width. Does nothing while the link is down.
 */
static void qcom_pcie_icc_opp_update(struct qcom_pcie *pcie)
{
	u32 offset, status, width, speed;
	struct dw_pcie *pci = pcie->pci;
	struct dev_pm_opp_key key = {};
	unsigned long freq_kbps;
	struct dev_pm_opp *opp;
	int ret, freq_mbps;

	offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
	status = readw(pci->dbi_base + offset + PCI_EXP_LNKSTA);

	/* Only update constraints if link is up. */
	if (!(status & PCI_EXP_LNKSTA_DLLLA))
		return;

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, status);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, status);

	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0,
				 width * QCOM_PCIE_LINK_SPEED_TO_BW(speed));
		if (ret) {
			dev_err(pci->dev, "Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
		}
	} else if (pcie->use_pm_opp) {
		freq_mbps = pcie_dev_speed_mbps(pcie_get_link_speed(speed));
		if (freq_mbps < 0)
			return;

		freq_kbps = freq_mbps * KILO;
		opp = dev_pm_opp_find_level_exact(pci->dev, speed);
		if (IS_ERR(opp)) {
			/* opp-level is not defined; match on frequency only */
			opp = dev_pm_opp_find_freq_exact(pci->dev, freq_kbps * width,
							 true);
		} else {
			/* Drop the lookup reference, then re-find by freq + level key */
			dev_pm_opp_put(opp);

			key.freq = freq_kbps * width;
			key.level = speed;
			key.bw = 0;
			opp = dev_pm_opp_find_key_exact(pci->dev, &key, true);
		}
		if (!IS_ERR(opp)) {
			ret = dev_pm_opp_set_opp(pci->dev, opp);
			if (ret)
				dev_err(pci->dev, "Failed to set OPP for freq (%lu): %d\n",
					freq_kbps * width, ret);
			dev_pm_opp_put(opp);
		}
	}
}
1615 
1616 static int qcom_pcie_link_transition_count(struct seq_file *s, void *data)
1617 {
1618 	struct qcom_pcie *pcie = (struct qcom_pcie *)dev_get_drvdata(s->private);
1619 
1620 	seq_printf(s, "L0s transition count: %u\n",
1621 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L0S));
1622 
1623 	seq_printf(s, "L1 transition count: %u\n",
1624 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L1));
1625 
1626 	seq_printf(s, "L1.1 transition count: %u\n",
1627 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1));
1628 
1629 	seq_printf(s, "L1.2 transition count: %u\n",
1630 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2));
1631 
1632 	seq_printf(s, "L2 transition count: %u\n",
1633 		   readl_relaxed(pcie->mhi + PARF_DEBUG_CNT_PM_LINKST_IN_L2));
1634 
1635 	return 0;
1636 }
1637 
1638 static void qcom_pcie_init_debugfs(struct qcom_pcie *pcie)
1639 {
1640 	struct dw_pcie *pci = pcie->pci;
1641 	struct device *dev = pci->dev;
1642 	char *name;
1643 
1644 	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
1645 	if (!name)
1646 		return;
1647 
1648 	pcie->debugfs = debugfs_create_dir(name, NULL);
1649 	debugfs_create_devm_seqfile(dev, "link_transition_count", pcie->debugfs,
1650 				    qcom_pcie_link_transition_count);
1651 }
1652 
1653 static void qcom_pci_free_msi(void *ptr)
1654 {
1655 	struct dw_pcie_rp *pp = (struct dw_pcie_rp *)ptr;
1656 
1657 	if (pp && pp->use_imsi_rx)
1658 		dw_pcie_free_msi(pp);
1659 }
1660 
1661 static int qcom_pcie_ecam_host_init(struct pci_config_window *cfg)
1662 {
1663 	struct device *dev = cfg->parent;
1664 	struct dw_pcie_rp *pp;
1665 	struct dw_pcie *pci;
1666 	int ret;
1667 
1668 	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
1669 	if (!pci)
1670 		return -ENOMEM;
1671 
1672 	pci->dev = dev;
1673 	pp = &pci->pp;
1674 	pci->dbi_base = cfg->win;
1675 	pp->num_vectors = MSI_DEF_NUM_VECTORS;
1676 
1677 	ret = dw_pcie_msi_host_init(pp);
1678 	if (ret)
1679 		return ret;
1680 
1681 	pp->use_imsi_rx = true;
1682 	dw_pcie_msi_init(pp);
1683 
1684 	return devm_add_action_or_reset(dev, qcom_pci_free_msi, pp);
1685 }
1686 
/* Generic ECAM config accessors used by firmware-managed controllers */
static const struct pci_ecam_ops pci_qcom_ecam_ops = {
	.init		= qcom_pcie_ecam_host_init,
	.pci_ops	= {
		.map_bus	= pci_ecam_map_bus,
		.read		= pci_generic_config_read,
		.write		= pci_generic_config_write,
	}
};
1695 
1696 /* Parse PERST# from all nodes in depth first manner starting from @np */
1697 static int qcom_pcie_parse_perst(struct qcom_pcie *pcie,
1698 				 struct qcom_pcie_port *port,
1699 				 struct device_node *np)
1700 {
1701 	struct device *dev = pcie->pci->dev;
1702 	struct qcom_pcie_perst *perst;
1703 	struct gpio_desc *reset;
1704 	int ret;
1705 
1706 	if (!of_find_property(np, "reset-gpios", NULL))
1707 		goto parse_child_node;
1708 
1709 	reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(np), "reset",
1710 				      GPIOD_OUT_HIGH, "PERST#");
1711 	if (IS_ERR(reset)) {
1712 		/*
1713 		 * FIXME: GPIOLIB currently supports exclusive GPIO access only.
1714 		 * Non exclusive access is broken. But shared PERST# requires
1715 		 * non-exclusive access. So once GPIOLIB properly supports it,
1716 		 * implement it here.
1717 		 */
1718 		if (PTR_ERR(reset) == -EBUSY)
1719 			dev_err(dev, "Shared PERST# is not supported\n");
1720 
1721 		return PTR_ERR(reset);
1722 	}
1723 
1724 	perst = devm_kzalloc(dev, sizeof(*perst), GFP_KERNEL);
1725 	if (!perst)
1726 		return -ENOMEM;
1727 
1728 	INIT_LIST_HEAD(&perst->list);
1729 	perst->desc = reset;
1730 	list_add_tail(&perst->list, &port->perst);
1731 
1732 parse_child_node:
1733 	for_each_available_child_of_node_scoped(np, child) {
1734 		ret = qcom_pcie_parse_perst(pcie, port, child);
1735 		if (ret)
1736 			return ret;
1737 	}
1738 
1739 	return 0;
1740 }
1741 
1742 static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
1743 {
1744 	struct device *dev = pcie->pci->dev;
1745 	struct qcom_pcie_port *port;
1746 	struct phy *phy;
1747 	int ret;
1748 
1749 	phy = devm_of_phy_get(dev, node, NULL);
1750 	if (IS_ERR(phy))
1751 		return PTR_ERR(phy);
1752 
1753 	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
1754 	if (!port)
1755 		return -ENOMEM;
1756 
1757 	ret = phy_init(phy);
1758 	if (ret)
1759 		return ret;
1760 
1761 	INIT_LIST_HEAD(&port->perst);
1762 
1763 	ret = qcom_pcie_parse_perst(pcie, port, node);
1764 	if (ret)
1765 		return ret;
1766 
1767 	port->phy = phy;
1768 	INIT_LIST_HEAD(&port->list);
1769 	list_add_tail(&port->list, &pcie->ports);
1770 
1771 	return 0;
1772 }
1773 
/*
 * Parse every "pci" child node (Root Port) of the host bridge node.
 *
 * Returns -ENODEV when no "pci" child exists at all, which the caller
 * uses to fall back to the legacy flat binding. On a parse failure all
 * ports set up so far are unwound.
 */
static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
{
	struct qcom_pcie_perst *perst, *tmp_perst;
	struct qcom_pcie_port *port, *tmp_port;
	struct device *dev = pcie->pci->dev;
	int ret = -ENODEV;	/* stays -ENODEV if no "pci" child is found */

	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
		if (!of_node_is_type(of_port, "pci"))
			continue;
		ret = qcom_pcie_parse_port(pcie, of_port);
		if (ret)
			goto err_port_del;
	}

	return ret;

err_port_del:
	list_for_each_entry_safe(port, tmp_port, &pcie->ports, list) {
		list_for_each_entry_safe(perst, tmp_perst, &port->perst, list)
			list_del(&perst->list);
		phy_exit(port->phy);
		list_del(&port->list);
	}

	return ret;
}
1801 
1802 static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
1803 {
1804 	struct device *dev = pcie->pci->dev;
1805 	struct qcom_pcie_perst *perst;
1806 	struct qcom_pcie_port *port;
1807 	struct gpio_desc *reset;
1808 	struct phy *phy;
1809 	int ret;
1810 
1811 	phy = devm_phy_optional_get(dev, "pciephy");
1812 	if (IS_ERR(phy))
1813 		return PTR_ERR(phy);
1814 
1815 	reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
1816 	if (IS_ERR(reset))
1817 		return PTR_ERR(reset);
1818 
1819 	ret = phy_init(phy);
1820 	if (ret)
1821 		return ret;
1822 
1823 	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
1824 	if (!port)
1825 		return -ENOMEM;
1826 
1827 	perst = devm_kzalloc(dev, sizeof(*perst), GFP_KERNEL);
1828 	if (!perst)
1829 		return -ENOMEM;
1830 
1831 	port->phy = phy;
1832 	INIT_LIST_HEAD(&port->list);
1833 	list_add_tail(&port->list, &pcie->ports);
1834 
1835 	perst->desc = reset;
1836 	INIT_LIST_HEAD(&port->perst);
1837 	INIT_LIST_HEAD(&perst->list);
1838 	list_add_tail(&perst->list, &port->perst);
1839 
1840 	return 0;
1841 }
1842 
/*
 * Probe: look up per-SoC config data, take the firmware-managed ECAM
 * shortcut when applicable, otherwise map registers, set up OPP/ICC
 * votes, parse the port topology and register the DWC host.
 */
static int qcom_pcie_probe(struct platform_device *pdev)
{
	struct qcom_pcie_perst *perst, *tmp_perst;
	struct qcom_pcie_port *port, *tmp_port;
	const struct qcom_pcie_cfg *pcie_cfg;
	unsigned long max_freq = ULONG_MAX;
	struct device *dev = &pdev->dev;
	struct dev_pm_opp *opp;
	struct qcom_pcie *pcie;
	struct dw_pcie_rp *pp;
	struct resource *res;
	struct dw_pcie *pci;
	int ret;

	pcie_cfg = of_device_get_match_data(dev);
	if (!pcie_cfg) {
		dev_err(dev, "No platform data\n");
		return -ENODATA;
	}

	/* Non-firmware-managed platforms must provide per-IP ops */
	if (!pcie_cfg->firmware_managed && !pcie_cfg->ops) {
		dev_err(dev, "No platform ops\n");
		return -ENODATA;
	}

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_pm_runtime_put;

	/*
	 * Firmware-managed controllers are exposed through plain ECAM;
	 * none of the DWC/PARF setup below applies to them.
	 */
	if (pcie_cfg->firmware_managed) {
		struct pci_host_bridge *bridge;
		struct pci_config_window *cfg;

		bridge = devm_pci_alloc_host_bridge(dev, 0);
		if (!bridge) {
			ret = -ENOMEM;
			goto err_pm_runtime_put;
		}

		/* Parse and map our ECAM configuration space area */
		cfg = pci_host_common_ecam_create(dev, bridge,
				&pci_qcom_ecam_ops);
		if (IS_ERR(cfg)) {
			ret = PTR_ERR(cfg);
			goto err_pm_runtime_put;
		}

		bridge->sysdata = cfg;
		bridge->ops = (struct pci_ops *)&pci_qcom_ecam_ops.pci_ops;
		bridge->msi_domain = true;

		ret = pci_host_probe(bridge);
		if (ret)
			goto err_pm_runtime_put;

		return 0;
	}

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL);
	if (!pci) {
		ret = -ENOMEM;
		goto err_pm_runtime_put;
	}

	INIT_LIST_HEAD(&pcie->ports);

	pci->dev = dev;
	pci->ops = &dw_pcie_ops;
	pp = &pci->pp;

	pcie->pci = pci;

	pcie->cfg = pcie_cfg;

	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
	if (IS_ERR(pcie->parf)) {
		ret = PTR_ERR(pcie->parf);
		goto err_pm_runtime_put;
	}

	/* MHI region is optional */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mhi");
	if (res) {
		pcie->mhi = devm_ioremap_resource(dev, res);
		if (IS_ERR(pcie->mhi)) {
			ret = PTR_ERR(pcie->mhi);
			goto err_pm_runtime_put;
		}
	}

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(dev);
	if (ret && ret != -ENODEV) {
		dev_err_probe(dev, ret, "Failed to add OPP table\n");
		goto err_pm_runtime_put;
	}

	/*
	 * Before the PCIe link is initialized, vote for highest OPP in the OPP
	 * table, so that we are voting for maximum voltage corner for the
	 * link to come up in maximum supported speed. At the end of the
	 * probe(), OPP will be updated using qcom_pcie_icc_opp_update().
	 */
	if (!ret) {
		opp = dev_pm_opp_find_freq_floor(dev, &max_freq);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			dev_err_probe(pci->dev, ret,
				      "Unable to find max freq OPP\n");
			goto err_pm_runtime_put;
		} else {
			ret = dev_pm_opp_set_opp(dev, opp);
		}

		dev_pm_opp_put(opp);
		if (ret) {
			dev_err_probe(pci->dev, ret,
				      "Failed to set OPP for freq %lu\n",
				      max_freq);
			goto err_pm_runtime_put;
		}

		pcie->use_pm_opp = true;
	} else {
		/* Skip ICC init if OPP is supported as it is handled by OPP */
		ret = qcom_pcie_icc_init(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	ret = pcie->cfg->ops->get_resources(pcie);
	if (ret)
		goto err_pm_runtime_put;

	pp->ops = &qcom_pcie_dw_ops;

	ret = qcom_pcie_parse_ports(pcie);
	if (ret) {
		if (ret != -ENODEV) {
			dev_err_probe(pci->dev, ret,
				      "Failed to parse Root Port: %d\n", ret);
			goto err_pm_runtime_put;
		}

		/*
		 * In the case of properties not populated in Root Port node,
		 * fallback to the legacy method of parsing the Host Bridge
		 * node. This is to maintain DT backwards compatibility.
		 */
		ret = qcom_pcie_parse_legacy_binding(pcie);
		if (ret)
			goto err_pm_runtime_put;
	}

	platform_set_drvdata(pdev, pcie);

	ret = dw_pcie_host_init(pp);
	if (ret) {
		dev_err_probe(dev, ret, "cannot initialize host\n");
		goto err_phy_exit;
	}

	/* Scale the ICC/OPP vote down to the actually negotiated link */
	qcom_pcie_icc_opp_update(pcie);

	if (pcie->mhi)
		qcom_pcie_init_debugfs(pcie);

	return 0;

err_phy_exit:
	/* Unwind everything qcom_pcie_parse_ports()/legacy binding set up */
	list_for_each_entry_safe(port, tmp_port, &pcie->ports, list) {
		list_for_each_entry_safe(perst, tmp_perst, &port->perst, list)
			list_del(&perst->list);
		phy_exit(port->phy);
		list_del(&port->list);
	}
err_pm_runtime_put:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);

	return ret;
}
2032 
/*
 * noirq suspend: drop the pcie-mem vote to a keep-alive minimum, fully
 * deinit the controller only when the link is already down, and release
 * the CPU-PCIe path/OPP vote except on suspend-to-RAM.
 */
static int qcom_pcie_suspend_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret = 0;

	/* drvdata is only set for non-firmware-managed controllers */
	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	/*
	 * Set minimum bandwidth required to keep data path functional during
	 * suspend.
	 */
	if (pcie->icc_mem) {
		ret = icc_set_bw(pcie->icc_mem, 0, kBps_to_icc(1));
		if (ret) {
			dev_err(dev,
				"Failed to set bandwidth for PCIe-MEM interconnect path: %d\n",
				ret);
			return ret;
		}
	}

	/*
	 * Turn OFF the resources only for controllers without active PCIe
	 * devices. For controllers with active devices, the resources are kept
	 * ON and the link is expected to be in L0/L1 (sub)states.
	 *
	 * Turning OFF the resources for controllers with active PCIe devices
	 * will trigger access violation during the end of the suspend cycle,
	 * as kernel tries to access the PCIe devices config space for masking
	 * MSIs.
	 *
	 * Also, it is not desirable to put the link into L2/L3 state as that
	 * implies VDD supply will be removed and the devices may go into
	 * powerdown state. This will affect the lifetime of the storage devices
	 * like NVMe.
	 */
	if (!dw_pcie_link_up(pcie->pci)) {
		qcom_pcie_host_deinit(&pcie->pci->pp);
		pcie->suspended = true;
	}

	/*
	 * Only disable CPU-PCIe interconnect path if the suspend is non-S2RAM.
	 * Because on some platforms, DBI access can happen very late during the
	 * S2RAM and a non-active CPU-PCIe interconnect path may lead to NoC
	 * error.
	 */
	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_disable(pcie->icc_cpu);
		if (ret)
			dev_err(dev, "Failed to disable CPU-PCIe interconnect path: %d\n", ret);

		if (pcie->use_pm_opp)
			dev_pm_opp_set_opp(pcie->pci->dev, NULL);
	}
	return ret;
}
2092 
/*
 * noirq resume: mirror of qcom_pcie_suspend_noirq() -- re-enable the
 * CPU-PCIe path (unless it was kept alive for S2RAM), re-init the host
 * when it was torn down, and rescale the bandwidth/OPP vote.
 */
static int qcom_pcie_resume_noirq(struct device *dev)
{
	struct qcom_pcie *pcie;
	int ret;

	/* drvdata is only set for non-firmware-managed controllers */
	pcie = dev_get_drvdata(dev);
	if (!pcie)
		return 0;

	if (pm_suspend_target_state != PM_SUSPEND_MEM) {
		ret = icc_enable(pcie->icc_cpu);
		if (ret) {
			dev_err(dev, "Failed to enable CPU-PCIe interconnect path: %d\n", ret);
			return ret;
		}
	}

	if (pcie->suspended) {
		ret = qcom_pcie_host_init(&pcie->pci->pp);
		if (ret)
			return ret;

		pcie->suspended = false;
	}

	qcom_pcie_icc_opp_update(pcie);

	return 0;
}
2122 
/* Match table; entries are kept sorted alphabetically by compatible */
static const struct of_device_id qcom_pcie_match[] = {
	{ .compatible = "qcom,pcie-apq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-apq8084", .data = &cfg_1_0_0 },
	{ .compatible = "qcom,pcie-ipq4019", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-ipq5018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq6018", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
	{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
	{ .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-ipq9574", .data = &cfg_2_9_0 },
	{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
	{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
	{ .compatible = "qcom,pcie-sa8255p", .data = &cfg_fw_managed },
	{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sa8775p", .data = &cfg_1_34_0},
	{ .compatible = "qcom,pcie-sc7280", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8180x", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sc8280xp", .data = &cfg_sc8280xp },
	{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
	{ .compatible = "qcom,pcie-sdx55", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-sm8550", .data = &cfg_1_9_0 },
	{ .compatible = "qcom,pcie-x1e80100", .data = &cfg_sc8280xp },
	{ }
};
2153 
/*
 * Override the class code of the root port so the PCI core treats it as a
 * standard PCI-to-PCI bridge.
 *
 * NOTE(review): presumably these Qualcomm root ports advertise a
 * non-bridge class code in config space, which would otherwise prevent
 * normal bridge enumeration — confirm against the hardware documentation.
 */
static void qcom_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
/* Apply the class override early, for every known Qualcomm root port ID */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0101, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0104, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0106, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0107, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x0302, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1000, qcom_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, 0x1001, qcom_fixup_class);
2165 
/*
 * System sleep hooks, registered for the noirq phase so they run after
 * (suspend) / before (resume) regular device callbacks and interrupts.
 */
static const struct dev_pm_ops qcom_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(qcom_pcie_suspend_noirq, qcom_pcie_resume_noirq)
};
2169 
static struct platform_driver qcom_pcie_driver = {
	.probe = qcom_pcie_probe,
	.driver = {
		.name = "qcom-pcie",
		/* Unbinding the controller is not supported; hide sysfs bind/unbind */
		.suppress_bind_attrs = true,
		.of_match_table = qcom_pcie_match,
		.pm = &qcom_pcie_pm_ops,
		/* Allow probing in parallel with other drivers to speed up boot */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/* Built-in only: registered at device_initcall time, no module unload path */
builtin_platform_driver(qcom_pcie_driver);
2181