// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for the following SoCs
 * Tegra194
 * Tegra234
 *
 * Copyright (C) 2019-2022 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define TEGRA194_DWC_IP_VER                     0x490A
#define TEGRA234_DWC_IP_VER                     0x562A

#define APPL_PINMUX                             0x0
#define APPL_PINMUX_PEX_RST                     BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN          BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE             BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN   BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE      BIT(5)

#define APPL_CTRL                               0x4
#define APPL_CTRL_SYS_PRE_DET_STATE             BIT(6)
#define APPL_CTRL_LTSSM_EN                      BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN                 BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK          GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT         22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST      0x1
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN     0x2

#define APPL_INTR_EN_L0_0                       0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN     BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN        BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN            BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN     BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN    BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN           BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN       BIT(31)

#define APPL_INTR_STATUS_L0                     0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT      BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT             BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT      BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT         BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT     BIT(18)

#define APPL_INTR_EN_L1_0_0                             0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN     BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN         BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN       BIT(30)

#define APPL_INTR_STATUS_L1_0_0                         0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED  BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED      BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE          BIT(30)

#define APPL_INTR_STATUS_L1_1                   0x2C
#define APPL_INTR_STATUS_L1_2                   0x30
#define APPL_INTR_STATUS_L1_3                   0x34
#define APPL_INTR_STATUS_L1_6                   0x3C
#define APPL_INTR_STATUS_L1_7                   0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED    BIT(1)

#define APPL_INTR_EN_L1_8_0                     0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN         BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN        BIT(3)
#define APPL_INTR_EN_L1_8_INTX_EN               BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN            BIT(15)

#define APPL_INTR_STATUS_L1_8_0                 0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK   GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS  BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS BIT(3)

#define APPL_INTR_STATUS_L1_9                   0x54
#define APPL_INTR_STATUS_L1_10                  0x58
#define APPL_INTR_STATUS_L1_11                  0x64
#define APPL_INTR_STATUS_L1_13                  0x74
#define APPL_INTR_STATUS_L1_14                  0x78
#define APPL_INTR_STATUS_L1_15                  0x7C
#define APPL_INTR_STATUS_L1_17                  0x88

#define APPL_INTR_EN_L1_18                      0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT    BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR  BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR        BIT(0)

#define APPL_INTR_STATUS_L1_18                  0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT        BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR      BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR    BIT(0)

#define APPL_MSI_CTRL_1                         0xAC

#define APPL_MSI_CTRL_2                         0xB0

#define APPL_LEGACY_INTX                        0xB8

#define APPL_LTR_MSG_1                          0xC4
#define LTR_MSG_REQ                             BIT(15)
#define LTR_MST_NO_SNOOP_SHIFT                  16

#define APPL_LTR_MSG_2                          0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE        BIT(3)

#define APPL_LINK_STATUS                        0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP           BIT(0)

#define APPL_DEBUG                              0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT          BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0              0x11
#define APPL_DEBUG_LTSSM_STATE_MASK             GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT            3
#define LTSSM_STATE_PRE_DETECT                  5

#define APPL_RADM_STATUS                        0xE4
#define APPL_PM_XMT_TURNOFF_STATE               BIT(0)

#define APPL_DM_TYPE                            0x100
#define APPL_DM_TYPE_MASK                       GENMASK(3, 0)
#define APPL_DM_TYPE_RP                         0x4
#define APPL_DM_TYPE_EP                         0x0

#define APPL_CFG_BASE_ADDR                      0x104
#define APPL_CFG_BASE_ADDR_MASK                 GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR             0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK        GENMASK(31, 18)

#define APPL_CFG_MISC                           0x110
#define APPL_CFG_MISC_SLV_EP_MODE               BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK              GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT             10
#define APPL_CFG_MISC_ARCACHE_VAL               3

#define APPL_CFG_SLCG_OVERRIDE                  0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER   BIT(0)

#define APPL_CAR_RESET_OVRD                     0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N     BIT(0)

#define IO_BASE_IO_DECODE                       BIT(0)
#define IO_BASE_IO_DECODE_BIT8                  BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE      BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE        BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF         0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT            (19)

#define N_FTS_VAL                               52
#define FTS_VAL                                 52

#define GEN3_EQ_CONTROL_OFF                     0x8a8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT  8
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK   GENMASK(23, 8)
#define GEN3_EQ_CONTROL_OFF_FB_MODE_MASK        GENMASK(3, 0)

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT  0x8D0
#define AMBA_ERROR_RESPONSE_CRS_SHIFT           3
#define AMBA_ERROR_RESPONSE_CRS_MASK            GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_CRS_OKAY            0
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFFFFFF   1
#define AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001   2

#define MSIX_ADDR_MATCH_LOW_OFF                 0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN              BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK            GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF                0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK           GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL                0x948

#define CAP_SPCIE_CAP_OFF                       0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK   GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK   GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT  8

#define PME_ACK_TIMEOUT 10000

#define LTSSM_TIMEOUT 50000     /* 50ms */

#define GEN3_GEN4_EQ_PRESET_INIT 5

#define GEN1_CORE_CLK_FREQ      62500000
#define GEN2_CORE_CLK_FREQ      125000000
#define GEN3_CORE_CLK_FREQ      250000000
#define GEN4_CORE_CLK_FREQ      500000000

#define LTR_MSG_TIMEOUT         (100 * 1000)

#define PERST_DEBOUNCE_TIME     (5 * 1000)

#define EP_STATE_DISABLED       0
#define EP_STATE_ENABLED        1

static const unsigned int pcie_gen_freq[] = {
        GEN1_CORE_CLK_FREQ,     /* PCI_EXP_LNKSTA_CLS == 0; undefined */
        GEN1_CORE_CLK_FREQ,
        GEN2_CORE_CLK_FREQ,
        GEN3_CORE_CLK_FREQ,
        GEN4_CORE_CLK_FREQ
};

struct tegra_pcie_dw_of_data {
        u32 version;
        enum dw_pcie_device_mode mode;
        bool has_msix_doorbell_access_fix;
        bool has_sbr_reset_fix;
        bool has_l1ss_exit_fix;
        bool has_ltr_req_fix;
        u32 cdm_chk_int_en_bit;
        u32 gen4_preset_vec;
        u8 n_fts[2];
};

struct tegra_pcie_dw {
        struct device *dev;
        struct resource *appl_res;
        struct resource *dbi_res;
        struct resource *atu_dma_res;
        void __iomem *appl_base;
        struct clk *core_clk;
        struct reset_control *core_apb_rst;
        struct reset_control *core_rst;
        struct dw_pcie pci;
        struct tegra_bpmp *bpmp;

        struct tegra_pcie_dw_of_data *of_data;

        bool supports_clkreq;
        bool enable_cdm_check;
        bool enable_srns;
        bool link_state;
        bool update_fc_fixup;
        bool enable_ext_refclk;
        u8 init_link_width;
        u32 msi_ctrl_int;
        u32 num_lanes;
        u32 cid;
        u32 cfg_link_cap_l1sub;
        u32 ras_des_cap;
        u32 pcie_cap_base;
        u32 aspm_cmrt;
        u32 aspm_pwr_on_t;
        u32 aspm_l0s_enter_lat;

        struct regulator *pex_ctl_supply;
        struct regulator *slot_ctl_3v3;
        struct regulator *slot_ctl_12v;

        unsigned int phy_count;
        struct phy **phys;

        struct dentry *debugfs;

        /* Endpoint mode specific */
        struct gpio_desc *pex_rst_gpiod;
        struct gpio_desc *pex_refclk_sel_gpiod;
        unsigned int pex_rst_irq;
        int ep_state;
        long link_status;
        struct icc_path *icc_path;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
        return container_of(pci, struct tegra_pcie_dw, pci);
}

static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
                               const u32 reg)
{
        writel_relaxed(value, pcie->appl_base + reg);
}

static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
        return readl_relaxed(pcie->appl_base + reg);
}

struct tegra_pcie_soc {
        enum dw_pcie_device_mode mode;
};

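/*
 * Scale the interconnect bandwidth request and the controller core clock
 * to match the currently negotiated link speed and width.
 */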
static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
{
        struct dw_pcie *pci = &pcie->pci;
        u32 val, speed, width;

        val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

        speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
        width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);

        val = width * (PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]) / BITS_PER_BYTE);

        if (icc_set_bw(pcie->icc_path, MBps_to_icc(val), 0))
                dev_err(pcie->dev, "can't set bw[%u]\n", val);

        if (speed >= ARRAY_SIZE(pcie_gen_freq))
                speed = 0;

        clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
}

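/*
 * If the Link Bandwidth Management Status indicates that the link came up
 * with a narrower width than it initially trained to, force the target
 * link speed to 2.5 GT/s and retrain the link.
 */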
static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
        u32 current_link_width;
        u16 val;

        /*
         * NOTE: Since this scenario is uncommon and the link is not stable
         * anyway, don't wait to confirm whether the link is really
         * transitioning to Gen-2 speed.
         */
        val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
        if (val & PCI_EXP_LNKSTA_LBMS) {
                current_link_width = (val & PCI_EXP_LNKSTA_NLW) >>
                                     PCI_EXP_LNKSTA_NLW_SHIFT;
                if (pcie->init_link_width > current_link_width) {
                        dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
                        val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
                                                PCI_EXP_LNKCTL2);
                        val &= ~PCI_EXP_LNKCTL2_TLS;
                        val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
                        dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
                                           PCI_EXP_LNKCTL2, val);

                        val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
                                                PCI_EXP_LNKCTL);
                        val |= PCI_EXP_LNKCTL_RL;
                        dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
                                           PCI_EXP_LNKCTL, val);
                }
        }
}

static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
        struct tegra_pcie_dw *pcie = arg;
        struct dw_pcie *pci = &pcie->pci;
        struct dw_pcie_rp *pp = &pci->pp;
        u32 val, status_l0, status_l1;
        u16 val_w;

        status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
        if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
                status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
                appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
                if (!pcie->of_data->has_sbr_reset_fix &&
                    status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
                        /* SBR & Surprise Link Down WAR */
                        val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
                        val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
                        appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
                        udelay(1);
                        val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
                        val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
                        appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

                        val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
                        val |= PORT_LOGIC_SPEED_CHANGE;
                        dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
                }
        }

        if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
                status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
                if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
                        appl_writel(pcie,
                                    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
                                    APPL_INTR_STATUS_L1_8_0);
                        apply_bad_link_workaround(pp);
                }
                if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
                        val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
                                                  PCI_EXP_LNKSTA);
                        val_w |= PCI_EXP_LNKSTA_LBMS;
                        dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
                                           PCI_EXP_LNKSTA, val_w);

                        appl_writel(pcie,
                                    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
                                    APPL_INTR_STATUS_L1_8_0);

                        val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
                                                  PCI_EXP_LNKSTA);
                        dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
                                PCI_EXP_LNKSTA_CLS);
                }
        }

        if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
                status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
                val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
                if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
                        dev_info(pci->dev, "CDM check complete\n");
                        val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
                }
                if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
                        dev_err(pci->dev, "CDM comparison mismatch\n");
                        val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
                }
                if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
                        dev_err(pci->dev, "CDM Logic error\n");
                        val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
                }
                dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
                val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
                dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
        }

        return IRQ_HANDLED;
}

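/*
 * A hot reset from the host leaves stale interrupt statuses behind; clear
 * them all and re-enable the LTSSM so that the link can train again.
 */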
static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
        u32 val;

        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
        appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

        val = appl_readl(pcie, APPL_CTRL);
        val |= APPL_CTRL_LTSSM_EN;
        appl_writel(pcie, val, APPL_CTRL);
}

static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
        struct tegra_pcie_dw *pcie = arg;
        struct dw_pcie_ep *ep = &pcie->pci.ep;
        struct dw_pcie *pci = &pcie->pci;
        u32 val;

        if (test_and_clear_bit(0, &pcie->link_status))
                dw_pcie_ep_linkup(ep);

        tegra_pcie_icc_set(pcie);

        if (pcie->of_data->has_ltr_req_fix)
                return IRQ_HANDLED;

        /* If EP doesn't advertise L1SS, just return */
        val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
        if (!(val & (PCI_L1SS_CAP_ASPM_L1_1 | PCI_L1SS_CAP_ASPM_L1_2)))
                return IRQ_HANDLED;

        /* Check if BME is set to '1' */
        val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
        if (val & PCI_COMMAND_MASTER) {
                ktime_t timeout;

                /* 110us for both snoop and no-snoop */
                val = 110 | (2 << PCI_LTR_SCALE_SHIFT) | LTR_MSG_REQ;
                val |= (val << LTR_MST_NO_SNOOP_SHIFT);
                appl_writel(pcie, val, APPL_LTR_MSG_1);

                /* Send LTR upstream */
                val = appl_readl(pcie, APPL_LTR_MSG_2);
                val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
                appl_writel(pcie, val, APPL_LTR_MSG_2);

                timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
                for (;;) {
                        val = appl_readl(pcie, APPL_LTR_MSG_2);
                        if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
                                break;
                        if (ktime_after(ktime_get(), timeout))
                                break;
                        usleep_range(1000, 1100);
                }
                if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
                        dev_err(pcie->dev, "Failed to send LTR message\n");
        }

        return IRQ_HANDLED;
}

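/*
 * Hard IRQ handler for endpoint mode: acknowledge link-state and BME
 * change events here and defer link-up notification and LTR messaging to
 * the IRQ thread by returning IRQ_WAKE_THREAD.
 */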
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
        struct tegra_pcie_dw *pcie = arg;
        int spurious = 1;
        u32 status_l0, status_l1, link_status;

        status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
        if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
                status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
                appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

                if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
                        pex_ep_event_hot_rst_done(pcie);

                if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
                        link_status = appl_readl(pcie, APPL_LINK_STATUS);
                        if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
                                dev_dbg(pcie->dev, "Link is up with Host\n");
                                set_bit(0, &pcie->link_status);
                                return IRQ_WAKE_THREAD;
                        }
                }

                spurious = 0;
        }

        if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
                status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
                appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

                if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
                        return IRQ_WAKE_THREAD;

                spurious = 0;
        }

        if (spurious) {
                dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
                         status_l0);
                appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
        }

        return IRQ_HANDLED;
}

static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
                                     int size, u32 *val)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

        /*
         * This is an endpoint-mode-specific register that happens to appear
         * even when the controller is operating in Root Port mode, and the
         * system hangs when it is accessed with the link in the ASPM-L1
         * state. So skip accessing it altogether.
         */
        if (!pcie->of_data->has_msix_doorbell_access_fix &&
            !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
                *val = 0x00000000;
                return PCIBIOS_SUCCESSFUL;
        }

        return pci_generic_config_read(bus, devfn, where, size, val);
}

static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
                                     int size, u32 val)
{
        struct dw_pcie_rp *pp = bus->sysdata;
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

        /*
         * This is an endpoint-mode-specific register that happens to appear
         * even when the controller is operating in Root Port mode, and the
         * system hangs when it is accessed with the link in the ASPM-L1
         * state. So skip accessing it altogether.
         */
        if (!pcie->of_data->has_msix_doorbell_access_fix &&
            !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
                return PCIBIOS_SUCCESSFUL;

        return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
        .map_bus = dw_pcie_own_conf_map_bus,
        .read = tegra_pcie_dw_rd_own_conf,
        .write = tegra_pcie_dw_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
{
        u32 val;

        val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
        val &= ~PCI_L1SS_CAP_ASPM_L1_1;
        dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
{
        u32 val;

        val = dw_pcie_readl_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub);
        val &= ~PCI_L1SS_CAP_ASPM_L1_2;
        dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}

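/*
 * Select one of the Group-5 events in the RAS DES event counter (used
 * here for the ASPM L0s/L1 entry counts), enable counting and return the
 * current count for that event.
 */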
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
        u32 val;

        val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
                                PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
        val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
        val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
        val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
        val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
        dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
                           PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
        val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
                                PCIE_RAS_DES_EVENT_COUNTER_DATA);

        return val;
}

static int aspm_state_cnt(struct seq_file *s, void *data)
{
        struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
                                     dev_get_drvdata(s->private);
        u32 val;

        seq_printf(s, "Tx L0s entry count : %u\n",
                   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

        seq_printf(s, "Rx L0s entry count : %u\n",
                   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

        seq_printf(s, "Link L1 entry count : %u\n",
                   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

        seq_printf(s, "Link L1.1 entry count : %u\n",
                   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

        seq_printf(s, "Link L1.2 entry count : %u\n",
                   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

        /* Clear all counters */
        dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
                           PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
                           EVENT_COUNTER_ALL_CLEAR);

        /* Re-enable counting */
        val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
        val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
        dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
                           PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

        return 0;
}

static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
        struct dw_pcie *pci = &pcie->pci;
        u32 val;

        val = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);
        pcie->cfg_link_cap_l1sub = val + PCI_L1SS_CAP;

        pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
                                                        PCI_EXT_CAP_ID_VNDR);

        /* Enable ASPM counters */
        val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
        val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
        dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
                           PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

        /* Program T_cmrt and T_pwr_on values */
        val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub);
        val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
        val |= (pcie->aspm_cmrt << 8);
        val |= (pcie->aspm_pwr_on_t << 19);
        dw_pcie_writel_dbi(pci, pcie->cfg_link_cap_l1sub, val);

        /* Program L0s and L1 entrance latencies */
        val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
        val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
        val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
        val |= PORT_AFR_ENTER_ASPM;
        dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

static void init_debugfs(struct tegra_pcie_dw *pcie)
{
        debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
                                    aspm_state_cnt);
}
#else
static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif

static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
        u32 val;
        u16 val_w;

        val = appl_readl(pcie, APPL_INTR_EN_L0_0);
        val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
        appl_writel(pcie, val, APPL_INTR_EN_L0_0);

        if (!pcie->of_data->has_sbr_reset_fix) {
                val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
                val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
                appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
        }

        if (pcie->enable_cdm_check) {
                val = appl_readl(pcie, APPL_INTR_EN_L0_0);
                val |= pcie->of_data->cdm_chk_int_en_bit;
                appl_writel(pcie, val, APPL_INTR_EN_L0_0);

                val = appl_readl(pcie, APPL_INTR_EN_L1_18);
                val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
                val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
                appl_writel(pcie, val, APPL_INTR_EN_L1_18);
        }

        val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
                                  PCI_EXP_LNKSTA);
        pcie->init_link_width = (val_w & PCI_EXP_LNKSTA_NLW) >>
                                PCI_EXP_LNKSTA_NLW_SHIFT;

        val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
                                  PCI_EXP_LNKCTL);
        val_w |= PCI_EXP_LNKCTL_LBMIE;
        dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
                           val_w);
}

static void tegra_pcie_enable_legacy_interrupts(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
        u32 val;

        /* Enable legacy interrupt generation */
        val = appl_readl(pcie, APPL_INTR_EN_L0_0);
        val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
        val |= APPL_INTR_EN_L0_0_INT_INT_EN;
        appl_writel(pcie, val, APPL_INTR_EN_L0_0);

        val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
        val |= APPL_INTR_EN_L1_8_INTX_EN;
        val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
        val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
        if (IS_ENABLED(CONFIG_PCIEAER))
                val |= APPL_INTR_EN_L1_8_AER_INT_EN;
        appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
        u32 val;

        /* Enable MSI interrupt generation */
        val = appl_readl(pcie, APPL_INTR_EN_L0_0);
        val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
        val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
        appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

        /* Clear interrupt statuses before enabling interrupts */
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
        appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

        tegra_pcie_enable_system_interrupts(pp);
        tegra_pcie_enable_legacy_interrupts(pp);
        if (IS_ENABLED(CONFIG_PCI_MSI))
                tegra_pcie_enable_msi_interrupts(pp);
}

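/*
 * Program the per-lane Gen3/Gen4 equalization presets and the preset
 * request vectors used during link equalization; the Gen4 vector comes
 * from SoC-specific data.
 */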
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
        struct dw_pcie *pci = &pcie->pci;
        u32 val, offset, i;

        /* Program init preset */
        for (i = 0; i < pcie->num_lanes; i++) {
                val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
                val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
                val |= GEN3_GEN4_EQ_PRESET_INIT;
                val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
                val |= (GEN3_GEN4_EQ_PRESET_INIT <<
                        CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
                dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

                offset = dw_pcie_find_ext_capability(pci,
                                                     PCI_EXT_CAP_ID_PL_16GT) +
                                PCI_PL_16GT_LE_CTRL;
                val = dw_pcie_readb_dbi(pci, offset + i);
                val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
                val |= GEN3_GEN4_EQ_PRESET_INIT;
                val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
                val |= (GEN3_GEN4_EQ_PRESET_INIT <<
                        PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
                dw_pcie_writeb_dbi(pci, offset + i, val);
        }

        val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
        val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
        dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

        val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
        val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
        val |= (0x3ff << GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
        val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
        dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

        val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
        val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
        val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
        dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

        val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
        val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_MASK;
        val |= (pcie->of_data->gen4_preset_vec <<
                GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC_SHIFT);
        val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE_MASK;
        dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

        val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
        val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
        dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}

static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
{
        struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
        u32 val;
        u16 val_16;

        pp->bridge->ops = &tegra_pci_ops;

        if (!pcie->pcie_cap_base)
                pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
                                                              PCI_CAP_ID_EXP);

        val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
        val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
        dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

        val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
        val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
        val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
        dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

        dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

        /* Enable as 0xFFFF0001 response for CRS */
        val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
        val &= ~(AMBA_ERROR_RESPONSE_CRS_MASK << AMBA_ERROR_RESPONSE_CRS_SHIFT);
        val |= (AMBA_ERROR_RESPONSE_CRS_OKAY_FFFF0001 <<
                AMBA_ERROR_RESPONSE_CRS_SHIFT);
        dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

        /* Configure Max lane width from DT */
        val = dw_pcie_readl_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP);
        val &= ~PCI_EXP_LNKCAP_MLW;
        val |= (pcie->num_lanes << PCI_EXP_LNKSTA_NLW_SHIFT);
        dw_pcie_writel_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKCAP, val);

        /* Clear Slot Clock Configuration bit if SRNS configuration */
        if (pcie->enable_srns) {
                val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
                                           PCI_EXP_LNKSTA);
                val_16 &= ~PCI_EXP_LNKSTA_SLC;
                dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
                                   val_16);
        }

        config_gen3_gen4_eq_presets(pcie);

        init_host_aspm(pcie);

        /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
        if (!pcie->supports_clkreq) {
                disable_aspm_l11(pcie);
                disable_aspm_l12(pcie);
        }

        if (!pcie->of_data->has_l1ss_exit_fix) {
                val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
                val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
                dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
        }

        if (pcie->update_fc_fixup) {
                val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
                val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
                dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
        }

        clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);

        return 0;
}

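/*
 * Root Port link bring-up: assert PERST#, enable the LTSSM, de-assert
 * PERST# and wait for link training. In endpoint mode only the PERST#
 * IRQ is enabled here. If training fails in a way that points at a Data
 * Link Feature incompatibility, retry once with DLF disabled.
 */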
static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
        struct dw_pcie_rp *pp = &pci->pp;
        u32 val, offset, tmp;
        bool retry = true;

        if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
                enable_irq(pcie->pex_rst_irq);
                return 0;
        }

retry_link:
        /* Assert RST */
        val = appl_readl(pcie, APPL_PINMUX);
        val &= ~APPL_PINMUX_PEX_RST;
        appl_writel(pcie, val, APPL_PINMUX);

        usleep_range(100, 200);

        /* Enable LTSSM */
        val = appl_readl(pcie, APPL_CTRL);
        val |= APPL_CTRL_LTSSM_EN;
        appl_writel(pcie, val, APPL_CTRL);

        /* De-assert RST */
        val = appl_readl(pcie, APPL_PINMUX);
        val |= APPL_PINMUX_PEX_RST;
        appl_writel(pcie, val, APPL_PINMUX);

        msleep(100);

        if (dw_pcie_wait_for_link(pci)) {
                if (!retry)
                        return 0;
                /*
                 * Some endpoints cannot get the link up if the Root Port
                 * has the Data Link Feature (DLF) enabled. Refer to spec
                 * rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info on
                 * Scaled Flow Control and DLF. So confirm that this is
                 * indeed the case here and attempt link-up once again
                 * with DLF disabled.
                 */
                val = appl_readl(pcie, APPL_DEBUG);
                val &= APPL_DEBUG_LTSSM_STATE_MASK;
                val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
                tmp = appl_readl(pcie, APPL_LINK_STATUS);
                tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
                if (!(val == 0x11 && !tmp)) {
                        /* Link is down for all good reasons */
                        return 0;
                }

                dev_info(pci->dev, "Link is down in DLL\n");
                dev_info(pci->dev, "Trying again with DLFE disabled\n");
                /* Disable LTSSM */
                val = appl_readl(pcie, APPL_CTRL);
                val &= ~APPL_CTRL_LTSSM_EN;
                appl_writel(pcie, val, APPL_CTRL);

                reset_control_assert(pcie->core_rst);
                reset_control_deassert(pcie->core_rst);

                offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
                val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
                val &= ~PCI_DLF_EXCHANGE_ENABLE;
                dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

                tegra_pcie_dw_host_init(pp);
                dw_pcie_setup_rc(pp);

                retry = false;
                goto retry_link;
        }

        tegra_pcie_icc_set(pcie);

        tegra_pcie_enable_interrupts(pp);

        return 0;
}

static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
        u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

        return !!(val & PCI_EXP_LNKSTA_DLLLA);
}

static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
        struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

        disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
        .link_up = tegra_pcie_dw_link_up,
        .start_link = tegra_pcie_dw_start_link,
        .stop_link = tegra_pcie_dw_stop_link,
};

static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
        .host_init = tegra_pcie_dw_host_init,
};

static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
        unsigned int phy_count = pcie->phy_count;

        while (phy_count--) {
                phy_power_off(pcie->phys[phy_count]);
                phy_exit(pcie->phys[phy_count]);
        }
}

static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
        unsigned int i;
        int ret;

        for (i = 0; i < pcie->phy_count; i++) {
                ret = phy_init(pcie->phys[i]);
                if (ret < 0)
                        goto phy_power_off;

                ret = phy_power_on(pcie->phys[i]);
                if (ret < 0)
                        goto phy_exit;
        }

        return 0;

phy_power_off:
        while (i--) {
                phy_power_off(pcie->phys[i]);
phy_exit:
                phy_exit(pcie->phys[i]);
        }

        return ret;
}

static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
{
        struct platform_device *pdev = to_platform_device(pcie->dev);
        struct device_node *np = pcie->dev->of_node;
        int ret;

        pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
        if (!pcie->dbi_res) {
                dev_err(pcie->dev, "Failed to find \"dbi\" region\n");
                return -ENODEV;
        }

        ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt);
        if (ret < 0) {
                dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret);
                return ret;
        }

        ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us",
                                   &pcie->aspm_pwr_on_t);
        if (ret < 0)
                dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n",
                         ret);

        ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us",
                                   &pcie->aspm_l0s_enter_lat);
        if (ret < 0)
                dev_info(pcie->dev,
                         "Failed to read ASPM L0s Entrance latency: %d\n", ret);

        ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes);
        if (ret < 0) {
                dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret);
                return ret;
        }

        ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid);
        if (ret) {
                dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret);
                return ret;
        }

        ret = of_property_count_strings(np, "phy-names");
        if (ret < 0) {
                dev_err(pcie->dev, "Failed to find PHY entries: %d\n",
                        ret);
                return ret;
        }
        pcie->phy_count = ret;

        if (of_property_read_bool(np, "nvidia,update-fc-fixup"))
                pcie->update_fc_fixup = true;

        /* RP using an external REFCLK is supported only in Tegra234 */
        if (pcie->of_data->version == TEGRA194_DWC_IP_VER) {
                if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
                        pcie->enable_ext_refclk = true;
        } else {
                pcie->enable_ext_refclk =
                        of_property_read_bool(pcie->dev->of_node,
                                              "nvidia,enable-ext-refclk");
        }

        pcie->supports_clkreq =
                of_property_read_bool(pcie->dev->of_node, "supports-clkreq");

        pcie->enable_cdm_check =
                of_property_read_bool(np, "snps,enable-cdm-check");

        if (pcie->of_data->version == TEGRA234_DWC_IP_VER)
                pcie->enable_srns =
                        of_property_read_bool(np, "nvidia,enable-srns");

        if (pcie->of_data->mode == DW_PCIE_RC_TYPE)
                return 0;

        /* Endpoint mode specific DT entries */
        pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN);
        if (IS_ERR(pcie->pex_rst_gpiod)) {
                int err = PTR_ERR(pcie->pex_rst_gpiod);
                const char *level = KERN_ERR;

                if (err == -EPROBE_DEFER)
                        level = KERN_DEBUG;

                dev_printk(level, pcie->dev,
                           dev_fmt("Failed to get PERST GPIO: %d\n"),
                           err);
                return err;
        }

        pcie->pex_refclk_sel_gpiod = devm_gpiod_get(pcie->dev,
                                                    "nvidia,refclk-select",
                                                    GPIOD_OUT_HIGH);
        if (IS_ERR(pcie->pex_refclk_sel_gpiod)) {
                int err = PTR_ERR(pcie->pex_refclk_sel_gpiod);
                const char *level = KERN_ERR;

                if (err == -EPROBE_DEFER)
                        level = KERN_DEBUG;

                dev_printk(level, pcie->dev,
                           dev_fmt("Failed to get REFCLK select GPIOs: %d\n"),
                           err);
                pcie->pex_refclk_sel_gpiod = NULL;
        }

        return 0;
}

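/*
 * Controller and UPHY PLL state are owned by the BPMP firmware; the two
 * helpers below request state changes by sending MRQ_UPHY messages.
 */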
static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
                                          bool enable)
{
        struct mrq_uphy_response resp;
        struct tegra_bpmp_message msg;
        struct mrq_uphy_request req;

        /*
         * Controller-5 doesn't need to have its state set by BPMP-FW in
         * Tegra194
         */
        if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5)
                return 0;

        memset(&req, 0, sizeof(req));
        memset(&resp, 0, sizeof(resp));

        req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE;
        req.controller_state.pcie_controller = pcie->cid;
        req.controller_state.enable = enable;

        memset(&msg, 0, sizeof(msg));
        msg.mrq = MRQ_UPHY;
        msg.tx.data = &req;
        msg.tx.size = sizeof(req);
        msg.rx.data = &resp;
        msg.rx.size = sizeof(resp);

        return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
                                         bool enable)
{
        struct mrq_uphy_response resp;
        struct tegra_bpmp_message msg;
        struct mrq_uphy_request req;

        memset(&req, 0, sizeof(req));
        memset(&resp, 0, sizeof(resp));

        if (enable) {
                req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT;
                req.ep_ctrlr_pll_init.ep_controller = pcie->cid;
        } else {
                req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF;
                req.ep_ctrlr_pll_off.ep_controller = pcie->cid;
        }

        memset(&msg, 0, sizeof(msg));
        msg.mrq = MRQ_UPHY;
        msg.tx.data = &req;
        msg.tx.size = sizeof(req);
        msg.rx.data = &resp;
        msg.rx.size = sizeof(resp);

        return tegra_bpmp_transfer(pcie->bpmp, &msg);
}

static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
{
        struct dw_pcie_rp *pp = &pcie->pci.pp;
        struct pci_bus *child, *root_bus = NULL;
        struct pci_dev *pdev;

        /*
         * With some endpoints, the link does not go into the L2 state on
         * Tegra unless they are in D0. So make sure that the immediate
         * downstream devices are in D0 state before sending PME_TurnOff
         * to put the link into L2.
         * This is as per PCI Express Base r4.0 v1.0 September 27-2017,
         * 5.2 Link State Power Management (Page #428).
         */

        list_for_each_entry(child, &pp->bridge->bus->children, node) {
                /* Bring downstream devices to D0 if they are not already in */
                if (child->parent == pp->bridge->bus) {
                        root_bus = child;
                        break;
                }
        }

        if (!root_bus) {
                dev_err(pcie->dev, "Failed to find downstream devices\n");
                return;
        }

        list_for_each_entry(pdev, &root_bus->devices, bus_list) {
                if (PCI_SLOT(pdev->devfn) == 0) {
                        if (pci_set_power_state(pdev, PCI_D0))
                                dev_err(pcie->dev,
                                        "Failed to transition %s to D0 state\n",
                                        dev_name(&pdev->dev));
                }
        }
}

static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
{
        pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
        if (IS_ERR(pcie->slot_ctl_3v3)) {
                if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV)
                        return PTR_ERR(pcie->slot_ctl_3v3);

                pcie->slot_ctl_3v3 = NULL;
        }

        pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v");
        if (IS_ERR(pcie->slot_ctl_12v)) {
                if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV)
                        return PTR_ERR(pcie->slot_ctl_12v);

                pcie->slot_ctl_12v = NULL;
        }

        return 0;
}

static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
{
        int ret;

        if (pcie->slot_ctl_3v3) {
                ret = regulator_enable(pcie->slot_ctl_3v3);
                if (ret < 0) {
                        dev_err(pcie->dev,
                                "Failed to enable 3.3V slot supply: %d\n", ret);
                        return ret;
                }
        }

        if (pcie->slot_ctl_12v) {
                ret = regulator_enable(pcie->slot_ctl_12v);
                if (ret < 0) {
                        dev_err(pcie->dev,
                                "Failed to enable 12V slot supply: %d\n", ret);
                        goto fail_12v_enable;
                }
        }

        /*
         * According to PCI Express Card Electromechanical Specification
         * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive)
         * should be a minimum of 100ms.
         */
        if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v)
                msleep(100);

        return 0;

fail_12v_enable:
        if (pcie->slot_ctl_3v3)
                regulator_disable(pcie->slot_ctl_3v3);
        return ret;
}

static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
{
        if (pcie->slot_ctl_12v)
                regulator_disable(pcie->slot_ctl_12v);
        if (pcie->slot_ctl_3v3)
                regulator_disable(pcie->slot_ctl_3v3);
}

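/*
 * Power up and configure the controller for Root Port operation: BPMP
 * controller state, UPHY PLL, slot and PEX supplies, core clock, resets,
 * PHYs and the APPL register setup.
 */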
static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
                                        bool en_hw_hot_rst)
{
        int ret;
        u32 val;

        ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
        if (ret) {
                dev_err(pcie->dev,
                        "Failed to enable controller %u: %d\n", pcie->cid, ret);
                return ret;
        }

        if (pcie->enable_ext_refclk) {
                ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
                if (ret) {
                        dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret);
                        goto fail_pll_init;
                }
        }

        ret = tegra_pcie_enable_slot_regulators(pcie);
        if (ret < 0)
                goto fail_slot_reg_en;

        ret = regulator_enable(pcie->pex_ctl_supply);
        if (ret < 0) {
                dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret);
                goto fail_reg_en;
        }

        ret = clk_prepare_enable(pcie->core_clk);
        if (ret) {
                dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret);
                goto fail_core_clk;
        }

        ret = reset_control_deassert(pcie->core_apb_rst);
        if (ret) {
                dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n",
                        ret);
                goto fail_core_apb_rst;
        }

        if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) {
                /* Enable HW_HOT_RST mode */
                val = appl_readl(pcie, APPL_CTRL);
                val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
                         APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
                val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN <<
                        APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
                val |= APPL_CTRL_HW_HOT_RST_EN;
                appl_writel(pcie, val, APPL_CTRL);
        }

        ret = tegra_pcie_enable_phy(pcie);
        if (ret) {
                dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret);
                goto fail_phy;
        }

        /* Update CFG base address */
        appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
                    APPL_CFG_BASE_ADDR);

        /* Configure this core for RP mode operation */
        appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE);

        appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);

        val = appl_readl(pcie, APPL_CTRL);
        appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL);

        val = appl_readl(pcie, APPL_CFG_MISC);
        val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
        appl_writel(pcie, val, APPL_CFG_MISC);

        if (pcie->enable_srns || pcie->enable_ext_refclk) {
                /*
                 * When Tegra PCIe RP is using external clock, it cannot supply
                 * same clock to its downstream hierarchy. Hence, gate PCIe RP
                 * REFCLK out pads when RP & EP are using separate clocks or RP
                 * is using an external REFCLK.
                 */
                val = appl_readl(pcie, APPL_PINMUX);
                val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
                val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
                appl_writel(pcie, val, APPL_PINMUX);
        }

        if (!pcie->supports_clkreq) {
                val = appl_readl(pcie, APPL_PINMUX);
                val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN;
                val &= ~APPL_PINMUX_CLKREQ_OVERRIDE;
                appl_writel(pcie, val, APPL_PINMUX);
        }

        /* Update iATU_DMA base address */
        appl_writel(pcie,
                    pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
                    APPL_CFG_IATU_DMA_BASE_ADDR);

        reset_control_deassert(pcie->core_rst);

        return ret;

fail_phy:
        reset_control_assert(pcie->core_apb_rst);
fail_core_apb_rst:
        clk_disable_unprepare(pcie->core_clk);
fail_core_clk:
        regulator_disable(pcie->pex_ctl_supply);
fail_reg_en:
        tegra_pcie_disable_slot_regulators(pcie);
fail_slot_reg_en:
        if (pcie->enable_ext_refclk)
                tegra_pcie_bpmp_set_pll_state(pcie, false);
fail_pll_init:
        tegra_pcie_bpmp_set_ctrl_state(pcie, false);

        return ret;
}

static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
{
        int ret;

        ret = reset_control_assert(pcie->core_rst);
        if (ret)
                dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret);

        tegra_pcie_disable_phy(pcie);

        ret = reset_control_assert(pcie->core_apb_rst);
        if (ret)
                dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret);

        clk_disable_unprepare(pcie->core_clk);

        ret = regulator_disable(pcie->pex_ctl_supply);
        if (ret)
                dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret);

        tegra_pcie_disable_slot_regulators(pcie);

        if (pcie->enable_ext_refclk) {
                ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
                if (ret)
                        dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", ret);
        }

        ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
        if (ret)
                dev_err(pcie->dev, "Failed to disable controller %d: %d\n",
                        pcie->cid, ret);
}

static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
{
        struct dw_pcie *pci = &pcie->pci;
        struct dw_pcie_rp *pp = &pci->pp;
        int ret;

        ret = tegra_pcie_config_controller(pcie, false);
        if (ret < 0)
                return ret;

        pp->ops = &tegra_pcie_dw_host_ops;

        ret = dw_pcie_host_init(pp);
        if (ret < 0) {
                dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret);
                goto fail_host_init;
        }

        return 0;

fail_host_init:
        tegra_pcie_unconfig_controller(pcie);
        return ret;
}

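/*
 * Request PME_Turn_Off transmission and poll APPL_DEBUG until the link
 * reports L2 entry (or the PME ack times out).
 */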
tegra_pcie_try_link_l2(struct tegra_pcie_dw * pcie)1562 static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
1563 {
1564 u32 val;
1565
1566 if (!tegra_pcie_dw_link_up(&pcie->pci))
1567 return 0;
1568
1569 val = appl_readl(pcie, APPL_RADM_STATUS);
1570 val |= APPL_PM_XMT_TURNOFF_STATE;
1571 appl_writel(pcie, val, APPL_RADM_STATUS);
1572
1573 return readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG, val,
1574 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT,
1575 1, PME_ACK_TIMEOUT);
1576 }
1577
tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw * pcie)1578 static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
1579 {
1580 u32 data;
1581 int err;
1582
1583 if (!tegra_pcie_dw_link_up(&pcie->pci)) {
1584 dev_dbg(pcie->dev, "PCIe link is not up...!\n");
1585 return;
1586 }
1587
1588 /*
1589 * PCIe controller exits from L2 only if reset is applied, so
1590 * controller doesn't handle interrupts. But in cases where
1591 * L2 entry fails, PERST# is asserted which can trigger surprise
1592 * link down AER. However this function call happens in
1593 * suspend_noirq(), so AER interrupt will not be processed.
1594 * Disable all interrupts to avoid such a scenario.
1595 */
1596 appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0);
1597
1598 if (tegra_pcie_try_link_l2(pcie)) {
1599 dev_info(pcie->dev, "Link didn't transition to L2 state\n");
1600 /*
1601 * TX lane clock freq will reset to Gen1 only if link is in L2
1602 * or detect state.
1603 * So apply pex_rst to end point to force RP to go into detect
1604 * state
1605 */
1606 data = appl_readl(pcie, APPL_PINMUX);
1607 data &= ~APPL_PINMUX_PEX_RST;
1608 appl_writel(pcie, data, APPL_PINMUX);
1609
1610 /*
1611 * Some cards do not go to detect state even after de-asserting
1612 * PERST#. So, de-assert LTSSM to bring link to detect state.
1613 */
1614 data = readl(pcie->appl_base + APPL_CTRL);
1615 data &= ~APPL_CTRL_LTSSM_EN;
1616 writel(data, pcie->appl_base + APPL_CTRL);
1617
1618 err = readl_poll_timeout_atomic(pcie->appl_base + APPL_DEBUG,
1619 data,
1620 ((data &
1621 APPL_DEBUG_LTSSM_STATE_MASK) >>
1622 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1623 LTSSM_STATE_PRE_DETECT,
1624 1, LTSSM_TIMEOUT);
1625 if (err)
1626 dev_info(pcie->dev, "Link didn't go to detect state\n");
1627 }
1628 /*
1629 * DBI registers may not be accessible after this as PLL-E would be
1630 * down depending on how CLKREQ is pulled by end point
1631 */
1632 data = appl_readl(pcie, APPL_PINMUX);
1633 data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE);
1634 /* Cut REFCLK to slot */
1635 data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1636 data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1637 appl_writel(pcie, data, APPL_PINMUX);
1638 }
1639
tegra_pcie_deinit_controller(struct tegra_pcie_dw * pcie)1640 static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
1641 {
1642 tegra_pcie_downstream_dev_to_D0(pcie);
1643 dw_pcie_host_deinit(&pcie->pci.pp);
1644 tegra_pcie_dw_pme_turnoff(pcie);
1645 tegra_pcie_unconfig_controller(pcie);
1646 }
1647
tegra_pcie_config_rp(struct tegra_pcie_dw * pcie)1648 static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
1649 {
1650 struct device *dev = pcie->dev;
1651 char *name;
1652 int ret;
1653
1654 pm_runtime_enable(dev);
1655
1656 ret = pm_runtime_get_sync(dev);
1657 if (ret < 0) {
1658 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1659 ret);
1660 goto fail_pm_get_sync;
1661 }
1662
1663 ret = pinctrl_pm_select_default_state(dev);
1664 if (ret < 0) {
1665 dev_err(dev, "Failed to configure sideband pins: %d\n", ret);
1666 goto fail_pm_get_sync;
1667 }
1668
1669 ret = tegra_pcie_init_controller(pcie);
1670 if (ret < 0) {
1671 dev_err(dev, "Failed to initialize controller: %d\n", ret);
1672 goto fail_pm_get_sync;
1673 }
1674
1675 pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
1676 if (!pcie->link_state) {
1677 ret = -ENOMEDIUM;
1678 goto fail_host_init;
1679 }
1680
1681 name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
1682 if (!name) {
1683 ret = -ENOMEM;
1684 goto fail_host_init;
1685 }
1686
1687 pcie->debugfs = debugfs_create_dir(name, NULL);
1688 init_debugfs(pcie);
1689
1690 return ret;
1691
1692 fail_host_init:
1693 tegra_pcie_deinit_controller(pcie);
1694 fail_pm_get_sync:
1695 pm_runtime_put_sync(dev);
1696 pm_runtime_disable(dev);
1697 return ret;
1698 }
1699
pex_ep_event_pex_rst_assert(struct tegra_pcie_dw * pcie)1700 static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
1701 {
1702 u32 val;
1703 int ret;
1704
1705 if (pcie->ep_state == EP_STATE_DISABLED)
1706 return;
1707
1708 /* Disable LTSSM */
1709 val = appl_readl(pcie, APPL_CTRL);
1710 val &= ~APPL_CTRL_LTSSM_EN;
1711 appl_writel(pcie, val, APPL_CTRL);
1712
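/*
* Wait for the LTSSM to report the Pre-Detect state before the
* downstream resets are asserted.
*/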
1713 ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val,
1714 ((val & APPL_DEBUG_LTSSM_STATE_MASK) >>
1715 APPL_DEBUG_LTSSM_STATE_SHIFT) ==
1716 LTSSM_STATE_PRE_DETECT,
1717 1, LTSSM_TIMEOUT);
1718 if (ret)
1719 dev_err(pcie->dev, "Failed to go to the Detect state: %d\n", ret);
1720
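/* Unwind the bring-up sequence: core reset, PHY, APB reset, then core clock */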
1721 reset_control_assert(pcie->core_rst);
1722
1723 tegra_pcie_disable_phy(pcie);
1724
1725 reset_control_assert(pcie->core_apb_rst);
1726
1727 clk_disable_unprepare(pcie->core_clk);
1728
1729 pm_runtime_put_sync(pcie->dev);
1730
1731 if (pcie->enable_ext_refclk) {
1732 ret = tegra_pcie_bpmp_set_pll_state(pcie, false);
1733 if (ret)
1734 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n",
1735 ret);
1736 }
1737
1738 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1739 if (ret)
1740 dev_err(pcie->dev, "Failed to disable controller %u: %d\n", pcie->cid, ret);
1741
1742 pcie->ep_state = EP_STATE_DISABLED;
1743 dev_dbg(pcie->dev, "Deinitialization of endpoint completed\n");
1744 }
1745
1746 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
1747 {
1748 struct dw_pcie *pci = &pcie->pci;
1749 struct dw_pcie_ep *ep = &pci->ep;
1750 struct device *dev = pcie->dev;
1751 u32 val;
1752 int ret;
1753 u16 val_16;
1754
1755 if (pcie->ep_state == EP_STATE_ENABLED)
1756 return;
1757
1758 ret = pm_runtime_resume_and_get(dev);
1759 if (ret < 0) {
1760 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n",
1761 ret);
1762 return;
1763 }
1764
1765 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true);
1766 if (ret) {
1767 dev_err(pcie->dev, "Failed to enable controller %u: %d\n",
1768 pcie->cid, ret);
1769 goto fail_set_ctrl_state;
1770 }
1771
1772 if (pcie->enable_ext_refclk) {
1773 ret = tegra_pcie_bpmp_set_pll_state(pcie, true);
1774 if (ret) {
1775 dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n",
1776 ret);
1777 goto fail_pll_init;
1778 }
1779 }
1780
1781 ret = clk_prepare_enable(pcie->core_clk);
1782 if (ret) {
1783 dev_err(dev, "Failed to enable core clock: %d\n", ret);
1784 goto fail_core_clk_enable;
1785 }
1786
1787 ret = reset_control_deassert(pcie->core_apb_rst);
1788 if (ret) {
1789 dev_err(dev, "Failed to deassert core APB reset: %d\n", ret);
1790 goto fail_core_apb_rst;
1791 }
1792
1793 ret = tegra_pcie_enable_phy(pcie);
1794 if (ret) {
1795 dev_err(dev, "Failed to enable PHY: %d\n", ret);
1796 goto fail_phy;
1797 }
1798
1799 /* Clear any stale interrupt statuses */
1800 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
1801 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
1802 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
1803 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
1804 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
1805 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
1806 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
1807 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
1808 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
1809 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
1810 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
1811 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
1812 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
1813 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
1814 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
1815
1816 /* configure this core for EP mode operation */
1817 val = appl_readl(pcie, APPL_DM_TYPE);
1818 val &= ~APPL_DM_TYPE_MASK;
1819 val |= APPL_DM_TYPE_EP;
1820 appl_writel(pcie, val, APPL_DM_TYPE);
1821
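/* Writing zero presumably clears all SLCG (second-level clock gating) overrides */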
1822 appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE);
1823
1824 val = appl_readl(pcie, APPL_CTRL);
1825 val |= APPL_CTRL_SYS_PRE_DET_STATE;
1826 val |= APPL_CTRL_HW_HOT_RST_EN;
1827 appl_writel(pcie, val, APPL_CTRL);
1828
1829 val = appl_readl(pcie, APPL_CFG_MISC);
1830 val |= APPL_CFG_MISC_SLV_EP_MODE;
1831 val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT);
1832 appl_writel(pcie, val, APPL_CFG_MISC);
1833
1834 val = appl_readl(pcie, APPL_PINMUX);
1835 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN;
1836 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE;
1837 appl_writel(pcie, val, APPL_PINMUX);
1838
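/* Tell the application logic where the DBI and iATU/DMA register spaces live */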
1839 appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK,
1840 APPL_CFG_BASE_ADDR);
1841
1842 appl_writel(pcie, pcie->atu_dma_res->start &
1843 APPL_CFG_IATU_DMA_BASE_ADDR_MASK,
1844 APPL_CFG_IATU_DMA_BASE_ADDR);
1845
1846 val = appl_readl(pcie, APPL_INTR_EN_L0_0);
1847 val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
1848 val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
1849 val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN;
1850 appl_writel(pcie, val, APPL_INTR_EN_L0_0);
1851
1852 val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
1853 val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN;
1854 val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN;
1855 appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
1856
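/* Bring the PCIe core out of reset; DBI registers are accessed below */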
1857 reset_control_deassert(pcie->core_rst);
1858
1859 if (pcie->update_fc_fixup) {
1860 val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
1861 val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
1862 dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
1863 }
1864
1865 config_gen3_gen4_eq_presets(pcie);
1866
1867 init_host_aspm(pcie);
1868
1869 /* Disable ASPM-L1SS advertisement if there is no CLKREQ routing */
1870 if (!pcie->supports_clkreq) {
1871 disable_aspm_l11(pcie);
1872 disable_aspm_l12(pcie);
1873 }
1874
1875 if (!pcie->of_data->has_l1ss_exit_fix) {
1876 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
1877 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
1878 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
1879 }
1880
1881 pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
1882 PCI_CAP_ID_EXP);
1883
1884 /* Clear the Slot Clock Configuration bit when running in SRNS configuration */
1885 if (pcie->enable_srns) {
1886 val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
1887 PCI_EXP_LNKSTA);
1888 val_16 &= ~PCI_EXP_LNKSTA_SLC;
1889 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
1890 val_16);
1891 }
1892
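/* Switch the core clock to the Gen4 rate ahead of link training */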
1893 clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
1894
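/*
* Point the MSI-X address-match window at the MSI memory page so that,
* presumably, the controller traps MSI-X writes to that page; see
* tegra_pcie_ep_raise_msix_irq(), which writes the vector number there.
*/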
1895 val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK);
1896 val |= MSIX_ADDR_MATCH_LOW_OFF_EN;
1897 dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val);
1898 val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK);
1899 dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val);
1900
1901 ret = dw_pcie_ep_init_complete(ep);
1902 if (ret) {
1903 dev_err(dev, "Failed to complete initialization: %d\n", ret);
1904 goto fail_init_complete;
1905 }
1906
1907 dw_pcie_ep_init_notify(ep);
1908
1909 /* Program the private control to allow sending LTR upstream */
1910 if (pcie->of_data->has_ltr_req_fix) {
1911 val = appl_readl(pcie, APPL_LTR_MSG_2);
1912 val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
1913 appl_writel(pcie, val, APPL_LTR_MSG_2);
1914 }
1915
1916 /* Enable LTSSM */
1917 val = appl_readl(pcie, APPL_CTRL);
1918 val |= APPL_CTRL_LTSSM_EN;
1919 appl_writel(pcie, val, APPL_CTRL);
1920
1921 pcie->ep_state = EP_STATE_ENABLED;
1922 dev_dbg(dev, "Initialization of endpoint completed\n");
1923
1924 return;
1925
1926 fail_init_complete:
1927 reset_control_assert(pcie->core_rst);
1928 tegra_pcie_disable_phy(pcie);
1929 fail_phy:
1930 reset_control_assert(pcie->core_apb_rst);
1931 fail_core_apb_rst:
1932 clk_disable_unprepare(pcie->core_clk);
1933 fail_core_clk_enable:
1934 tegra_pcie_bpmp_set_pll_state(pcie, false);
1935 fail_pll_init:
1936 tegra_pcie_bpmp_set_ctrl_state(pcie, false);
1937 fail_set_ctrl_state:
1938 pm_runtime_put_sync(dev);
1939 }
1940
1941 static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
1942 {
1943 struct tegra_pcie_dw *pcie = arg;
1944
1945 if (gpiod_get_value(pcie->pex_rst_gpiod))
1946 pex_ep_event_pex_rst_assert(pcie);
1947 else
1948 pex_ep_event_pex_rst_deassert(pcie);
1949
1950 return IRQ_HANDLED;
1951 }
1952
1953 static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
1954 {
1955 /* Tegra194 supports only INTA */
1956 if (irq > 1)
1957 return -EINVAL;
1958
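/* Pulse the INTx virtual wire: assert, hold for 1-2 ms, then deassert */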
1959 appl_writel(pcie, 1, APPL_LEGACY_INTX);
1960 usleep_range(1000, 2000);
1961 appl_writel(pcie, 0, APPL_LEGACY_INTX);
1962 return 0;
1963 }
1964
1965 static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
1966 {
1967 if (unlikely(irq > 31))
1968 return -EINVAL;
1969
1970 appl_writel(pcie, BIT(irq), APPL_MSI_CTRL_1);
1971
1972 return 0;
1973 }
1974
1975 static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
1976 {
1977 struct dw_pcie_ep *ep = &pcie->pci.ep;
1978
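/*
* Writing the vector number to the MSI page (matched via the MSIX_ADDR_MATCH
* programming in pex_ep_event_pex_rst_deassert()) raises the interrupt.
*/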
1979 writel(irq, ep->msi_mem);
1980
1981 return 0;
1982 }
1983
1984 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
1985 enum pci_epc_irq_type type,
1986 u16 interrupt_num)
1987 {
1988 struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
1989 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
1990
1991 switch (type) {
1992 case PCI_EPC_IRQ_LEGACY:
1993 return tegra_pcie_ep_raise_legacy_irq(pcie, interrupt_num);
1994
1995 case PCI_EPC_IRQ_MSI:
1996 return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num);
1997
1998 case PCI_EPC_IRQ_MSIX:
1999 return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num);
2000
2001 default:
2002 dev_err(pci->dev, "Unknown IRQ type\n");
2003 return -EPERM;
2004 }
2005
2006 return 0;
2007 }
2008
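/*
* BAR0 (64-bit, so it also consumes the BAR1 slot) is the only usable BAR;
* BARs 2-5 are reserved and BAR0's size is fixed at 1 MiB.
*/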
2009 static const struct pci_epc_features tegra_pcie_epc_features = {
2010 .linkup_notifier = true,
2011 .core_init_notifier = true,
2012 .msi_capable = false,
2013 .msix_capable = false,
2014 .reserved_bar = 1 << BAR_2 | 1 << BAR_3 | 1 << BAR_4 | 1 << BAR_5,
2015 .bar_fixed_64bit = 1 << BAR_0,
2016 .bar_fixed_size[0] = SZ_1M,
2017 };
2018
2019 static const struct pci_epc_features*
2020 tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
2021 {
2022 return &tegra_pcie_epc_features;
2023 }
2024
2025 static const struct dw_pcie_ep_ops pcie_ep_ops = {
2026 .raise_irq = tegra_pcie_ep_raise_irq,
2027 .get_features = tegra_pcie_ep_get_features,
2028 };
2029
2030 static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
2031 struct platform_device *pdev)
2032 {
2033 struct dw_pcie *pci = &pcie->pci;
2034 struct device *dev = pcie->dev;
2035 struct dw_pcie_ep *ep;
2036 char *name;
2037 int ret;
2038
2039 ep = &pci->ep;
2040 ep->ops = &pcie_ep_ops;
2041
2042 ep->page_size = SZ_64K;
2043
2044 ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
2045 if (ret < 0) {
2046 dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
2047 ret);
2048 return ret;
2049 }
2050
2051 ret = gpiod_to_irq(pcie->pex_rst_gpiod);
2052 if (ret < 0) {
2053 dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
2054 return ret;
2055 }
2056 pcie->pex_rst_irq = (unsigned int)ret;
2057
2058 name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
2059 pcie->cid);
2060 if (!name) {
2061 dev_err(dev, "Failed to create PERST IRQ string\n");
2062 return -ENOMEM;
2063 }
2064
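/*
* Don't auto-enable the PERST# IRQ when it is requested below; it is
* presumably enabled only once the endpoint is fully set up.
*/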
2065 irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);
2066
2067 pcie->ep_state = EP_STATE_DISABLED;
2068
2069 ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
2070 tegra_pcie_ep_pex_rst_irq,
2071 IRQF_TRIGGER_RISING |
2072 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2073 name, (void *)pcie);
2074 if (ret < 0) {
2075 dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
2076 return ret;
2077 }
2078
2079 pm_runtime_enable(dev);
2080
2081 ret = dw_pcie_ep_init(ep);
2082 if (ret) {
2083 dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
2084 ret);
2085 pm_runtime_disable(dev);
2086 return ret;
2087 }
2088
2089 return 0;
2090 }
2091
2092 static int tegra_pcie_dw_probe(struct platform_device *pdev)
2093 {
2094 const struct tegra_pcie_dw_of_data *data;
2095 struct device *dev = &pdev->dev;
2096 struct resource *atu_dma_res;
2097 struct tegra_pcie_dw *pcie;
2098 struct dw_pcie_rp *pp;
2099 struct dw_pcie *pci;
2100 struct phy **phys;
2101 char *name;
2102 int ret;
2103 u32 i;
2104
2105 data = of_device_get_match_data(dev);
2106
2107 pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
2108 if (!pcie)
2109 return -ENOMEM;
2110
2111 pci = &pcie->pci;
2112 pci->dev = &pdev->dev;
2113 pci->ops = &tegra_dw_pcie_ops;
2114 pcie->dev = &pdev->dev;
2115 pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
2116 pci->n_fts[0] = pcie->of_data->n_fts[0];
2117 pci->n_fts[1] = pcie->of_data->n_fts[1];
2118 pp = &pci->pp;
2119 pp->num_vectors = MAX_MSI_IRQS;
2120
2121 ret = tegra_pcie_dw_parse_dt(pcie);
2122 if (ret < 0) {
2123 const char *level = KERN_ERR;
2124
2125 if (ret == -EPROBE_DEFER)
2126 level = KERN_DEBUG;
2127
2128 dev_printk(level, dev,
2129 dev_fmt("Failed to parse device tree: %d\n"),
2130 ret);
2131 return ret;
2132 }
2133
2134 ret = tegra_pcie_get_slot_regulators(pcie);
2135 if (ret < 0) {
2136 const char *level = KERN_ERR;
2137
2138 if (ret == -EPROBE_DEFER)
2139 level = KERN_DEBUG;
2140
2141 dev_printk(level, dev,
2142 dev_fmt("Failed to get slot regulators: %d\n"),
2143 ret);
2144 return ret;
2145 }
2146
2147 if (pcie->pex_refclk_sel_gpiod)
2148 gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);
2149
2150 pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
2151 if (IS_ERR(pcie->pex_ctl_supply)) {
2152 ret = PTR_ERR(pcie->pex_ctl_supply);
2153 if (ret != -EPROBE_DEFER)
2154 dev_err(dev, "Failed to get regulator: %ld\n",
2155 PTR_ERR(pcie->pex_ctl_supply));
2156 return ret;
2157 }
2158
2159 pcie->core_clk = devm_clk_get(dev, "core");
2160 if (IS_ERR(pcie->core_clk)) {
2161 dev_err(dev, "Failed to get core clock: %ld\n",
2162 PTR_ERR(pcie->core_clk));
2163 return PTR_ERR(pcie->core_clk);
2164 }
2165
2166 pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2167 "appl");
2168 if (!pcie->appl_res) {
2169 dev_err(dev, "Failed to find \"appl\" region\n");
2170 return -ENODEV;
2171 }
2172
2173 pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
2174 if (IS_ERR(pcie->appl_base))
2175 return PTR_ERR(pcie->appl_base);
2176
2177 pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
2178 if (IS_ERR(pcie->core_apb_rst)) {
2179 dev_err(dev, "Failed to get APB reset: %ld\n",
2180 PTR_ERR(pcie->core_apb_rst));
2181 return PTR_ERR(pcie->core_apb_rst);
2182 }
2183
2184 phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
2185 if (!phys)
2186 return -ENOMEM;
2187
2188 for (i = 0; i < pcie->phy_count; i++) {
2189 name = kasprintf(GFP_KERNEL, "p2u-%u", i);
2190 if (!name) {
2191 dev_err(dev, "Failed to create P2U string\n");
2192 return -ENOMEM;
2193 }
2194 phys[i] = devm_phy_get(dev, name);
2195 kfree(name);
2196 if (IS_ERR(phys[i])) {
2197 ret = PTR_ERR(phys[i]);
2198 if (ret != -EPROBE_DEFER)
2199 dev_err(dev, "Failed to get PHY: %d\n", ret);
2200 return ret;
2201 }
2202 }
2203
2204 pcie->phys = phys;
2205
2206 atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2207 "atu_dma");
2208 if (!atu_dma_res) {
2209 dev_err(dev, "Failed to find \"atu_dma\" region\n");
2210 return -ENODEV;
2211 }
2212 pcie->atu_dma_res = atu_dma_res;
2213
2214 pci->atu_size = resource_size(atu_dma_res);
2215 pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
2216 if (IS_ERR(pci->atu_base))
2217 return PTR_ERR(pci->atu_base);
2218
2219 pcie->core_rst = devm_reset_control_get(dev, "core");
2220 if (IS_ERR(pcie->core_rst)) {
2221 dev_err(dev, "Failed to get core reset: %ld\n",
2222 PTR_ERR(pcie->core_rst));
2223 return PTR_ERR(pcie->core_rst);
2224 }
2225
2226 pp->irq = platform_get_irq_byname(pdev, "intr");
2227 if (pp->irq < 0)
2228 return pp->irq;
2229
2230 pcie->bpmp = tegra_bpmp_get(dev);
2231 if (IS_ERR(pcie->bpmp))
2232 return PTR_ERR(pcie->bpmp);
2233
2234 platform_set_drvdata(pdev, pcie);
2235
2236 pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
2237 ret = PTR_ERR_OR_ZERO(pcie->icc_path);
2238 if (ret) {
2239 tegra_bpmp_put(pcie->bpmp);
2240 dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
2241 return ret;
2242 }
2243
2244 switch (pcie->of_data->mode) {
2245 case DW_PCIE_RC_TYPE:
2246 ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
2247 IRQF_SHARED, "tegra-pcie-intr", pcie);
2248 if (ret) {
2249 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2250 ret);
2251 goto fail;
2252 }
2253
2254 ret = tegra_pcie_config_rp(pcie);
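/* An empty slot reports -ENOMEDIUM, which is not a probe failure */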
2255 if (ret && ret != -ENOMEDIUM)
2256 goto fail;
2257 else
2258 return 0;
2259 break;
2260
2261 case DW_PCIE_EP_TYPE:
2262 ret = devm_request_threaded_irq(dev, pp->irq,
2263 tegra_pcie_ep_hard_irq,
2264 tegra_pcie_ep_irq_thread,
2265 IRQF_SHARED | IRQF_ONESHOT,
2266 "tegra-pcie-ep-intr", pcie);
2267 if (ret) {
2268 dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
2269 ret);
2270 goto fail;
2271 }
2272
2273 ret = tegra_pcie_config_ep(pcie, pdev);
2274 if (ret < 0)
2275 goto fail;
2276 break;
2277
2278 default:
2279 dev_err(dev, "Invalid PCIe device type %d\n",
2280 pcie->of_data->mode);
2281 }
2282
2283 fail:
2284 tegra_bpmp_put(pcie->bpmp);
2285 return ret;
2286 }
2287
2288 static void tegra_pcie_dw_remove(struct platform_device *pdev)
2289 {
2290 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
2291
2292 if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
2293 if (!pcie->link_state)
2294 return;
2295
2296 debugfs_remove_recursive(pcie->debugfs);
2297 tegra_pcie_deinit_controller(pcie);
2298 pm_runtime_put_sync(pcie->dev);
2299 } else {
2300 disable_irq(pcie->pex_rst_irq);
2301 pex_ep_event_pex_rst_assert(pcie);
2302 }
2303
2304 pm_runtime_disable(pcie->dev);
2305 tegra_bpmp_put(pcie->bpmp);
2306 if (pcie->pex_refclk_sel_gpiod)
2307 gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
2308 }
2309
2310 static int tegra_pcie_dw_suspend_late(struct device *dev)
2311 {
2312 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2313 u32 val;
2314
2315 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
2316 dev_err(dev, "Failed to Suspend as Tegra PCIe is in EP mode\n");
2317 return -EPERM;
2318 }
2319
2320 if (!pcie->link_state)
2321 return 0;
2322
2323 /* Enable HW_HOT_RST mode */
2324 if (!pcie->of_data->has_sbr_reset_fix) {
2325 val = appl_readl(pcie, APPL_CTRL);
2326 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2327 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2328 val |= APPL_CTRL_HW_HOT_RST_EN;
2329 appl_writel(pcie, val, APPL_CTRL);
2330 }
2331
2332 return 0;
2333 }
2334
2335 static int tegra_pcie_dw_suspend_noirq(struct device *dev)
2336 {
2337 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2338
2339 if (!pcie->link_state)
2340 return 0;
2341
2342 tegra_pcie_downstream_dev_to_D0(pcie);
2343 tegra_pcie_dw_pme_turnoff(pcie);
2344 tegra_pcie_unconfig_controller(pcie);
2345
2346 return 0;
2347 }
2348
2349 static int tegra_pcie_dw_resume_noirq(struct device *dev)
2350 {
2351 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2352 int ret;
2353
2354 if (!pcie->link_state)
2355 return 0;
2356
2357 ret = tegra_pcie_config_controller(pcie, true);
2358 if (ret < 0)
2359 return ret;
2360
2361 ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
2362 if (ret < 0) {
2363 dev_err(dev, "Failed to init host: %d\n", ret);
2364 goto fail_host_init;
2365 }
2366
2367 dw_pcie_setup_rc(&pcie->pci.pp);
2368
2369 ret = tegra_pcie_dw_start_link(&pcie->pci);
2370 if (ret < 0)
2371 goto fail_host_init;
2372
2373 return 0;
2374
2375 fail_host_init:
2376 tegra_pcie_unconfig_controller(pcie);
2377 return ret;
2378 }
2379
2380 static int tegra_pcie_dw_resume_early(struct device *dev)
2381 {
2382 struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
2383 u32 val;
2384
2385 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
2386 dev_err(dev, "Suspend is not supported in EP mode");
2387 return -ENOTSUPP;
2388 }
2389
2390 if (!pcie->link_state)
2391 return 0;
2392
2393 /* Disable HW_HOT_RST mode */
2394 if (!pcie->of_data->has_sbr_reset_fix) {
2395 val = appl_readl(pcie, APPL_CTRL);
2396 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
2397 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
2398 val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
2399 APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
2400 val &= ~APPL_CTRL_HW_HOT_RST_EN;
2401 appl_writel(pcie, val, APPL_CTRL);
2402 }
2403
2404 return 0;
2405 }
2406
2407 static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
2408 {
2409 struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
2410
2411 if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
2412 if (!pcie->link_state)
2413 return;
2414
2415 debugfs_remove_recursive(pcie->debugfs);
2416 tegra_pcie_downstream_dev_to_D0(pcie);
2417
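/* Quiesce interrupt handling before turning off the link and controller */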
2418 disable_irq(pcie->pci.pp.irq);
2419 if (IS_ENABLED(CONFIG_PCI_MSI))
2420 disable_irq(pcie->pci.pp.msi_irq[0]);
2421
2422 tegra_pcie_dw_pme_turnoff(pcie);
2423 tegra_pcie_unconfig_controller(pcie);
2424 pm_runtime_put_sync(pcie->dev);
2425 } else {
2426 disable_irq(pcie->pex_rst_irq);
2427 pex_ep_event_pex_rst_assert(pcie);
2428 }
2429 }
2430
2431 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
2432 .version = TEGRA194_DWC_IP_VER,
2433 .mode = DW_PCIE_RC_TYPE,
2434 .cdm_chk_int_en_bit = BIT(19),
2435 /* Gen4 - 5, 6, 8 and 9 presets enabled */
2436 .gen4_preset_vec = 0x360,
2437 .n_fts = { 52, 52 },
2438 };
2439
2440 static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
2441 .version = TEGRA194_DWC_IP_VER,
2442 .mode = DW_PCIE_EP_TYPE,
2443 .cdm_chk_int_en_bit = BIT(19),
2444 /* Gen4 - 5, 6, 8 and 9 presets enabled */
2445 .gen4_preset_vec = 0x360,
2446 .n_fts = { 52, 52 },
2447 };
2448
2449 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
2450 .version = TEGRA234_DWC_IP_VER,
2451 .mode = DW_PCIE_RC_TYPE,
2452 .has_msix_doorbell_access_fix = true,
2453 .has_sbr_reset_fix = true,
2454 .has_l1ss_exit_fix = true,
2455 .cdm_chk_int_en_bit = BIT(18),
2456 /* Gen4 - 6, 8 and 9 presets enabled */
2457 .gen4_preset_vec = 0x340,
2458 .n_fts = { 52, 80 },
2459 };
2460
2461 static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
2462 .version = TEGRA234_DWC_IP_VER,
2463 .mode = DW_PCIE_EP_TYPE,
2464 .has_l1ss_exit_fix = true,
2465 .has_ltr_req_fix = true,
2466 .cdm_chk_int_en_bit = BIT(18),
2467 /* Gen4 - 6, 8 and 9 presets enabled */
2468 .gen4_preset_vec = 0x340,
2469 .n_fts = { 52, 80 },
2470 };
2471
2472 static const struct of_device_id tegra_pcie_dw_of_match[] = {
2473 {
2474 .compatible = "nvidia,tegra194-pcie",
2475 .data = &tegra194_pcie_dw_rc_of_data,
2476 },
2477 {
2478 .compatible = "nvidia,tegra194-pcie-ep",
2479 .data = &tegra194_pcie_dw_ep_of_data,
2480 },
2481 {
2482 .compatible = "nvidia,tegra234-pcie",
2483 .data = &tegra234_pcie_dw_rc_of_data,
2484 },
2485 {
2486 .compatible = "nvidia,tegra234-pcie-ep",
2487 .data = &tegra234_pcie_dw_ep_of_data,
2488 },
2489 {}
2490 };
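/*
* For reference, a minimal and purely illustrative device-tree node that
* would match this driver (only the compatible string comes from the table
* above; the unit address and all other properties are omitted):
*
* pcie@0 {
* compatible = "nvidia,tegra194-pcie";
* };
*/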
2491
2492 static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
2493 .suspend_late = tegra_pcie_dw_suspend_late,
2494 .suspend_noirq = tegra_pcie_dw_suspend_noirq,
2495 .resume_noirq = tegra_pcie_dw_resume_noirq,
2496 .resume_early = tegra_pcie_dw_resume_early,
2497 };
2498
2499 static struct platform_driver tegra_pcie_dw_driver = {
2500 .probe = tegra_pcie_dw_probe,
2501 .remove_new = tegra_pcie_dw_remove,
2502 .shutdown = tegra_pcie_dw_shutdown,
2503 .driver = {
2504 .name = "tegra194-pcie",
2505 .pm = &tegra_pcie_dw_pm_ops,
2506 .of_match_table = tegra_pcie_dw_of_match,
2507 },
2508 };
2509 module_platform_driver(tegra_pcie_dw_driver);
2510
2511 MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
2512
2513 MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
2514 MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
2515 MODULE_LICENSE("GPL v2");
2516