// SPDX-License-Identifier: GPL-2.0+
/*
 * PCIe host controller driver for the following SoCs
 * Tegra194
 * Tegra234
 *
 * Copyright (C) 2019-2022 NVIDIA Corporation.
 *
 * Author: Vidya Sagar <vidyas@nvidia.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/phy/phy.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/random.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/types.h>
#include "pcie-designware.h"
#include <soc/tegra/bpmp.h>
#include <soc/tegra/bpmp-abi.h>
#include "../../pci.h"

#define TEGRA194_DWC_IP_VER	DW_PCIE_VER_500A
#define TEGRA234_DWC_IP_VER	DW_PCIE_VER_562A

/* APPL_* offsets below are registers in the Tegra application-logic window
 * (pcie->appl_base), which wraps the DesignWare core. */
#define APPL_PINMUX				0x0
#define APPL_PINMUX_PEX_RST			BIT(0)
#define APPL_PINMUX_CLKREQ_OVERRIDE_EN		BIT(2)
#define APPL_PINMUX_CLKREQ_OVERRIDE		BIT(3)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN	BIT(4)
#define APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE	BIT(5)
#define APPL_PINMUX_CLKREQ_DEFAULT_VALUE	BIT(13)

#define APPL_CTRL				0x4
#define APPL_CTRL_SYS_PRE_DET_STATE		BIT(6)
#define APPL_CTRL_LTSSM_EN			BIT(7)
#define APPL_CTRL_HW_HOT_RST_EN			BIT(20)
#define APPL_CTRL_HW_HOT_RST_MODE_MASK		GENMASK(1, 0)
#define APPL_CTRL_HW_HOT_RST_MODE_SHIFT		22
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST	0x1
#define APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN	0x2

#define APPL_INTR_EN_L0_0			0x8
#define APPL_INTR_EN_L0_0_LINK_STATE_INT_EN	BIT(0)
#define APPL_INTR_EN_L0_0_MSI_RCV_INT_EN	BIT(4)
#define APPL_INTR_EN_L0_0_INT_INT_EN		BIT(8)
#define APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN	BIT(15)
#define APPL_INTR_EN_L0_0_CDM_REG_CHK_INT_EN	BIT(19)
#define APPL_INTR_EN_L0_0_SYS_INTR_EN		BIT(30)
#define APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN	BIT(31)

#define APPL_INTR_STATUS_L0			0xC
#define APPL_INTR_STATUS_L0_LINK_STATE_INT	BIT(0)
#define APPL_INTR_STATUS_L0_INT_INT		BIT(8)
#define APPL_INTR_STATUS_L0_PCI_CMD_EN_INT	BIT(15)
#define APPL_INTR_STATUS_L0_PEX_RST_INT		BIT(16)
#define APPL_INTR_STATUS_L0_CDM_REG_CHK_INT	BIT(18)

#define APPL_INTR_EN_L1_0_0				0x1C
#define APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN	BIT(1)
#define APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN		BIT(3)
#define APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN	BIT(30)

#define APPL_INTR_STATUS_L1_0_0				0x20
#define APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED	BIT(1)
#define APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED	BIT(3)
#define APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE		BIT(30)

#define APPL_INTR_STATUS_L1_1			0x2C
#define APPL_INTR_STATUS_L1_2			0x30
#define APPL_INTR_STATUS_L1_3			0x34
#define APPL_INTR_STATUS_L1_6			0x3C
#define APPL_INTR_STATUS_L1_7			0x40
#define APPL_INTR_STATUS_L1_15_CFG_BME_CHGED	BIT(1)

#define APPL_INTR_EN_L1_8_0			0x44
#define APPL_INTR_EN_L1_8_BW_MGT_INT_EN		BIT(2)
#define APPL_INTR_EN_L1_8_AUTO_BW_INT_EN	BIT(3)
#define APPL_INTR_EN_L1_8_EDMA_INT_EN		BIT(6)
#define APPL_INTR_EN_L1_8_INTX_EN		BIT(11)
#define APPL_INTR_EN_L1_8_AER_INT_EN		BIT(15)

#define APPL_INTR_STATUS_L1_8_0			0x4C
#define APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK	GENMASK(11, 6)
#define APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS	BIT(2)
#define APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS	BIT(3)

#define APPL_INTR_STATUS_L1_9			0x54
#define APPL_INTR_STATUS_L1_10			0x58
#define APPL_INTR_STATUS_L1_11			0x64
#define APPL_INTR_STATUS_L1_13			0x74
#define APPL_INTR_STATUS_L1_14			0x78
#define APPL_INTR_STATUS_L1_15			0x7C
#define APPL_INTR_STATUS_L1_17			0x88

#define APPL_INTR_EN_L1_18				0x90
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMPLT		BIT(2)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR		BIT(1)
#define APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_INTR_STATUS_L1_18				0x94
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT	BIT(2)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR	BIT(1)
#define APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR	BIT(0)

#define APPL_MSI_CTRL_1				0xAC

#define APPL_MSI_CTRL_2				0xB0

#define APPL_LEGACY_INTX			0xB8

#define APPL_LTR_MSG_1				0xC4
#define LTR_MSG_REQ				BIT(15)
#define LTR_NOSNOOP_MSG_REQ			BIT(31)

#define APPL_LTR_MSG_2				0xC8
#define APPL_LTR_MSG_2_LTR_MSG_REQ_STATE	BIT(3)

#define APPL_LINK_STATUS			0xCC
#define APPL_LINK_STATUS_RDLH_LINK_UP		BIT(0)

#define APPL_DEBUG				0xD0
#define APPL_DEBUG_PM_LINKST_IN_L2_LAT		BIT(21)
#define APPL_DEBUG_PM_LINKST_IN_L0		0x11
#define APPL_DEBUG_LTSSM_STATE_MASK		GENMASK(8, 3)
#define APPL_DEBUG_LTSSM_STATE_SHIFT		3
#define LTSSM_STATE_DETECT_QUIET		0x00
#define LTSSM_STATE_DETECT_ACT			0x08
#define LTSSM_STATE_PRE_DETECT_QUIET		0x28
#define LTSSM_STATE_DETECT_WAIT			0x30
#define LTSSM_STATE_L2_IDLE			0xa8

#define APPL_RADM_STATUS			0xE4
#define APPL_PM_XMT_TURNOFF_STATE		BIT(0)

#define APPL_DM_TYPE				0x100
#define APPL_DM_TYPE_MASK			GENMASK(3, 0)
#define APPL_DM_TYPE_RP				0x4
#define APPL_DM_TYPE_EP				0x0

#define APPL_CFG_BASE_ADDR			0x104
#define APPL_CFG_BASE_ADDR_MASK			GENMASK(31, 12)

#define APPL_CFG_IATU_DMA_BASE_ADDR		0x108
#define APPL_CFG_IATU_DMA_BASE_ADDR_MASK	GENMASK(31, 18)

#define APPL_CFG_MISC				0x110
#define APPL_CFG_MISC_SLV_EP_MODE		BIT(14)
#define APPL_CFG_MISC_ARCACHE_MASK		GENMASK(13, 10)
#define APPL_CFG_MISC_ARCACHE_SHIFT		10
#define APPL_CFG_MISC_ARCACHE_VAL		3

#define APPL_CFG_SLCG_OVERRIDE			0x114
#define APPL_CFG_SLCG_OVERRIDE_SLCG_EN_MASTER	BIT(0)

#define APPL_CAR_RESET_OVRD				0x12C
#define APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N	BIT(0)

#define IO_BASE_IO_DECODE			BIT(0)
#define IO_BASE_IO_DECODE_BIT8			BIT(8)

#define CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE		BIT(0)
#define CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE	BIT(16)

#define CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF		0x718
#define CFG_TIMER_CTRL_ACK_NAK_SHIFT		(19)

#define N_FTS_VAL				52
#define FTS_VAL					52

#define PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT	0x8D0
#define AMBA_ERROR_RESPONSE_RRS_SHIFT		3
#define AMBA_ERROR_RESPONSE_RRS_MASK		GENMASK(1, 0)
#define AMBA_ERROR_RESPONSE_RRS_OKAY		0
#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFFFFFF	1
#define AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001	2

#define MSIX_ADDR_MATCH_LOW_OFF			0x940
#define MSIX_ADDR_MATCH_LOW_OFF_EN		BIT(0)
#define MSIX_ADDR_MATCH_LOW_OFF_MASK		GENMASK(31, 2)

#define MSIX_ADDR_MATCH_HIGH_OFF		0x944
#define MSIX_ADDR_MATCH_HIGH_OFF_MASK		GENMASK(31, 0)

#define PORT_LOGIC_MSIX_DOORBELL		0x948

#define CAP_SPCIE_CAP_OFF			0x154
#define CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK	GENMASK(3, 0)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK	GENMASK(11, 8)
#define CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT	8

#define LTSSM_DELAY_US				10000	/* 10 ms */
#define LTSSM_TIMEOUT_US			120000	/* 120 ms */

#define GEN3_GEN4_EQ_PRESET_INIT		5

#define GEN1_CORE_CLK_FREQ			62500000
#define GEN2_CORE_CLK_FREQ			125000000
#define GEN3_CORE_CLK_FREQ			250000000
#define GEN4_CORE_CLK_FREQ			500000000

#define LTR_MSG_TIMEOUT				(100 * 1000)

#define PERST_DEBOUNCE_TIME			(5 * 1000)

#define EP_STATE_DISABLED			0
#define EP_STATE_ENABLED			1

/* Core clock rate per link speed; indexed by PCI_EXP_LNKSTA_CLS value. */
static const unsigned int pcie_gen_freq[] = {
	GEN1_CORE_CLK_FREQ,	/* PCI_EXP_LNKSTA_CLS == 0; undefined */
	GEN1_CORE_CLK_FREQ,
	GEN2_CORE_CLK_FREQ,
	GEN3_CORE_CLK_FREQ,
	GEN4_CORE_CLK_FREQ
};

/* Per-SoC-generation match data: DWC IP version, RP/EP mode, and flags
 * describing which hardware erratum workarounds are (not) needed. */
struct tegra_pcie_dw_of_data {
	u32 version;
	enum dw_pcie_device_mode mode;
	bool has_msix_doorbell_access_fix;
	bool has_sbr_reset_fix;
	bool has_l1ss_exit_fix;
	bool has_ltr_req_fix;
	bool disable_l1_2;
	u32 cdm_chk_int_en_bit;
	u32 gen4_preset_vec;
	u8 n_fts[2];
};

/* Driver state for one Tegra194/234 PCIe controller instance. */
struct tegra_pcie_dw {
	struct device *dev;
	struct resource *appl_res;
	struct resource *dbi_res;
	struct resource *atu_dma_res;
	void __iomem *appl_base;	/* application-logic register window */
	struct clk *core_clk;
	struct clk *core_clk_m;		/* core monitor clock */
	struct reset_control *core_apb_rst;
	struct reset_control *core_rst;
	struct dw_pcie pci;		/* embedded DWC core; see to_tegra_pcie() */
	struct tegra_bpmp *bpmp;

	struct tegra_pcie_dw_of_data *of_data;

	bool supports_clkreq;
	bool enable_cdm_check;
	bool enable_srns;
	bool link_state;
	bool update_fc_fixup;
	bool enable_ext_refclk;
	u8 init_link_width;		/* link width seen at init, for bad-link WAR */
	u32 msi_ctrl_int;
	u32 num_lanes;
	u32 cid;
	u32 ras_des_cap;		/* RAS DES extended capability offset in DBI */
	u32 pcie_cap_base;		/* PCIe capability offset in DBI */
	u32 aspm_cmrt;
	u32 aspm_pwr_on_t;
	u32 aspm_l0s_enter_lat;

	struct regulator *pex_ctl_supply;
	struct regulator *slot_ctl_3v3;
	struct regulator *slot_ctl_12v;

	unsigned int phy_count;
	struct phy **phys;

	struct dentry *debugfs;

	/* Endpoint mode specific */
	struct gpio_desc *pex_rst_gpiod;
	struct gpio_desc *pex_refclk_sel_gpiod;
	unsigned int pex_rst_irq;
	int ep_state;
	long link_status;	/* bit 0: link-up event pending for IRQ thread */
	struct icc_path *icc_path;
};

static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
{
	return container_of(pci, struct tegra_pcie_dw, pci);
}

/* Write a register in the APPL (application logic) window. */
static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
			       const u32 reg)
{
	writel_relaxed(value, pcie->appl_base + reg);
}

/* Read a register in the APPL (application logic) window. */
static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
{
	return readl_relaxed(pcie->appl_base + reg);
}

/*
 * Scale interconnect bandwidth and the core clock rate to match the
 * current negotiated link speed and width.
 */
static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, speed, width;

	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
	width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);

	val = width * PCIE_SPEED2MBS_ENC(pcie_get_link_speed(speed));

	if (icc_set_bw(pcie->icc_path, Mbps_to_icc(val), 0))
		dev_err(pcie->dev, "can't set bw[%u]\n", val);

	/* Out-of-range CLS falls back to the Gen1 clock rate */
	if (speed >= ARRAY_SIZE(pcie_gen_freq))
		speed = 0;

	clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
}

/*
 * If link-bandwidth management reports a width downgrade relative to the
 * width seen at init, force the target speed back to 2.5 GT/s and retrain.
 */
static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 current_link_width;
	u16 val;

	/*
	 * NOTE:- Since this scenario is uncommon and link as such is not
	 * stable anyway, not waiting to confirm if link is really
	 * transitioning to Gen-2 speed
	 */
	val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
	if (val & PCI_EXP_LNKSTA_LBMS) {
		current_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
		if (pcie->init_link_width > current_link_width) {
			dev_warn(pci->dev, "PCIe link is bad, width reduced\n");
			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL2);
			val &= ~PCI_EXP_LNKCTL2_TLS;
			val |= PCI_EXP_LNKCTL2_TLS_2_5GT;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL2, val);

			val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						PCI_EXP_LNKCTL);
			val |= PCI_EXP_LNKCTL_RL;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKCTL, val);
		}
	}
}

/* Root-port mode IRQ handler: link-state, bandwidth and CDM-check events. */
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie *pci = &pcie->pci;
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, status_l0, status_l1;
	u16 val_w;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);
		if (!pcie->of_data->has_sbr_reset_fix &&
		    status_l1 & APPL_INTR_STATUS_L1_0_0_LINK_REQ_RST_NOT_CHGED) {
			/* SBR & Surprise Link Down WAR */
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val &= ~APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);
			udelay(1);
			val = appl_readl(pcie, APPL_CAR_RESET_OVRD);
			val |= APPL_CAR_RESET_OVRD_CYA_OVERRIDE_CORE_RST_N;
			appl_writel(pcie, val, APPL_CAR_RESET_OVRD);

			val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
			val |= PORT_LOGIC_SPEED_CHANGE;
			dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS) {
			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_AUTO_BW_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);
			apply_bad_link_workaround(pp);
		}
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS) {
			/* Reflect the bandwidth-management event in LNKSTA */
			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			val_w |= PCI_EXP_LNKSTA_LBMS;
			dw_pcie_writew_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA, val_w);

			appl_writel(pcie,
				    APPL_INTR_STATUS_L1_8_0_BW_MGT_INT_STS,
				    APPL_INTR_STATUS_L1_8_0);

			val_w = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
						  PCI_EXP_LNKSTA);
			dev_dbg(pci->dev, "Link Speed : Gen-%u\n", val_w &
				PCI_EXP_LNKSTA_CLS);
		}
	}

	if (status_l0 & APPL_INTR_STATUS_L0_CDM_REG_CHK_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_18);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS);
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMPLT) {
			dev_info(pci->dev, "CDM check complete\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPLETE;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_CMP_ERR) {
			dev_err(pci->dev, "CDM comparison mismatch\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR;
		}
		if (status_l1 & APPL_INTR_STATUS_L1_18_CDM_REG_CHK_LOGIC_ERR) {
			dev_err(pci->dev, "CDM Logic error\n");
			val |= PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR;
		}
		dw_pcie_writel_dbi(pci, PCIE_PL_CHK_REG_CONTROL_STATUS, val);
		val = dw_pcie_readl_dbi(pci, PCIE_PL_CHK_REG_ERR_ADDR);
		dev_err(pci->dev, "CDM Error Address Offset = 0x%08X\n", val);
	}

	return IRQ_HANDLED;
}

/*
 * Endpoint mode: after a hot reset completes, clear all latched interrupt
 * statuses and re-enable the LTSSM so link training can restart.
 */
static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
{
	u32 val;

	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);
	appl_writel(pcie, 0xFFFFFFFF, APPL_MSI_CTRL_2);

	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);
}

/*
 * Endpoint mode threaded IRQ handler: report link-up to the DWC EP core,
 * rescale bandwidth/clock, and (if required by the SoC) send an LTR
 * message upstream once Bus Master Enable is set.
 */
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	struct dw_pcie_ep *ep = &pcie->pci.ep;
	struct dw_pcie *pci = &pcie->pci;
	u32 val;

	if (test_and_clear_bit(0, &pcie->link_status))
		dw_pcie_ep_linkup(ep);

	tegra_pcie_icc_set(pcie);

	if (pcie->of_data->has_ltr_req_fix)
		return IRQ_HANDLED;

	/* If EP doesn't advertise L1SS, just return */
	if (!pci->l1ss_support)
		return IRQ_HANDLED;

	/* Check if BME is set to '1' */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	if (val & PCI_COMMAND_MASTER) {
		ktime_t timeout;

		/* Send LTR upstream */
		val = appl_readl(pcie, APPL_LTR_MSG_2);
		val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE;
		appl_writel(pcie, val, APPL_LTR_MSG_2);

		/* Poll until hardware clears the request bit or we time out */
		timeout = ktime_add_us(ktime_get(), LTR_MSG_TIMEOUT);
		for (;;) {
			val = appl_readl(pcie, APPL_LTR_MSG_2);
			if (!(val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE))
				break;
			if (ktime_after(ktime_get(), timeout))
				break;
			usleep_range(1000, 1100);
		}
		if (val & APPL_LTR_MSG_2_LTR_MSG_REQ_STATE)
			dev_err(pcie->dev, "Failed to send LTR message\n");
	}

	return IRQ_HANDLED;
}

/*
 * Endpoint mode hard IRQ handler. Handles hot-reset-done and link-up
 * events inline; wakes the IRQ thread for link-up/BME work.
 */
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
	struct tegra_pcie_dw *pcie = arg;
	int spurious = 1;
	u32 status_l0, status_l1, link_status;

	status_l0 = appl_readl(pcie, APPL_INTR_STATUS_L0);
	if (status_l0 & APPL_INTR_STATUS_L0_LINK_STATE_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_0_0);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_0_0);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_HOT_RESET_DONE)
			pex_ep_event_hot_rst_done(pcie);

		if (status_l1 & APPL_INTR_STATUS_L1_0_0_RDLH_LINK_UP_CHGED) {
			link_status = appl_readl(pcie, APPL_LINK_STATUS);
			if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
				dev_dbg(pcie->dev, "Link is up with Host\n");
				set_bit(0, &pcie->link_status);
				return IRQ_WAKE_THREAD;
			}
		}

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_PCI_CMD_EN_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_15);
		appl_writel(pcie, status_l1, APPL_INTR_STATUS_L1_15);

		if (status_l1 & APPL_INTR_STATUS_L1_15_CFG_BME_CHGED)
			return IRQ_WAKE_THREAD;

		spurious = 0;
	}

	if (status_l0 & APPL_INTR_STATUS_L0_INT_INT) {
		status_l1 = appl_readl(pcie, APPL_INTR_STATUS_L1_8_0);

		/*
		 * Interrupt is handled by DMA driver; don't treat it as
		 * spurious
		 */
		if (status_l1 & APPL_INTR_STATUS_L1_8_0_EDMA_INT_MASK)
			spurious = 0;
	}

	if (spurious) {
		dev_warn(pcie->dev, "Random interrupt (STATUS = 0x%08X)\n",
			 status_l0);
		appl_writel(pcie, status_l0, APPL_INTR_STATUS_L0);
	}

	return IRQ_HANDLED;
}

/* Config read for the root port's own config space, with MSI-X doorbell WAR. */
static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 *val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint mode specific register happen to appear even
	 * when controller is operating in root port mode and system hangs
	 * when it is accessed with link being in ASPM-L1 state.
	 * So skip accessing it altogether
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL) {
		*val = 0x00000000;
		return PCIBIOS_SUCCESSFUL;
	}

	return pci_generic_config_read(bus, devfn, where, size, val);
}

/* Config write counterpart of tegra_pcie_dw_rd_own_conf(). */
static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
				     int size, u32 val)
{
	struct dw_pcie_rp *pp = bus->sysdata;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/*
	 * This is an endpoint mode specific register happen to appear even
	 * when controller is operating in root port mode and system hangs
	 * when it is accessed with link being in ASPM-L1 state.
	 * So skip accessing it altogether
	 */
	if (!pcie->of_data->has_msix_doorbell_access_fix &&
	    !PCI_SLOT(devfn) && where == PORT_LOGIC_MSIX_DOORBELL)
		return PCIBIOS_SUCCESSFUL;

	return pci_generic_config_write(bus, devfn, where, size, val);
}

static struct pci_ops tegra_pci_ops = {
	.map_bus = dw_pcie_own_conf_map_bus,
	.read = tegra_pcie_dw_rd_own_conf,
	.write = tegra_pcie_dw_wr_own_conf,
};

#if defined(CONFIG_PCIEASPM)
/*
 * Select @event in the RAS DES event counter group 5 and return the
 * current counter value.
 */
static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
{
	u32 val;

	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_CONTROL);
	val &= ~(EVENT_COUNTER_EVENT_SEL_MASK << EVENT_COUNTER_EVENT_SEL_SHIFT);
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	val |= event << EVENT_COUNTER_EVENT_SEL_SHIFT;
	val |= EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);
	val = dw_pcie_readl_dbi(&pcie->pci, pcie->ras_des_cap +
				PCIE_RAS_DES_EVENT_COUNTER_DATA);

	return val;
}

/* debugfs "aspm_state_cnt" show callback: dump and reset ASPM counters. */
static int aspm_state_cnt(struct seq_file *s, void *data)
{
	struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
				     dev_get_drvdata(s->private);
	u32 val;

	seq_printf(s, "Tx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Tx_L0S));

	seq_printf(s, "Rx L0s entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_Rx_L0S));

	seq_printf(s, "Link L1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1));

	seq_printf(s, "Link L1.1 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_1));

	seq_printf(s, "Link L1.2 entry count : %u\n",
		   event_counter_prog(pcie, EVENT_COUNTER_EVENT_L1_2));

	/* Clear all counters */
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL,
			   EVENT_COUNTER_ALL_CLEAR);

	/* Re-enable counting */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(&pcie->pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	return 0;
}

/*
 * Program L1SS timing parameters and ASPM entrance latencies, enable the
 * RAS DES ASPM event counters, and apply the Tegra234 EP L1.2 erratum WAR.
 */
static void init_host_aspm(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 l1ss, val;

	l1ss = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_L1SS);

	pcie->ras_des_cap = dw_pcie_find_ext_capability(&pcie->pci,
							PCI_EXT_CAP_ID_VNDR);

	/* Enable ASPM counters */
	val = EVENT_COUNTER_ENABLE_ALL << EVENT_COUNTER_ENABLE_SHIFT;
	val |= EVENT_COUNTER_GROUP_5 << EVENT_COUNTER_GROUP_SEL_SHIFT;
	dw_pcie_writel_dbi(pci, pcie->ras_des_cap +
			   PCIE_RAS_DES_EVENT_COUNTER_CONTROL, val);

	/* Program T_cmrt and T_pwr_on values */
	val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
	val &= ~(PCI_L1SS_CAP_CM_RESTORE_TIME | PCI_L1SS_CAP_P_PWR_ON_VALUE);
	val |= (pcie->aspm_cmrt << 8);
	val |= (pcie->aspm_pwr_on_t << 19);
	dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, val);

	if (pcie->supports_clkreq)
		pci->l1ss_support = true;

	/*
	 * Disable L1.2 capability advertisement for Tegra234 Endpoint mode.
	 * Tegra234 has a hardware bug where during L1.2 exit, the UPHY PLL is
	 * powered up immediately without waiting for REFCLK to stabilize. This
	 * causes the PLL to fail to lock to the correct frequency, resulting in
	 * PCIe link loss. Since there is no hardware fix available, we prevent
	 * the Endpoint from advertising L1.2 support by clearing the L1.2 bits
	 * in the L1 PM Substates Capabilities register. This ensures the host
	 * will not attempt to enter L1.2 state with this Endpoint.
	 */
	if (pcie->of_data->disable_l1_2 &&
	    pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		val = dw_pcie_readl_dbi(pci, l1ss + PCI_L1SS_CAP);
		val &= ~(PCI_L1SS_CAP_PCIPM_L1_2 | PCI_L1SS_CAP_ASPM_L1_2);
		dw_pcie_writel_dbi(pci, l1ss + PCI_L1SS_CAP, val);
	}

	/* Program L0s and L1 entrance latencies */
	val = dw_pcie_readl_dbi(pci, PCIE_PORT_AFR);
	val &= ~PORT_AFR_L0S_ENTRANCE_LAT_MASK;
	val |= (pcie->aspm_l0s_enter_lat << PORT_AFR_L0S_ENTRANCE_LAT_SHIFT);
	val |= PORT_AFR_ENTER_ASPM;
	dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}

/* Create the per-controller debugfs directory and the aspm_state_cnt file. */
static void init_debugfs(struct tegra_pcie_dw *pcie)
{
	struct device *dev = pcie->dev;
	char *name;

	name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node);
	if (!name)
		return;

	pcie->debugfs = debugfs_create_dir(name, NULL);

	debugfs_create_devm_seqfile(dev, "aspm_state_cnt", pcie->debugfs,
				    aspm_state_cnt);
}
#else
static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
#endif

/* Enable link-state, SBR-WAR, CDM-check and bandwidth-change interrupts. */
static void tegra_pcie_enable_system_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_w;

	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_INTR_EN_L1_0_0);
		val |= APPL_INTR_EN_L1_0_0_LINK_REQ_RST_NOT_INT_EN;
		appl_writel(pcie, val, APPL_INTR_EN_L1_0_0);
	}

	if (pcie->enable_cdm_check) {
		val = appl_readl(pcie, APPL_INTR_EN_L0_0);
		val |= pcie->of_data->cdm_chk_int_en_bit;
		appl_writel(pcie, val, APPL_INTR_EN_L0_0);

		val = appl_readl(pcie, APPL_INTR_EN_L1_18);
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_CMP_ERR;
		val |= APPL_INTR_EN_L1_18_CDM_REG_CHK_LOGIC_ERR;
		appl_writel(pcie, val, APPL_INTR_EN_L1_18);
	}

	/* Record the initial link width for apply_bad_link_workaround() */
	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKSTA);
	pcie->init_link_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val_w);

	val_w = dw_pcie_readw_dbi(&pcie->pci, pcie->pcie_cap_base +
				  PCI_EXP_LNKCTL);
	val_w |= PCI_EXP_LNKCTL_LBMIE;
	dw_pcie_writew_dbi(&pcie->pci, pcie->pcie_cap_base + PCI_EXP_LNKCTL,
			   val_w);
}

/* Enable INTx (and related bandwidth/eDMA/AER) interrupt generation. */
static void tegra_pcie_enable_intx_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable INTX interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_INTR_EN;
	val |= APPL_INTR_EN_L0_0_INT_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);

	val = appl_readl(pcie, APPL_INTR_EN_L1_8_0);
	val |= APPL_INTR_EN_L1_8_INTX_EN;
	val |= APPL_INTR_EN_L1_8_AUTO_BW_INT_EN;
	val |= APPL_INTR_EN_L1_8_BW_MGT_INT_EN;
	val |= APPL_INTR_EN_L1_8_EDMA_INT_EN;
	if (IS_ENABLED(CONFIG_PCIEAER))
		val |= APPL_INTR_EN_L1_8_AER_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L1_8_0);
}

/* Enable MSI interrupt generation at the APPL level. */
static void tegra_pcie_enable_msi_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;

	/* Enable MSI interrupt generation */
	val = appl_readl(pcie, APPL_INTR_EN_L0_0);
	val |= APPL_INTR_EN_L0_0_SYS_MSI_INTR_EN;
	val |= APPL_INTR_EN_L0_0_MSI_RCV_INT_EN;
	appl_writel(pcie, val, APPL_INTR_EN_L0_0);
}

/* Clear all latched interrupt statuses, then enable the interrupt sources. */
static void tegra_pcie_enable_interrupts(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	/* Clear interrupt statuses before enabling interrupts */
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15);
	appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17);

	tegra_pcie_enable_system_interrupts(pp);
	tegra_pcie_enable_intx_interrupts(pp);
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_enable_msi_interrupts(pp);
}

/*
 * Program per-lane Gen3/Gen4 equalization presets and the equalization
 * preset request vectors (rate shadow 0 = Gen3, 1 = Gen4).
 */
static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
{
	struct dw_pcie *pci = &pcie->pci;
	u32 val, offset, i;

	/* Program init preset */
	for (i = 0; i < pcie->num_lanes; i++) {
		val = dw_pcie_readw_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2));
		val &= ~CAP_SPCIE_CAP_OFF_DSP_TX_PRESET0_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			   CAP_SPCIE_CAP_OFF_USP_TX_PRESET0_SHIFT);
		dw_pcie_writew_dbi(pci, CAP_SPCIE_CAP_OFF + (i * 2), val);

		offset = dw_pcie_find_ext_capability(pci,
						     PCI_EXT_CAP_ID_PL_16GT) +
				PCI_PL_16GT_LE_CTRL;
		val = dw_pcie_readb_dbi(pci, offset + i);
		val &= ~PCI_PL_16GT_LE_CTRL_DSP_TX_PRESET_MASK;
		val |= GEN3_GEN4_EQ_PRESET_INIT;
		val &= ~PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_MASK;
		val |= (GEN3_GEN4_EQ_PRESET_INIT <<
			PCI_PL_16GT_LE_CTRL_USP_TX_PRESET_SHIFT);
		dw_pcie_writeb_dbi(pci, offset + i, val);
	}

	/* Select rate shadow 0 (Gen3) and program its preset request vector */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
	val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC, 0x3ff);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	/* Select rate shadow 1 (Gen4) and program the SoC-specific vector */
	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	val |= (0x1 << GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT);
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_EQ_CONTROL_OFF);
	val &= ~GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC;
	val |= FIELD_PREP(GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC,
			  pcie->of_data->gen4_preset_vec);
	val &= ~GEN3_EQ_CONTROL_OFF_FB_MODE;
	dw_pcie_writel_dbi(pci, GEN3_EQ_CONTROL_OFF, val);

	val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
	val &= ~GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK;
	dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}

/*
 * DWC host .init callback: program root-port config space defaults,
 * EQ presets, ASPM parameters and clocks before link-up is attempted.
 */
static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val;
	u16 val_16;

	pp->bridge->ops = &tegra_pci_ops;

	if (!pcie->pcie_cap_base)
		pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
							      PCI_CAP_ID_EXP);

	val = dw_pcie_readl_dbi(pci, PCI_IO_BASE);
	val &= ~(IO_BASE_IO_DECODE | IO_BASE_IO_DECODE_BIT8);
	dw_pcie_writel_dbi(pci, PCI_IO_BASE, val);

	val = dw_pcie_readl_dbi(pci, PCI_PREF_MEMORY_BASE);
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_DECODE;
	val |= CFG_PREF_MEM_LIMIT_BASE_MEM_LIMIT_DECODE;
	dw_pcie_writel_dbi(pci, PCI_PREF_MEMORY_BASE, val);

	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);

	/* Enable as 0xFFFF0001 response for RRS */
	val = dw_pcie_readl_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT);
	val &= ~(AMBA_ERROR_RESPONSE_RRS_MASK << AMBA_ERROR_RESPONSE_RRS_SHIFT);
	val |= (AMBA_ERROR_RESPONSE_RRS_OKAY_FFFF0001 <<
		AMBA_ERROR_RESPONSE_RRS_SHIFT);
	dw_pcie_writel_dbi(pci, PORT_LOGIC_AMBA_ERROR_RESPONSE_DEFAULT, val);

	/* Clear Slot Clock Configuration bit if SRNS configuration */
	if (pcie->enable_srns) {
		val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base +
					   PCI_EXP_LNKSTA);
		val_16 &= ~PCI_EXP_LNKSTA_SLC;
		dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA,
				   val_16);
	}

	config_gen3_gen4_eq_presets(pcie);

	init_host_aspm(pcie);

	if (!pcie->of_data->has_l1ss_exit_fix) {
		val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF);
		val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL;
		dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
	}

	if (pcie->update_fc_fixup) {
		val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF);
		val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT;
		dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
	}

	clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
	if (clk_prepare_enable(pcie->core_clk_m))
		dev_err(pci->dev, "Failed to enable core monitor clock\n");

	return 0;
}

/*
 * DWC .start_link callback. RP mode: deassert PERST, enable the LTSSM and
 * wait for the link; if training fails in a way consistent with the DLF
 * incompatibility erratum, retry once with DLF exchange disabled.
 * EP mode: just arm the PERST IRQ.
 */
static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	struct dw_pcie_rp *pp = &pci->pp;
	u32 val, offset, tmp;
	bool retry = true;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		enable_irq(pcie->pex_rst_irq);
		return 0;
	}

retry_link:
	/* Assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val &= ~APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	usleep_range(100, 200);

	/* Enable LTSSM */
	val = appl_readl(pcie, APPL_CTRL);
	val |= APPL_CTRL_LTSSM_EN;
	appl_writel(pcie, val, APPL_CTRL);

	/* De-assert RST */
	val = appl_readl(pcie, APPL_PINMUX);
	val |= APPL_PINMUX_PEX_RST;
	appl_writel(pcie, val, APPL_PINMUX);

	msleep(100);

	if (dw_pcie_wait_for_link(pci)) {
		if (!retry)
			return 0;
		/*
		 * There are some endpoints which can't get the link up if
		 * root port has Data Link Feature (DLF) enabled.
		 * Refer Spec rev 4.0 ver 1.0 sec 3.4.2 & 7.7.4 for more info
		 * on Scaled Flow Control and DLF.
		 * So, need to confirm that is indeed the case here and attempt
		 * link up once again with DLF disabled.
		 */
		val = appl_readl(pcie, APPL_DEBUG);
		val &= APPL_DEBUG_LTSSM_STATE_MASK;
		val >>= APPL_DEBUG_LTSSM_STATE_SHIFT;
		tmp = appl_readl(pcie, APPL_LINK_STATUS);
		tmp &= APPL_LINK_STATUS_RDLH_LINK_UP;
		if (!(val == 0x11 && !tmp)) {
			/* Link is down for all good reasons */
			return 0;
		}

		dev_info(pci->dev, "Link is down in DLL");
		dev_info(pci->dev, "Trying again with DLFE disabled\n");
		/* Disable LTSSM */
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~APPL_CTRL_LTSSM_EN;
		appl_writel(pcie, val, APPL_CTRL);

		reset_control_assert(pcie->core_rst);
		reset_control_deassert(pcie->core_rst);

		offset = dw_pcie_find_ext_capability(pci, PCI_EXT_CAP_ID_DLF);
		val = dw_pcie_readl_dbi(pci, offset + PCI_DLF_CAP);
		val &= ~PCI_DLF_EXCHANGE_ENABLE;
		dw_pcie_writel_dbi(pci, offset + PCI_DLF_CAP, val);

		/*
		 * core_clk_m is enabled as part of host_init callback in
		 * dw_pcie_host_init(). Disable the clock since below
		 * tegra_pcie_dw_host_init() will enable it again.
		 */
		clk_disable_unprepare(pcie->core_clk_m);
		tegra_pcie_dw_host_init(pp);
		dw_pcie_setup_rc(pp);

		retry = false;
		goto retry_link;
	}

	tegra_pcie_icc_set(pcie);

	tegra_pcie_enable_interrupts(pp);

	return 0;
}

/* DWC .link_up callback: Data Link Layer Link Active from LNKSTA. */
static bool tegra_pcie_dw_link_up(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
	u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);

	return val & PCI_EXP_LNKSTA_DLLLA;
}

/* DWC .stop_link callback: in EP mode, disarm the PERST IRQ. */
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
	struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
		disable_irq(pcie->pex_rst_irq);
}

static const struct dw_pcie_ops tegra_dw_pcie_ops = {
	.link_up = tegra_pcie_dw_link_up,
	.start_link = tegra_pcie_dw_start_link,
	.stop_link = tegra_pcie_dw_stop_link,
};

static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
	.init = tegra_pcie_dw_host_init,
};

/* Power off and exit all PHYs, in reverse order of tegra_pcie_enable_phy(). */
static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int phy_count = pcie->phy_count;

	while (phy_count--) {
		phy_power_off(pcie->phys[phy_count]);
		phy_exit(pcie->phys[phy_count]);
	}
}

/*
 * Init and power on every PHY (calibrating in EP mode); on failure, unwind
 * the already-enabled PHYs. Note the error labels jump INTO the unwind
 * loop so the partially-initialized PHY i is cleaned up appropriately.
 */
static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pcie->phy_count; i++) {
		ret = phy_init(pcie->phys[i]);
		if (ret < 0)
			goto phy_power_off;

		ret = phy_power_on(pcie->phys[i]);
		if (ret < 0)
			goto phy_exit;

		if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
			phy_calibrate(pcie->phys[i]);
	}

	return 0;

phy_power_off:
	while (i--) {
		phy_power_off(pcie->phys[i]);
phy_exit:
		phy_exit(pcie->phys[i]);
	}

	return ret;
}

static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw
*pcie) 1115 { 1116 struct platform_device *pdev = to_platform_device(pcie->dev); 1117 struct device_node *np = pcie->dev->of_node; 1118 int ret; 1119 1120 pcie->dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); 1121 if (!pcie->dbi_res) { 1122 dev_err(pcie->dev, "Failed to find \"dbi\" region\n"); 1123 return -ENODEV; 1124 } 1125 1126 ret = of_property_read_u32(np, "nvidia,aspm-cmrt-us", &pcie->aspm_cmrt); 1127 if (ret < 0) { 1128 dev_info(pcie->dev, "Failed to read ASPM T_cmrt: %d\n", ret); 1129 return ret; 1130 } 1131 1132 ret = of_property_read_u32(np, "nvidia,aspm-pwr-on-t-us", 1133 &pcie->aspm_pwr_on_t); 1134 if (ret < 0) 1135 dev_info(pcie->dev, "Failed to read ASPM Power On time: %d\n", 1136 ret); 1137 1138 ret = of_property_read_u32(np, "nvidia,aspm-l0s-entrance-latency-us", 1139 &pcie->aspm_l0s_enter_lat); 1140 if (ret < 0) 1141 dev_info(pcie->dev, 1142 "Failed to read ASPM L0s Entrance latency: %d\n", ret); 1143 1144 ret = of_property_read_u32(np, "num-lanes", &pcie->num_lanes); 1145 if (ret < 0) { 1146 dev_err(pcie->dev, "Failed to read num-lanes: %d\n", ret); 1147 return ret; 1148 } 1149 1150 ret = of_property_read_u32_index(np, "nvidia,bpmp", 1, &pcie->cid); 1151 if (ret) { 1152 dev_err(pcie->dev, "Failed to read Controller-ID: %d\n", ret); 1153 return ret; 1154 } 1155 1156 ret = of_property_count_strings(np, "phy-names"); 1157 if (ret < 0) { 1158 dev_err(pcie->dev, "Failed to find PHY entries: %d\n", 1159 ret); 1160 return ret; 1161 } 1162 pcie->phy_count = ret; 1163 1164 if (of_property_read_bool(np, "nvidia,update-fc-fixup")) 1165 pcie->update_fc_fixup = true; 1166 1167 /* RP using an external REFCLK is supported only in Tegra234 */ 1168 if (pcie->of_data->version == TEGRA194_DWC_IP_VER) { 1169 if (pcie->of_data->mode == DW_PCIE_EP_TYPE) 1170 pcie->enable_ext_refclk = true; 1171 } else { 1172 pcie->enable_ext_refclk = 1173 of_property_read_bool(pcie->dev->of_node, 1174 "nvidia,enable-ext-refclk"); 1175 } 1176 1177 
pcie->supports_clkreq = 1178 of_property_read_bool(pcie->dev->of_node, "supports-clkreq"); 1179 1180 pcie->enable_cdm_check = 1181 of_property_read_bool(np, "snps,enable-cdm-check"); 1182 1183 if (pcie->of_data->version == TEGRA234_DWC_IP_VER) 1184 pcie->enable_srns = 1185 of_property_read_bool(np, "nvidia,enable-srns"); 1186 1187 if (pcie->of_data->mode == DW_PCIE_RC_TYPE) 1188 return 0; 1189 1190 /* Endpoint mode specific DT entries */ 1191 pcie->pex_rst_gpiod = devm_gpiod_get(pcie->dev, "reset", GPIOD_IN); 1192 if (IS_ERR(pcie->pex_rst_gpiod)) { 1193 int err = PTR_ERR(pcie->pex_rst_gpiod); 1194 const char *level = KERN_ERR; 1195 1196 if (err == -EPROBE_DEFER) 1197 level = KERN_DEBUG; 1198 1199 dev_printk(level, pcie->dev, 1200 dev_fmt("Failed to get PERST GPIO: %d\n"), 1201 err); 1202 return err; 1203 } 1204 1205 pcie->pex_refclk_sel_gpiod = devm_gpiod_get_optional(pcie->dev, 1206 "nvidia,refclk-select", 1207 GPIOD_OUT_HIGH); 1208 if (IS_ERR(pcie->pex_refclk_sel_gpiod)) { 1209 int err = PTR_ERR(pcie->pex_refclk_sel_gpiod); 1210 const char *level = KERN_ERR; 1211 1212 if (err == -EPROBE_DEFER) 1213 level = KERN_DEBUG; 1214 1215 dev_printk(level, pcie->dev, 1216 dev_fmt("Failed to get REFCLK select GPIOs: %d\n"), 1217 err); 1218 pcie->pex_refclk_sel_gpiod = NULL; 1219 } 1220 1221 return 0; 1222 } 1223 1224 static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie, 1225 bool enable) 1226 { 1227 struct mrq_uphy_response resp; 1228 struct tegra_bpmp_message msg; 1229 struct mrq_uphy_request req; 1230 int err; 1231 1232 /* 1233 * Controller-5 doesn't need to have its state set by BPMP-FW in 1234 * Tegra194 1235 */ 1236 if (pcie->of_data->version == TEGRA194_DWC_IP_VER && pcie->cid == 5) 1237 return 0; 1238 1239 memset(&req, 0, sizeof(req)); 1240 memset(&resp, 0, sizeof(resp)); 1241 1242 req.cmd = CMD_UPHY_PCIE_CONTROLLER_STATE; 1243 req.controller_state.pcie_controller = pcie->cid; 1244 req.controller_state.enable = enable; 1245 1246 memset(&msg, 0, 
sizeof(msg)); 1247 msg.mrq = MRQ_UPHY; 1248 msg.tx.data = &req; 1249 msg.tx.size = sizeof(req); 1250 msg.rx.data = &resp; 1251 msg.rx.size = sizeof(resp); 1252 1253 err = tegra_bpmp_transfer(pcie->bpmp, &msg); 1254 if (err) 1255 return err; 1256 if (msg.rx.ret) 1257 return -EINVAL; 1258 1259 return 0; 1260 } 1261 1262 static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie, 1263 bool enable) 1264 { 1265 struct mrq_uphy_response resp; 1266 struct tegra_bpmp_message msg; 1267 struct mrq_uphy_request req; 1268 int err; 1269 1270 memset(&req, 0, sizeof(req)); 1271 memset(&resp, 0, sizeof(resp)); 1272 1273 if (enable) { 1274 req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_INIT; 1275 req.ep_ctrlr_pll_init.ep_controller = pcie->cid; 1276 } else { 1277 req.cmd = CMD_UPHY_PCIE_EP_CONTROLLER_PLL_OFF; 1278 req.ep_ctrlr_pll_off.ep_controller = pcie->cid; 1279 } 1280 1281 memset(&msg, 0, sizeof(msg)); 1282 msg.mrq = MRQ_UPHY; 1283 msg.tx.data = &req; 1284 msg.tx.size = sizeof(req); 1285 msg.rx.data = &resp; 1286 msg.rx.size = sizeof(resp); 1287 1288 err = tegra_bpmp_transfer(pcie->bpmp, &msg); 1289 if (err) 1290 return err; 1291 if (msg.rx.ret) 1292 return -EINVAL; 1293 1294 return 0; 1295 } 1296 1297 static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie) 1298 { 1299 pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3"); 1300 if (IS_ERR(pcie->slot_ctl_3v3)) { 1301 if (PTR_ERR(pcie->slot_ctl_3v3) != -ENODEV) 1302 return PTR_ERR(pcie->slot_ctl_3v3); 1303 1304 pcie->slot_ctl_3v3 = NULL; 1305 } 1306 1307 pcie->slot_ctl_12v = devm_regulator_get_optional(pcie->dev, "vpcie12v"); 1308 if (IS_ERR(pcie->slot_ctl_12v)) { 1309 if (PTR_ERR(pcie->slot_ctl_12v) != -ENODEV) 1310 return PTR_ERR(pcie->slot_ctl_12v); 1311 1312 pcie->slot_ctl_12v = NULL; 1313 } 1314 1315 return 0; 1316 } 1317 1318 static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie) 1319 { 1320 int ret; 1321 1322 if (pcie->slot_ctl_3v3) { 1323 ret = 
regulator_enable(pcie->slot_ctl_3v3); 1324 if (ret < 0) { 1325 dev_err(pcie->dev, 1326 "Failed to enable 3.3V slot supply: %d\n", ret); 1327 return ret; 1328 } 1329 } 1330 1331 if (pcie->slot_ctl_12v) { 1332 ret = regulator_enable(pcie->slot_ctl_12v); 1333 if (ret < 0) { 1334 dev_err(pcie->dev, 1335 "Failed to enable 12V slot supply: %d\n", ret); 1336 goto fail_12v_enable; 1337 } 1338 } 1339 1340 /* 1341 * According to PCI Express Card Electromechanical Specification 1342 * Revision 1.1, Table-2.4, T_PVPERL (Power stable to PERST# inactive) 1343 * should be a minimum of 100ms. 1344 */ 1345 if (pcie->slot_ctl_3v3 || pcie->slot_ctl_12v) 1346 msleep(100); 1347 1348 return 0; 1349 1350 fail_12v_enable: 1351 if (pcie->slot_ctl_3v3) 1352 regulator_disable(pcie->slot_ctl_3v3); 1353 return ret; 1354 } 1355 1356 static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie) 1357 { 1358 if (pcie->slot_ctl_12v) 1359 regulator_disable(pcie->slot_ctl_12v); 1360 if (pcie->slot_ctl_3v3) 1361 regulator_disable(pcie->slot_ctl_3v3); 1362 } 1363 1364 static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie, 1365 bool en_hw_hot_rst) 1366 { 1367 int ret; 1368 u32 val; 1369 1370 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); 1371 if (ret) { 1372 dev_err(pcie->dev, 1373 "Failed to enable controller %u: %d\n", pcie->cid, ret); 1374 return ret; 1375 } 1376 1377 if (pcie->enable_ext_refclk) { 1378 ret = tegra_pcie_bpmp_set_pll_state(pcie, true); 1379 if (ret) { 1380 dev_err(pcie->dev, "Failed to init UPHY: %d\n", ret); 1381 goto fail_pll_init; 1382 } 1383 } 1384 1385 ret = tegra_pcie_enable_slot_regulators(pcie); 1386 if (ret < 0) 1387 goto fail_slot_reg_en; 1388 1389 ret = regulator_enable(pcie->pex_ctl_supply); 1390 if (ret < 0) { 1391 dev_err(pcie->dev, "Failed to enable regulator: %d\n", ret); 1392 goto fail_reg_en; 1393 } 1394 1395 ret = clk_prepare_enable(pcie->core_clk); 1396 if (ret) { 1397 dev_err(pcie->dev, "Failed to enable core clock: %d\n", ret); 1398 
goto fail_core_clk; 1399 } 1400 1401 ret = reset_control_deassert(pcie->core_apb_rst); 1402 if (ret) { 1403 dev_err(pcie->dev, "Failed to deassert core APB reset: %d\n", 1404 ret); 1405 goto fail_core_apb_rst; 1406 } 1407 1408 if (en_hw_hot_rst || pcie->of_data->has_sbr_reset_fix) { 1409 /* Enable HW_HOT_RST mode */ 1410 val = appl_readl(pcie, APPL_CTRL); 1411 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << 1412 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 1413 val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN << 1414 APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 1415 val |= APPL_CTRL_HW_HOT_RST_EN; 1416 appl_writel(pcie, val, APPL_CTRL); 1417 } 1418 1419 ret = tegra_pcie_enable_phy(pcie); 1420 if (ret) { 1421 dev_err(pcie->dev, "Failed to enable PHY: %d\n", ret); 1422 goto fail_phy; 1423 } 1424 1425 /* Update CFG base address */ 1426 appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, 1427 APPL_CFG_BASE_ADDR); 1428 1429 /* Configure this core for RP mode operation */ 1430 appl_writel(pcie, APPL_DM_TYPE_RP, APPL_DM_TYPE); 1431 1432 appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); 1433 1434 val = appl_readl(pcie, APPL_CTRL); 1435 appl_writel(pcie, val | APPL_CTRL_SYS_PRE_DET_STATE, APPL_CTRL); 1436 1437 val = appl_readl(pcie, APPL_CFG_MISC); 1438 val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); 1439 appl_writel(pcie, val, APPL_CFG_MISC); 1440 1441 if (pcie->enable_srns || pcie->enable_ext_refclk) { 1442 /* 1443 * When Tegra PCIe RP is using external clock, it cannot supply 1444 * same clock to its downstream hierarchy. Hence, gate PCIe RP 1445 * REFCLK out pads when RP & EP are using separate clocks or RP 1446 * is using an external REFCLK. 
1447 */ 1448 val = appl_readl(pcie, APPL_PINMUX); 1449 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; 1450 val &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; 1451 appl_writel(pcie, val, APPL_PINMUX); 1452 } 1453 1454 if (!pcie->supports_clkreq) { 1455 val = appl_readl(pcie, APPL_PINMUX); 1456 val |= APPL_PINMUX_CLKREQ_OVERRIDE_EN; 1457 val &= ~APPL_PINMUX_CLKREQ_OVERRIDE; 1458 val &= ~APPL_PINMUX_CLKREQ_DEFAULT_VALUE; 1459 appl_writel(pcie, val, APPL_PINMUX); 1460 } 1461 1462 /* Update iATU_DMA base address */ 1463 appl_writel(pcie, 1464 pcie->atu_dma_res->start & APPL_CFG_IATU_DMA_BASE_ADDR_MASK, 1465 APPL_CFG_IATU_DMA_BASE_ADDR); 1466 1467 reset_control_deassert(pcie->core_rst); 1468 1469 return ret; 1470 1471 fail_phy: 1472 reset_control_assert(pcie->core_apb_rst); 1473 fail_core_apb_rst: 1474 clk_disable_unprepare(pcie->core_clk); 1475 fail_core_clk: 1476 regulator_disable(pcie->pex_ctl_supply); 1477 fail_reg_en: 1478 tegra_pcie_disable_slot_regulators(pcie); 1479 fail_slot_reg_en: 1480 if (pcie->enable_ext_refclk) 1481 tegra_pcie_bpmp_set_pll_state(pcie, false); 1482 fail_pll_init: 1483 tegra_pcie_bpmp_set_ctrl_state(pcie, false); 1484 1485 return ret; 1486 } 1487 1488 static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie) 1489 { 1490 int ret; 1491 1492 ret = reset_control_assert(pcie->core_rst); 1493 if (ret) 1494 dev_err(pcie->dev, "Failed to assert \"core\" reset: %d\n", ret); 1495 1496 tegra_pcie_disable_phy(pcie); 1497 1498 ret = reset_control_assert(pcie->core_apb_rst); 1499 if (ret) 1500 dev_err(pcie->dev, "Failed to assert APB reset: %d\n", ret); 1501 1502 clk_disable_unprepare(pcie->core_clk); 1503 1504 ret = regulator_disable(pcie->pex_ctl_supply); 1505 if (ret) 1506 dev_err(pcie->dev, "Failed to disable regulator: %d\n", ret); 1507 1508 tegra_pcie_disable_slot_regulators(pcie); 1509 1510 if (pcie->enable_ext_refclk) { 1511 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1512 if (ret) 1513 dev_err(pcie->dev, "Failed to deinit UPHY: %d\n", 
ret); 1514 } 1515 1516 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); 1517 if (ret) 1518 dev_err(pcie->dev, "Failed to disable controller %d: %d\n", 1519 pcie->cid, ret); 1520 } 1521 1522 static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie) 1523 { 1524 struct dw_pcie *pci = &pcie->pci; 1525 struct dw_pcie_rp *pp = &pci->pp; 1526 int ret; 1527 1528 ret = tegra_pcie_config_controller(pcie, false); 1529 if (ret < 0) 1530 return ret; 1531 1532 pp->ops = &tegra_pcie_dw_host_ops; 1533 1534 ret = dw_pcie_host_init(pp); 1535 if (ret < 0) { 1536 dev_err(pcie->dev, "Failed to add PCIe port: %d\n", ret); 1537 goto fail_host_init; 1538 } 1539 1540 return 0; 1541 1542 fail_host_init: 1543 tegra_pcie_unconfig_controller(pcie); 1544 return ret; 1545 } 1546 1547 static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie) 1548 { 1549 u32 val; 1550 1551 if (!tegra_pcie_dw_link_up(&pcie->pci)) 1552 return 0; 1553 1554 val = appl_readl(pcie, APPL_RADM_STATUS); 1555 val |= APPL_PM_XMT_TURNOFF_STATE; 1556 appl_writel(pcie, val, APPL_RADM_STATUS); 1557 1558 return readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, 1559 val & APPL_DEBUG_PM_LINKST_IN_L2_LAT, 1560 PCIE_PME_TO_L2_TIMEOUT_US/10, 1561 PCIE_PME_TO_L2_TIMEOUT_US); 1562 } 1563 1564 static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie) 1565 { 1566 u32 data; 1567 int err; 1568 1569 if (!tegra_pcie_dw_link_up(&pcie->pci)) { 1570 dev_dbg(pcie->dev, "PCIe link is not up...!\n"); 1571 return; 1572 } 1573 1574 /* 1575 * PCIe controller exits from L2 only if reset is applied, so 1576 * controller doesn't handle interrupts. But in cases where 1577 * L2 entry fails, PERST# is asserted which can trigger surprise 1578 * link down AER. However this function call happens in 1579 * suspend_noirq(), so AER interrupt will not be processed. 1580 * Disable all interrupts to avoid such a scenario. 
1581 */ 1582 appl_writel(pcie, 0x0, APPL_INTR_EN_L0_0); 1583 1584 if (tegra_pcie_try_link_l2(pcie)) { 1585 dev_info(pcie->dev, "Link didn't transition to L2 state\n"); 1586 /* 1587 * TX lane clock freq will reset to Gen1 only if link is in L2 1588 * or detect state. 1589 * So apply pex_rst to end point to force RP to go into detect 1590 * state 1591 */ 1592 data = appl_readl(pcie, APPL_PINMUX); 1593 data &= ~APPL_PINMUX_PEX_RST; 1594 appl_writel(pcie, data, APPL_PINMUX); 1595 1596 err = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, data, 1597 ((data & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_QUIET) || 1598 ((data & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_ACT) || 1599 ((data & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_PRE_DETECT_QUIET) || 1600 ((data & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_WAIT), 1601 LTSSM_DELAY_US, LTSSM_TIMEOUT_US); 1602 if (err) 1603 dev_info(pcie->dev, "LTSSM state: 0x%x detect timeout: %d\n", data, err); 1604 1605 /* 1606 * Deassert LTSSM state to stop the state toggling between 1607 * Polling and Detect. 
1608 */ 1609 data = readl(pcie->appl_base + APPL_CTRL); 1610 data &= ~APPL_CTRL_LTSSM_EN; 1611 writel(data, pcie->appl_base + APPL_CTRL); 1612 } 1613 /* 1614 * DBI registers may not be accessible after this as PLL-E would be 1615 * down depending on how CLKREQ is pulled by end point 1616 */ 1617 data = appl_readl(pcie, APPL_PINMUX); 1618 data |= (APPL_PINMUX_CLKREQ_OVERRIDE_EN | APPL_PINMUX_CLKREQ_OVERRIDE); 1619 /* Cut REFCLK to slot */ 1620 data |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; 1621 data &= ~APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; 1622 appl_writel(pcie, data, APPL_PINMUX); 1623 } 1624 1625 static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie) 1626 { 1627 clk_disable_unprepare(pcie->core_clk_m); 1628 dw_pcie_host_deinit(&pcie->pci.pp); 1629 tegra_pcie_dw_pme_turnoff(pcie); 1630 tegra_pcie_unconfig_controller(pcie); 1631 } 1632 1633 static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie) 1634 { 1635 struct device *dev = pcie->dev; 1636 int ret; 1637 1638 pm_runtime_enable(dev); 1639 1640 ret = pm_runtime_get_sync(dev); 1641 if (ret < 0) { 1642 dev_err(dev, "Failed to get runtime sync for PCIe dev: %d\n", 1643 ret); 1644 goto fail_pm_get_sync; 1645 } 1646 1647 ret = pinctrl_pm_select_default_state(dev); 1648 if (ret < 0) { 1649 dev_err(dev, "Failed to configure sideband pins: %d\n", ret); 1650 goto fail_pm_get_sync; 1651 } 1652 1653 ret = tegra_pcie_init_controller(pcie); 1654 if (ret < 0) { 1655 dev_err(dev, "Failed to initialize controller: %d\n", ret); 1656 goto fail_pm_get_sync; 1657 } 1658 1659 pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci); 1660 if (!pcie->link_state) { 1661 ret = -ENOMEDIUM; 1662 goto fail_host_init; 1663 } 1664 1665 init_debugfs(pcie); 1666 1667 return ret; 1668 1669 fail_host_init: 1670 tegra_pcie_deinit_controller(pcie); 1671 fail_pm_get_sync: 1672 pm_runtime_put_sync(dev); 1673 pm_runtime_disable(dev); 1674 return ret; 1675 } 1676 1677 static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie) 1678 { 
1679 u32 val; 1680 int ret; 1681 1682 if (pcie->ep_state == EP_STATE_DISABLED) 1683 return; 1684 1685 ret = readl_poll_timeout(pcie->appl_base + APPL_DEBUG, val, 1686 ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_QUIET) || 1687 ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_ACT) || 1688 ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_PRE_DETECT_QUIET) || 1689 ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_DETECT_WAIT) || 1690 ((val & APPL_DEBUG_LTSSM_STATE_MASK) == LTSSM_STATE_L2_IDLE), 1691 LTSSM_DELAY_US, LTSSM_TIMEOUT_US); 1692 if (ret) 1693 dev_info(pcie->dev, "LTSSM state: 0x%x detect timeout: %d\n", val, ret); 1694 1695 /* 1696 * Deassert LTSSM state to stop the state toggling between 1697 * Polling and Detect. 1698 */ 1699 val = appl_readl(pcie, APPL_CTRL); 1700 val &= ~APPL_CTRL_LTSSM_EN; 1701 appl_writel(pcie, val, APPL_CTRL); 1702 1703 reset_control_assert(pcie->core_rst); 1704 1705 tegra_pcie_disable_phy(pcie); 1706 1707 reset_control_assert(pcie->core_apb_rst); 1708 1709 clk_disable_unprepare(pcie->core_clk); 1710 1711 pm_runtime_put_sync(pcie->dev); 1712 1713 if (pcie->enable_ext_refclk) { 1714 ret = tegra_pcie_bpmp_set_pll_state(pcie, false); 1715 if (ret) 1716 dev_err(pcie->dev, "Failed to turn off UPHY: %d\n", 1717 ret); 1718 } 1719 1720 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, false); 1721 if (ret) 1722 dev_err(pcie->dev, "Failed to disable controller: %d\n", ret); 1723 1724 pcie->ep_state = EP_STATE_DISABLED; 1725 dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n"); 1726 } 1727 1728 static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie) 1729 { 1730 struct dw_pcie *pci = &pcie->pci; 1731 struct dw_pcie_ep *ep = &pci->ep; 1732 struct device *dev = pcie->dev; 1733 u32 val; 1734 int ret; 1735 u16 val_16; 1736 1737 if (pcie->ep_state == EP_STATE_ENABLED) 1738 return; 1739 1740 ret = pm_runtime_resume_and_get(dev); 1741 if (ret < 0) { 1742 dev_err(dev, "Failed to get runtime sync for PCIe 
dev: %d\n", 1743 ret); 1744 return; 1745 } 1746 1747 ret = tegra_pcie_bpmp_set_ctrl_state(pcie, true); 1748 if (ret) { 1749 dev_err(pcie->dev, "Failed to enable controller %u: %d\n", 1750 pcie->cid, ret); 1751 goto fail_set_ctrl_state; 1752 } 1753 1754 if (pcie->enable_ext_refclk) { 1755 ret = tegra_pcie_bpmp_set_pll_state(pcie, true); 1756 if (ret) { 1757 dev_err(dev, "Failed to init UPHY for PCIe EP: %d\n", 1758 ret); 1759 goto fail_pll_init; 1760 } 1761 } 1762 1763 ret = clk_prepare_enable(pcie->core_clk); 1764 if (ret) { 1765 dev_err(dev, "Failed to enable core clock: %d\n", ret); 1766 goto fail_core_clk_enable; 1767 } 1768 1769 ret = reset_control_deassert(pcie->core_apb_rst); 1770 if (ret) { 1771 dev_err(dev, "Failed to deassert core APB reset: %d\n", ret); 1772 goto fail_core_apb_rst; 1773 } 1774 1775 ret = tegra_pcie_enable_phy(pcie); 1776 if (ret) { 1777 dev_err(dev, "Failed to enable PHY: %d\n", ret); 1778 goto fail_phy; 1779 } 1780 1781 /* Clear any stale interrupt statuses */ 1782 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0); 1783 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_0_0); 1784 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_1); 1785 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_2); 1786 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_3); 1787 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_6); 1788 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_7); 1789 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_8_0); 1790 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_9); 1791 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_10); 1792 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_11); 1793 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_13); 1794 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_14); 1795 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_15); 1796 appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L1_17); 1797 1798 /* configure this core for EP mode operation */ 1799 val = appl_readl(pcie, APPL_DM_TYPE); 
1800 val &= ~APPL_DM_TYPE_MASK; 1801 val |= APPL_DM_TYPE_EP; 1802 appl_writel(pcie, val, APPL_DM_TYPE); 1803 1804 appl_writel(pcie, 0x0, APPL_CFG_SLCG_OVERRIDE); 1805 1806 val = appl_readl(pcie, APPL_CTRL); 1807 val |= APPL_CTRL_SYS_PRE_DET_STATE; 1808 val |= APPL_CTRL_HW_HOT_RST_EN; 1809 val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK << APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 1810 val |= (APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST_LTSSM_EN << APPL_CTRL_HW_HOT_RST_MODE_SHIFT); 1811 appl_writel(pcie, val, APPL_CTRL); 1812 1813 val = appl_readl(pcie, APPL_CFG_MISC); 1814 val |= APPL_CFG_MISC_SLV_EP_MODE; 1815 val |= (APPL_CFG_MISC_ARCACHE_VAL << APPL_CFG_MISC_ARCACHE_SHIFT); 1816 appl_writel(pcie, val, APPL_CFG_MISC); 1817 1818 val = appl_readl(pcie, APPL_PINMUX); 1819 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE_EN; 1820 val |= APPL_PINMUX_CLK_OUTPUT_IN_OVERRIDE; 1821 appl_writel(pcie, val, APPL_PINMUX); 1822 1823 appl_writel(pcie, pcie->dbi_res->start & APPL_CFG_BASE_ADDR_MASK, 1824 APPL_CFG_BASE_ADDR); 1825 1826 appl_writel(pcie, pcie->atu_dma_res->start & 1827 APPL_CFG_IATU_DMA_BASE_ADDR_MASK, 1828 APPL_CFG_IATU_DMA_BASE_ADDR); 1829 1830 val = appl_readl(pcie, APPL_INTR_EN_L0_0); 1831 val |= APPL_INTR_EN_L0_0_SYS_INTR_EN; 1832 val |= APPL_INTR_EN_L0_0_LINK_STATE_INT_EN; 1833 val |= APPL_INTR_EN_L0_0_PCI_CMD_EN_INT_EN; 1834 val |= APPL_INTR_EN_L0_0_INT_INT_EN; 1835 appl_writel(pcie, val, APPL_INTR_EN_L0_0); 1836 1837 val = appl_readl(pcie, APPL_INTR_EN_L1_0_0); 1838 val |= APPL_INTR_EN_L1_0_0_HOT_RESET_DONE_INT_EN; 1839 val |= APPL_INTR_EN_L1_0_0_RDLH_LINK_UP_INT_EN; 1840 appl_writel(pcie, val, APPL_INTR_EN_L1_0_0); 1841 1842 val = appl_readl(pcie, APPL_INTR_EN_L1_8_0); 1843 val |= APPL_INTR_EN_L1_8_EDMA_INT_EN; 1844 appl_writel(pcie, val, APPL_INTR_EN_L1_8_0); 1845 1846 /* 110us for both snoop and no-snoop */ 1847 val = FIELD_PREP(PCI_LTR_VALUE_MASK, 110) | 1848 FIELD_PREP(PCI_LTR_SCALE_MASK, 2) | 1849 LTR_MSG_REQ | 1850 FIELD_PREP(PCI_LTR_NOSNOOP_VALUE, 110) | 1851 
FIELD_PREP(PCI_LTR_NOSNOOP_SCALE, 2) | 1852 LTR_NOSNOOP_MSG_REQ; 1853 appl_writel(pcie, val, APPL_LTR_MSG_1); 1854 1855 reset_control_deassert(pcie->core_rst); 1856 1857 /* Perform cleanup that requires refclk and core reset deasserted */ 1858 pci_epc_deinit_notify(pcie->pci.ep.epc); 1859 dw_pcie_ep_cleanup(&pcie->pci.ep); 1860 1861 val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); 1862 val &= ~PORT_LOGIC_SPEED_CHANGE; 1863 dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); 1864 1865 if (pcie->update_fc_fixup) { 1866 val = dw_pcie_readl_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF); 1867 val |= 0x1 << CFG_TIMER_CTRL_ACK_NAK_SHIFT; 1868 dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val); 1869 } 1870 1871 config_gen3_gen4_eq_presets(pcie); 1872 1873 init_host_aspm(pcie); 1874 1875 if (!pcie->of_data->has_l1ss_exit_fix) { 1876 val = dw_pcie_readl_dbi(pci, GEN3_RELATED_OFF); 1877 val &= ~GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL; 1878 dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val); 1879 } 1880 1881 pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci, 1882 PCI_CAP_ID_EXP); 1883 1884 /* Clear Slot Clock Configuration bit if SRNS configuration */ 1885 if (pcie->enable_srns) { 1886 val_16 = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + 1887 PCI_EXP_LNKSTA); 1888 val_16 &= ~PCI_EXP_LNKSTA_SLC; 1889 dw_pcie_writew_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA, 1890 val_16); 1891 } 1892 1893 clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ); 1894 1895 val = (ep->msi_mem_phys & MSIX_ADDR_MATCH_LOW_OFF_MASK); 1896 val |= MSIX_ADDR_MATCH_LOW_OFF_EN; 1897 dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_LOW_OFF, val); 1898 val = (upper_32_bits(ep->msi_mem_phys) & MSIX_ADDR_MATCH_HIGH_OFF_MASK); 1899 dw_pcie_writel_dbi(pci, MSIX_ADDR_MATCH_HIGH_OFF, val); 1900 1901 ret = dw_pcie_ep_init_registers(ep); 1902 if (ret) { 1903 dev_err(dev, "Failed to complete initialization: %d\n", ret); 1904 goto fail_init_complete; 1905 } 1906 1907 pci_epc_init_notify(ep->epc); 1908 
1909 /* Program the private control to allow sending LTR upstream */ 1910 if (pcie->of_data->has_ltr_req_fix) { 1911 val = appl_readl(pcie, APPL_LTR_MSG_2); 1912 val |= APPL_LTR_MSG_2_LTR_MSG_REQ_STATE; 1913 appl_writel(pcie, val, APPL_LTR_MSG_2); 1914 } 1915 1916 /* Enable LTSSM */ 1917 val = appl_readl(pcie, APPL_CTRL); 1918 val |= APPL_CTRL_LTSSM_EN; 1919 appl_writel(pcie, val, APPL_CTRL); 1920 1921 pcie->ep_state = EP_STATE_ENABLED; 1922 dev_dbg(dev, "Initialization of endpoint is completed\n"); 1923 1924 return; 1925 1926 fail_init_complete: 1927 reset_control_assert(pcie->core_rst); 1928 tegra_pcie_disable_phy(pcie); 1929 fail_phy: 1930 reset_control_assert(pcie->core_apb_rst); 1931 fail_core_apb_rst: 1932 clk_disable_unprepare(pcie->core_clk); 1933 fail_core_clk_enable: 1934 tegra_pcie_bpmp_set_pll_state(pcie, false); 1935 fail_pll_init: 1936 tegra_pcie_bpmp_set_ctrl_state(pcie, false); 1937 fail_set_ctrl_state: 1938 pm_runtime_put_sync(dev); 1939 } 1940 1941 static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg) 1942 { 1943 struct tegra_pcie_dw *pcie = arg; 1944 1945 if (gpiod_get_value(pcie->pex_rst_gpiod)) 1946 pex_ep_event_pex_rst_assert(pcie); 1947 else 1948 pex_ep_event_pex_rst_deassert(pcie); 1949 1950 return IRQ_HANDLED; 1951 } 1952 1953 static int tegra_pcie_ep_raise_intx_irq(struct tegra_pcie_dw *pcie, u16 irq) 1954 { 1955 /* Tegra194 supports only INTA */ 1956 if (irq > 1) 1957 return -EINVAL; 1958 1959 appl_writel(pcie, 1, APPL_LEGACY_INTX); 1960 usleep_range(1000, 2000); 1961 appl_writel(pcie, 0, APPL_LEGACY_INTX); 1962 return 0; 1963 } 1964 1965 static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq) 1966 { 1967 if (unlikely(irq > 32)) 1968 return -EINVAL; 1969 1970 appl_writel(pcie, BIT(irq - 1), APPL_MSI_CTRL_1); 1971 1972 return 0; 1973 } 1974 1975 static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq) 1976 { 1977 struct dw_pcie_ep *ep = &pcie->pci.ep; 1978 1979 writel(irq, ep->msi_mem); 
1980 1981 return 0; 1982 } 1983 1984 static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, 1985 unsigned int type, u16 interrupt_num) 1986 { 1987 struct dw_pcie *pci = to_dw_pcie_from_ep(ep); 1988 struct tegra_pcie_dw *pcie = to_tegra_pcie(pci); 1989 1990 switch (type) { 1991 case PCI_IRQ_INTX: 1992 return tegra_pcie_ep_raise_intx_irq(pcie, interrupt_num); 1993 1994 case PCI_IRQ_MSI: 1995 return tegra_pcie_ep_raise_msi_irq(pcie, interrupt_num); 1996 1997 case PCI_IRQ_MSIX: 1998 return tegra_pcie_ep_raise_msix_irq(pcie, interrupt_num); 1999 2000 default: 2001 dev_err(pci->dev, "Unknown IRQ type\n"); 2002 return -EPERM; 2003 } 2004 2005 return 0; 2006 } 2007 2008 static const struct pci_epc_bar_rsvd_region tegra194_bar2_rsvd[] = { 2009 { 2010 /* MSI-X table structure */ 2011 .type = PCI_EPC_BAR_RSVD_MSIX_TBL_RAM, 2012 .offset = 0x0, 2013 .size = SZ_64K, 2014 }, 2015 { 2016 /* MSI-X PBA structure */ 2017 .type = PCI_EPC_BAR_RSVD_MSIX_PBA_RAM, 2018 .offset = 0x10000, 2019 .size = SZ_64K, 2020 }, 2021 }; 2022 2023 static const struct pci_epc_bar_rsvd_region tegra194_bar4_rsvd[] = { 2024 { 2025 /* DMA_CAP (BAR4: DMA Port Logic Structure) */ 2026 .type = PCI_EPC_BAR_RSVD_DMA_CTRL_MMIO, 2027 .offset = 0x0, 2028 .size = SZ_4K, 2029 }, 2030 }; 2031 2032 /* Tegra EP: BAR0 = 64-bit programmable BAR, BAR2 = 64-bit MSI-X table, BAR4 = 64-bit DMA regs. 
*/
static const struct pci_epc_features tegra_pcie_epc_features = {
	DWC_EPC_COMMON_FEATURES,
	.linkup_notifier = true,
	.msi_capable = true,
	/* BAR0 is the only fully-programmable BAR; all EP BARs are 64-bit. */
	.bar[BAR_0] = { .only_64bit = true, },
	.bar[BAR_2] = {
		.type = BAR_RESERVED,
		.only_64bit = true,
		.nr_rsvd_regions = ARRAY_SIZE(tegra194_bar2_rsvd),
		.rsvd_regions = tegra194_bar2_rsvd,
	},
	.bar[BAR_4] = {
		.type = BAR_RESERVED,
		.only_64bit = true,
		.nr_rsvd_regions = ARRAY_SIZE(tegra194_bar4_rsvd),
		.rsvd_regions = tegra194_bar4_rsvd,
	},
	.align = SZ_64K,
};

/* dw_pcie_ep_ops::get_features: advertise the fixed EPC capabilities above. */
static const struct pci_epc_features*
tegra_pcie_ep_get_features(struct dw_pcie_ep *ep)
{
	return &tegra_pcie_epc_features;
}

static const struct dw_pcie_ep_ops pcie_ep_ops = {
	.raise_irq = tegra_pcie_ep_raise_irq,
	.get_features = tegra_pcie_ep_get_features,
};

/*
 * Endpoint-mode setup: wire the PERST# GPIO up as a threaded IRQ and
 * register with the DWC endpoint core.
 *
 * The PERST IRQ is requested with IRQ_NOAUTOEN so it stays disabled here;
 * it is enabled later (see tegra_pcie_dw_resume_early()) once the endpoint
 * is ready to react to host-driven reset transitions.
 *
 * Returns 0 on success or a negative errno; runtime PM is left disabled on
 * failure of dw_pcie_ep_init().
 */
static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
				struct platform_device *pdev)
{
	struct dw_pcie *pci = &pcie->pci;
	struct device *dev = pcie->dev;
	struct dw_pcie_ep *ep;
	char *name;
	int ret;

	ep = &pci->ep;
	ep->ops = &pcie_ep_ops;

	ep->page_size = SZ_64K;

	/* Debounce PERST# so glitches don't fake assert/deassert events */
	ret = gpiod_set_debounce(pcie->pex_rst_gpiod, PERST_DEBOUNCE_TIME);
	if (ret < 0) {
		dev_err(dev, "Failed to set PERST GPIO debounce time: %d\n",
			ret);
		return ret;
	}

	ret = gpiod_to_irq(pcie->pex_rst_gpiod);
	if (ret < 0) {
		dev_err(dev, "Failed to get IRQ for PERST GPIO: %d\n", ret);
		return ret;
	}
	pcie->pex_rst_irq = (unsigned int)ret;

	/* Per-controller IRQ name, keyed by controller ID */
	name = devm_kasprintf(dev, GFP_KERNEL, "tegra_pcie_%u_pex_rst_irq",
			      pcie->cid);
	if (!name) {
		dev_err(dev, "Failed to create PERST IRQ string\n");
		return -ENOMEM;
	}

	/* Keep the IRQ off until explicitly enabled (resume/EP bring-up) */
	irq_set_status_flags(pcie->pex_rst_irq, IRQ_NOAUTOEN);

	pcie->ep_state = EP_STATE_DISABLED;

	/* Both PERST# edges matter: assert tears down, deassert brings up */
	ret = devm_request_threaded_irq(dev, pcie->pex_rst_irq, NULL,
					tegra_pcie_ep_pex_rst_irq,
					IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					name, (void *)pcie);
	if (ret < 0) {
		dev_err(dev, "Failed to request IRQ for PERST: %d\n", ret);
		return ret;
	}

	pm_runtime_enable(dev);

	ret = dw_pcie_ep_init(ep);
	if (ret) {
		dev_err(dev, "Failed to initialize DWC Endpoint subsystem: %d\n",
			ret);
		pm_runtime_disable(dev);
		return ret;
	}

	return 0;
}

/*
 * Probe: gather all resources (clocks, resets, regulators, PHYs, MMIO
 * regions, BPMP handle, interconnect path), then branch on the DT-selected
 * mode to finish Root Complex or Endpoint configuration.
 *
 * Most acquisitions are devm-managed; the BPMP handle is the exception and
 * is released explicitly on the failure path.
 */
static int tegra_pcie_dw_probe(struct platform_device *pdev)
{
	const struct tegra_pcie_dw_of_data *data;
	struct device *dev = &pdev->dev;
	struct resource *atu_dma_res;
	struct tegra_pcie_dw *pcie;
	struct dw_pcie_rp *pp;
	struct dw_pcie *pci;
	struct phy **phys;
	char *name;
	int ret;
	u32 i;

	data = of_device_get_match_data(dev);

	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pci = &pcie->pci;
	pci->dev = &pdev->dev;
	pci->ops = &tegra_dw_pcie_ops;
	pcie->dev = &pdev->dev;
	pcie->of_data = (struct tegra_pcie_dw_of_data *)data;
	/* SoC-specific N_FTS values for each link-speed bucket */
	pci->n_fts[0] = pcie->of_data->n_fts[0];
	pci->n_fts[1] = pcie->of_data->n_fts[1];
	pp = &pci->pp;
	pp->num_vectors = MAX_MSI_IRQS;

	ret = tegra_pcie_dw_parse_dt(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		/* Probe deferral is routine; don't spam the log for it */
		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to parse device tree: %d\n"),
			   ret);
		return ret;
	}

	ret = tegra_pcie_get_slot_regulators(pcie);
	if (ret < 0) {
		const char *level = KERN_ERR;

		if (ret == -EPROBE_DEFER)
			level = KERN_DEBUG;

		dev_printk(level, dev,
			   dev_fmt("Failed to get slot regulators: %d\n"),
			   ret);
		return ret;
	}

	/* Optional board-level mux selecting the reference-clock source */
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 1);

	pcie->pex_ctl_supply = devm_regulator_get(dev, "vddio-pex-ctl");
	if (IS_ERR(pcie->pex_ctl_supply)) {
		ret = PTR_ERR(pcie->pex_ctl_supply);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get regulator: %ld\n",
				PTR_ERR(pcie->pex_ctl_supply));
		return ret;
	}

	pcie->core_clk = devm_clk_get(dev, "core");
	if (IS_ERR(pcie->core_clk)) {
		dev_err(dev, "Failed to get core clock: %ld\n",
			PTR_ERR(pcie->core_clk));
		return PTR_ERR(pcie->core_clk);
	}

	/* Monitor clock is optional: absent on SoCs that don't provide it */
	pcie->core_clk_m = devm_clk_get_optional(dev, "core_m");
	if (IS_ERR(pcie->core_clk_m))
		return dev_err_probe(dev, PTR_ERR(pcie->core_clk_m),
				     "Failed to get monitor clock\n");

	pcie->appl_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						      "appl");
	if (!pcie->appl_res) {
		dev_err(dev, "Failed to find \"appl\" region\n");
		return -ENODEV;
	}

	pcie->appl_base = devm_ioremap_resource(dev, pcie->appl_res);
	if (IS_ERR(pcie->appl_base))
		return PTR_ERR(pcie->appl_base);

	pcie->core_apb_rst = devm_reset_control_get(dev, "apb");
	if (IS_ERR(pcie->core_apb_rst)) {
		dev_err(dev, "Failed to get APB reset: %ld\n",
			PTR_ERR(pcie->core_apb_rst));
		return PTR_ERR(pcie->core_apb_rst);
	}

	phys = devm_kcalloc(dev, pcie->phy_count, sizeof(*phys), GFP_KERNEL);
	if (!phys)
		return -ENOMEM;

	/* One P2U (PIPE-to-UPHY) PHY per lane, named "p2u-<lane>" */
	for (i = 0; i < pcie->phy_count; i++) {
		name = kasprintf(GFP_KERNEL, "p2u-%u", i);
		if (!name) {
			dev_err(dev, "Failed to create P2U string\n");
			return -ENOMEM;
		}
		phys[i] = devm_phy_get(dev, name);
		kfree(name);
		if (IS_ERR(phys[i])) {
			ret = PTR_ERR(phys[i]);
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get PHY: %d\n", ret);
			return ret;
		}
	}

	pcie->phys = phys;

	atu_dma_res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "atu_dma");
	if (!atu_dma_res) {
		dev_err(dev, "Failed to find \"atu_dma\" region\n");
		return -ENODEV;
	}
	pcie->atu_dma_res = atu_dma_res;

	pci->atu_size = resource_size(atu_dma_res);
	pci->atu_base = devm_ioremap_resource(dev, atu_dma_res);
	if (IS_ERR(pci->atu_base))
		return PTR_ERR(pci->atu_base);

	pcie->core_rst = devm_reset_control_get(dev, "core");
	if (IS_ERR(pcie->core_rst)) {
		dev_err(dev, "Failed to get core reset: %ld\n",
			PTR_ERR(pcie->core_rst));
		return PTR_ERR(pcie->core_rst);
	}

	pp->irq = platform_get_irq_byname(pdev, "intr");
	if (pp->irq < 0)
		return pp->irq;

	/* BPMP handle is NOT devm-managed: released on every failure path */
	pcie->bpmp = tegra_bpmp_get(dev);
	if (IS_ERR(pcie->bpmp))
		return PTR_ERR(pcie->bpmp);

	platform_set_drvdata(pdev, pcie);

	pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
	ret = PTR_ERR_OR_ZERO(pcie->icc_path);
	if (ret) {
		tegra_bpmp_put(pcie->bpmp);
		dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
		return ret;
	}

	switch (pcie->of_data->mode) {
	case DW_PCIE_RC_TYPE:
		ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
				       IRQF_SHARED, "tegra-pcie-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_rp(pcie);
		/* -ENOMEDIUM (no device in slot) is not a probe failure */
		if (ret && ret != -ENOMEDIUM)
			goto fail;
		else
			return 0;
		break;

	case DW_PCIE_EP_TYPE:
		ret = devm_request_threaded_irq(dev, pp->irq,
						tegra_pcie_ep_hard_irq,
						tegra_pcie_ep_irq_thread,
						IRQF_SHARED,
						"tegra-pcie-ep-intr", pcie);
		if (ret) {
			dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq,
				ret);
			goto fail;
		}

		ret = tegra_pcie_config_ep(pcie, pdev);
		if (ret < 0)
			goto fail;
		else
			return 0;
		break;

	default:
		dev_err(dev, "Invalid PCIe device type %d\n",
			pcie->of_data->mode);
		ret = -EINVAL;
	}

fail:
	tegra_bpmp_put(pcie->bpmp);
	return ret;
}

/*
 * Remove: tear down in reverse of probe. RC mode bails early if the link
 * never came up (nothing was configured); EP mode quiesces PERST handling
 * before deregistering from the DWC EP core.
 */
static void tegra_pcie_dw_remove(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
	struct dw_pcie_ep *ep = &pcie->pci.ep;

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		tegra_pcie_deinit_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		/* Stop reacting to PERST# before asserting teardown */
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
		dw_pcie_ep_deinit(ep);
	}

	pm_runtime_disable(pcie->dev);
	tegra_bpmp_put(pcie->bpmp);
	if (pcie->pex_refclk_sel_gpiod)
		gpiod_set_value(pcie->pex_refclk_sel_gpiod, 0);
}

/*
 * System suspend (first phase). An active endpoint cannot be suspended,
 * since the host may access it at any time; refuse in that case.
 */
static int tegra_pcie_dw_suspend(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		if (pcie->ep_state == EP_STATE_ENABLED) {
			dev_err(dev, "Tegra PCIe is in EP mode, suspend not allowed\n");
			return -EPERM;
		}

		disable_irq(pcie->pex_rst_irq);
		return 0;
	}

	return 0;
}

/*
 * Late suspend (RC path): arm hardware hot-reset so the controller handles
 * a secondary-bus reset itself across suspend, unless the SoC has the SBR
 * fix and doesn't need it.
 */
static int tegra_pcie_dw_suspend_late(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (!pcie->link_state)
		return 0;

	/* Enable HW_HOT_RST mode */
	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	return 0;
}

/*
 * Noirq suspend (RC only): send PME_Turn_Off and power the controller down.
 */
static int tegra_pcie_dw_suspend_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
		return 0;

	if (!pcie->link_state)
		return 0;

	clk_disable_unprepare(pcie->core_clk_m);
	tegra_pcie_dw_pme_turnoff(pcie);
	tegra_pcie_unconfig_controller(pcie);

	return 0;
}

/*
 * Noirq resume (RC only): re-initialize the controller, host bridge and
 * link; on any failure undo the controller configuration.
 */
static int tegra_pcie_dw_resume_noirq(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	int ret;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE)
		return 0;

	if (!pcie->link_state)
		return 0;

	ret = tegra_pcie_config_controller(pcie, true);
	if (ret < 0)
		return ret;

	ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
	if (ret < 0) {
		dev_err(dev, "Failed to init host: %d\n", ret);
		goto fail_host_init;
	}

	dw_pcie_setup_rc(&pcie->pci.pp);

	ret = tegra_pcie_dw_start_link(&pcie->pci);
	if (ret < 0)
		goto fail_host_init;

	return 0;

fail_host_init:
	tegra_pcie_unconfig_controller(pcie);
	return ret;
}

/*
 * Early resume: EP mode just re-enables PERST# handling; RC mode reverts
 * the HW_HOT_RST arming done in suspend_late (if applicable).
 */
static int tegra_pcie_dw_resume_early(struct device *dev)
{
	struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
	u32 val;

	if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
		enable_irq(pcie->pex_rst_irq);
		return 0;
	}

	if (!pcie->link_state)
		return 0;

	/* Disable HW_HOT_RST mode */
	if (!pcie->of_data->has_sbr_reset_fix) {
		val = appl_readl(pcie, APPL_CTRL);
		val &= ~(APPL_CTRL_HW_HOT_RST_MODE_MASK <<
			 APPL_CTRL_HW_HOT_RST_MODE_SHIFT);
		val |= APPL_CTRL_HW_HOT_RST_MODE_IMDT_RST <<
		       APPL_CTRL_HW_HOT_RST_MODE_SHIFT;
		val &= ~APPL_CTRL_HW_HOT_RST_EN;
		appl_writel(pcie, val, APPL_CTRL);
	}

	return 0;
}

/*
 * Shutdown: quiesce interrupts and power the controller down so DMA and
 * IRQ activity stops before kexec/reboot.
 */
static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
{
	struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);

	if (pcie->of_data->mode == DW_PCIE_RC_TYPE) {
		if (!pcie->link_state)
			return;

		debugfs_remove_recursive(pcie->debugfs);
		disable_irq(pcie->pci.pp.irq);
		if (IS_ENABLED(CONFIG_PCI_MSI))
			disable_irq(pcie->pci.pp.msi_irq[0]);

		tegra_pcie_dw_pme_turnoff(pcie);
		tegra_pcie_unconfig_controller(pcie);
		pm_runtime_put_sync(pcie->dev);
	} else {
		disable_irq(pcie->pex_rst_irq);
		pex_ep_event_pex_rst_assert(pcie);
	}
}

/* Per-SoC / per-mode configuration, selected via the OF match table below. */
static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_rc_of_data = {
	.version = TEGRA194_DWC_IP_VER,
	.mode = DW_PCIE_RC_TYPE,
	.cdm_chk_int_en_bit = BIT(19),
	/* Gen4 - 5, 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x360,
	.n_fts = { 52, 52 },
};

static const struct tegra_pcie_dw_of_data tegra194_pcie_dw_ep_of_data = {
	.version = TEGRA194_DWC_IP_VER,
	.mode = DW_PCIE_EP_TYPE,
	.cdm_chk_int_en_bit = BIT(19),
	/* Gen4 - 5, 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x360,
	.n_fts = { 52, 52 },
};

static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_rc_of_data = {
	.version = TEGRA234_DWC_IP_VER,
	.mode = DW_PCIE_RC_TYPE,
	.has_msix_doorbell_access_fix = true,
	.has_sbr_reset_fix = true,
	.has_l1ss_exit_fix = true,
	.cdm_chk_int_en_bit = BIT(18),
	/* Gen4 - 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x340,
	.n_fts = { 52, 80 },
};

static const struct tegra_pcie_dw_of_data tegra234_pcie_dw_ep_of_data = {
	.version = TEGRA234_DWC_IP_VER,
	.mode = DW_PCIE_EP_TYPE,
	.has_l1ss_exit_fix = true,
	.has_ltr_req_fix = true,
	.disable_l1_2 = true,
	.cdm_chk_int_en_bit = BIT(18),
	/* Gen4 - 6, 8 and 9 presets enabled */
	.gen4_preset_vec = 0x340,
	.n_fts = { 52, 80 },
};

static const struct of_device_id tegra_pcie_dw_of_match[] = {
	{
		.compatible = "nvidia,tegra194-pcie",
		.data = &tegra194_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra194-pcie-ep",
		.data = &tegra194_pcie_dw_ep_of_data,
	},
	{
		.compatible = "nvidia,tegra234-pcie",
		.data = &tegra234_pcie_dw_rc_of_data,
	},
	{
		.compatible = "nvidia,tegra234-pcie-ep",
		.data = &tegra234_pcie_dw_ep_of_data,
	},
	{}
};

static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
	.suspend = tegra_pcie_dw_suspend,
	.suspend_late = tegra_pcie_dw_suspend_late,
	.suspend_noirq = tegra_pcie_dw_suspend_noirq,
	.resume_noirq = tegra_pcie_dw_resume_noirq,
	.resume_early = tegra_pcie_dw_resume_early,
};

static struct platform_driver tegra_pcie_dw_driver = {
	.probe = tegra_pcie_dw_probe,
	.remove = tegra_pcie_dw_remove,
	.shutdown = tegra_pcie_dw_shutdown,
	.driver = {
		.name = "tegra194-pcie",
		.pm = &tegra_pcie_dw_pm_ops,
		.of_match_table = tegra_pcie_dw_of_match,
	},
};
module_platform_driver(tegra_pcie_dw_driver);

MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);

MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
MODULE_LICENSE("GPL v2");