// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2009 - 2019 Broadcom */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include "../pci.h"

/* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */
#define BRCM_PCIE_CAP_REGS				0x00ac

/* Broadcom STB PCIe Register Offsets */
#define PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1				0x0188
#define  PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK	0xc
#define  PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN			0x0

#define PCIE_RC_CFG_PRIV1_ID_VAL3			0x043c
#define  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK	0xffffff

#define PCIE_RC_CFG_PRIV1_LINK_CAPABILITY			0x04dc
#define  PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK	0xc00

#define PCIE_RC_CFG_PRIV1_ROOT_CAP			0x4f8
#define  PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK	0xf8

#define PCIE_RC_DL_MDIO_ADDR				0x1100
#define PCIE_RC_DL_MDIO_WR_DATA				0x1104
#define PCIE_RC_DL_MDIO_RD_DATA				0x1108

#define PCIE_RC_PL_PHY_CTL_15				0x184c
#define  PCIE_RC_PL_PHY_CTL_15_DIS_PLL_PD_MASK		0x400000
#define  PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK	0xff

#define PCIE_MISC_MISC_CTRL				0x4008
#define  PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK	0x80
#define  PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK	0x400
#define  PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK		0x1000
#define  PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK	0x2000
#define  PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK	0x300000

#define  PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK		0xf8000000
#define  PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK		0x07c00000
#define  PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK		0x0000001f
#define  SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK
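/* e.g. SCB_SIZE_MASK(0) expands to PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK */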

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO		0x400c
#define PCIE_MEM_WIN0_LO(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO + ((win) * 8)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI		0x4010
#define PCIE_MEM_WIN0_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_HI + ((win) * 8)

/*
 * NOTE: You may see the term "BAR" in a number of register names used by
 *   this driver.  The term is an artifact of when the HW core was an
 *   endpoint device (EP).  Now it is a root complex (RC) and anywhere a
 *   register has the term "BAR" it is related to an inbound window.
 */

#define PCIE_BRCM_MAX_INBOUND_WINS			16
#define PCIE_MISC_RC_BAR1_CONFIG_LO			0x402c
#define  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK		0x1f

#define PCIE_MISC_RC_BAR4_CONFIG_LO			0x40d4

#define PCIE_MISC_MSI_BAR_CONFIG_LO			0x4044
#define PCIE_MISC_MSI_BAR_CONFIG_HI			0x4048

#define PCIE_MISC_MSI_DATA_CONFIG			0x404c
#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_32		0xffe06540
#define  PCIE_MISC_MSI_DATA_CONFIG_VAL_8		0xfff86540

#define PCIE_MISC_PCIE_CTRL				0x4064
#define  PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK	0x1
#define  PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK		0x4

#define PCIE_MISC_PCIE_STATUS				0x4068
#define  PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK		0x80
#define  PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK	0x20
#define  PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK	0x10
#define  PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK	0x40

#define PCIE_MISC_REVISION				0x406c
#define  BRCM_PCIE_HW_REV_33				0x0303
#define  BRCM_PCIE_HW_REV_3_20				0x0320

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT		0x4070
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK	0xfff00000
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK	0xfff0
#define PCIE_MEM_WIN0_BASE_LIMIT(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT + ((win) * 4)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI			0x4080
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK	0xff
#define PCIE_MEM_WIN0_BASE_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI + ((win) * 8)

#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI			0x4084
#define  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK	0xff
#define PCIE_MEM_WIN0_LIMIT_HI(win)	\
		PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI + ((win) * 8)

#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK	0x2
#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK		0x200000
#define  PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x08000000
#define  PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK		0x00800000
#define  PCIE_CLKREQ_MASK \
	  (PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK | \
	   PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK)

#define PCIE_MISC_UBUS_BAR1_CONFIG_REMAP			0x40ac
#define  PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK	BIT(0)
#define PCIE_MISC_UBUS_BAR4_CONFIG_REMAP			0x410c

#define PCIE_MSI_INTR2_BASE		0x4500

/* Offsets from INTR2_CPU and MSI_INTR2 BASE offsets */
#define  MSI_INT_STATUS			0x0
#define  MSI_INT_CLR			0x8
#define  MSI_INT_MASK_SET		0x10
#define  MSI_INT_MASK_CLR		0x14

#define  PCIE_RGR1_SW_INIT_1_PERST_MASK			0x1
#define  PCIE_RGR1_SW_INIT_1_PERST_SHIFT		0x0

#define RGR1_SW_INIT_1_INIT_GENERIC_MASK		0x2
#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT		0x1
#define RGR1_SW_INIT_1_INIT_7278_MASK			0x1
#define RGR1_SW_INIT_1_INIT_7278_SHIFT			0x0

/* PCIe parameters */
#define BRCM_NUM_PCIE_OUT_WINS		0x4
#define BRCM_INT_PCI_MSI_NR		32
#define BRCM_INT_PCI_MSI_LEGACY_NR	8
#define BRCM_INT_PCI_MSI_SHIFT		0
#define BRCM_INT_PCI_MSI_MASK		GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0)
#define BRCM_INT_PCI_MSI_LEGACY_MASK	GENMASK(31, \
						32 - BRCM_INT_PCI_MSI_LEGACY_NR)

/* MSI target addresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB	0x0fffffffcULL
#define BRCM_MSI_TARGET_ADDR_GT_4GB	0xffffffffcULL

/* MDIO registers */
#define MDIO_PORT0			0x0
#define MDIO_DATA_MASK			0x7fffffff
#define MDIO_PORT_MASK			0xf0000
#define MDIO_PORT_EXT_MASK		0x200000
#define MDIO_REGAD_MASK			0xffff
#define MDIO_CMD_MASK			0x00100000
#define MDIO_CMD_READ			0x1
#define MDIO_CMD_WRITE			0x0
#define MDIO_DATA_DONE_MASK		0x80000000
#define MDIO_RD_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 1 : 0)
#define MDIO_WT_DONE(x)			(((x) & MDIO_DATA_DONE_MASK) ? 0 : 1)
#define SSC_REGS_ADDR			0x1100
#define SET_ADDR_OFFSET			0x1f
#define SSC_CNTL_OFFSET			0x2
#define SSC_CNTL_OVRD_EN_MASK		0x8000
#define SSC_CNTL_OVRD_VAL_MASK		0x4000
#define SSC_STATUS_OFFSET		0x1
#define SSC_STATUS_SSC_MASK		0x400
#define SSC_STATUS_PLL_LOCK_MASK	0x800
#define PCIE_BRCM_MAX_MEMC		3

#define IDX_ADDR(pcie)			((pcie)->cfg->offsets[EXT_CFG_INDEX])
#define DATA_ADDR(pcie)			((pcie)->cfg->offsets[EXT_CFG_DATA])
#define PCIE_RGR1_SW_INIT_1(pcie)	((pcie)->cfg->offsets[RGR1_SW_INIT_1])
#define HARD_DEBUG(pcie)		((pcie)->cfg->offsets[PCIE_HARD_DEBUG])
#define INTR2_CPU_BASE(pcie)		((pcie)->cfg->offsets[PCIE_INTR2_CPU_BASE])

/* Rescal registers */
#define PCIE_DVT_PMU_PCIE_PHY_CTRL				0xc700
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS			0x3
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK		0x4
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT	0x2
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK		0x2
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT		0x1
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK		0x1
#define  PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT		0x0

/* Forward declarations */
struct brcm_pcie;

enum {
	RGR1_SW_INIT_1,
	EXT_CFG_INDEX,
	EXT_CFG_DATA,
	PCIE_HARD_DEBUG,
	PCIE_INTR2_CPU_BASE,
};

enum pcie_soc_base {
	GENERIC,
	BCM2711,
	BCM4908,
	BCM7278,
	BCM7425,
	BCM7435,
	BCM7712,
};

struct inbound_win {
	u64 size;
	u64 pci_offset;
	u64 cpu_addr;
};

/*
 * The RESCAL block is tied to PCIe controller #1, regardless of the number of
 * controllers, and turning off PCIe controller #1 prevents access to the RESCAL
 * register blocks, therefore no other controller can access this register
 * space, and depending upon the bus fabric we may get a timeout (UBUS/GISB),
 * or a hang (AXI).
 */
#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN		BIT(0)

struct pcie_cfg_data {
	const int *offsets;
	const enum pcie_soc_base soc_base;
	const bool has_phy;
	const u32 quirks;
	u8 num_inbound_wins;
	int (*perst_set)(struct brcm_pcie *pcie, u32 val);
	int (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
	int (*post_setup)(struct brcm_pcie *pcie);
};

struct subdev_regulators {
	unsigned int num_supplies;
	struct regulator_bulk_data supplies[];
};

struct brcm_msi {
	struct device		*dev;
	void __iomem		*base;
	struct device_node	*np;
	struct irq_domain	*msi_domain;
	struct irq_domain	*inner_domain;
	struct mutex		lock; /* guards the alloc/free operations */
	u64			target_addr;
	int			irq;
	DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR);
	bool			legacy;
	/* Some chips have MSIs in bits [31..24] of a shared register. */
	int			legacy_shift;
	int			nr; /* No. of MSI available, depends on chip */
	/* This is the base pointer for interrupt status/set/clr regs */
	void __iomem		*intr_base;
};

/* Internal PCIe Host Controller Information. */
struct brcm_pcie {
	struct device		*dev;
	void __iomem		*base;
	struct clk		*clk;
	struct device_node	*np;
	bool			ssc;
	int			gen;
	u64			msi_target_addr;
	struct brcm_msi		*msi;
	struct reset_control	*rescal;
	struct reset_control	*perst_reset;
	struct reset_control	*bridge_reset;
	struct reset_control	*swinit_reset;
	int			num_memc;
	u64			memc_size[PCIE_BRCM_MAX_MEMC];
	u32			hw_rev;
	struct subdev_regulators *sr;
	bool			ep_wakeup_capable;
	const struct pcie_cfg_data	*cfg;
};

static inline bool is_bmips(const struct brcm_pcie *pcie)
{
	return pcie->cfg->soc_base == BCM7435 || pcie->cfg->soc_base == BCM7425;
}

/*
 * This is to convert the size of the inbound "BAR" region to the
 * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
 */
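/*
 * Example encodings: SZ_4K -> 0x1c, SZ_64K -> 0x01, SZ_1G -> 0x0f,
 * SZ_4G -> 0x11; out-of-range sizes encode as 0 (window disabled).
 */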
static int brcm_pcie_encode_ibar_size(u64 size)
{
	int log2_in = ilog2(size);

	if (log2_in >= 12 && log2_in <= 15)
		/* Covers 4KB to 32KB (inclusive) */
		return (log2_in - 12) + 0x1c;
	else if (log2_in >= 16 && log2_in <= 36)
		/* Covers 64KB to 64GB (inclusive) */
		return log2_in - 15;
	/* Something is awry so disable */
	return 0;
}

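/*
 * Builds the MDIO command word; the port number is split across
 * MDIO_PORT_MASK and MDIO_PORT_EXT_MASK.  For example, port 0,
 * regad SET_ADDR_OFFSET (0x1f), cmd MDIO_CMD_WRITE packs to 0x1f.
 */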
static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
	u32 pkt = 0;

	pkt |= FIELD_PREP(MDIO_PORT_EXT_MASK, port >> 4);
	pkt |= FIELD_PREP(MDIO_PORT_MASK, port);
	pkt |= FIELD_PREP(MDIO_REGAD_MASK, regad);
	pkt |= FIELD_PREP(MDIO_CMD_MASK, cmd);

	return pkt;
}

/* negative return value indicates error */
static int brcm_pcie_mdio_read(void __iomem *base, u8 port, u8 regad, u32 *val)
{
	u32 data;
	int err;

	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_READ),
	       base + PCIE_RC_DL_MDIO_ADDR);
	readl(base + PCIE_RC_DL_MDIO_ADDR);
	err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_RD_DATA, data,
					MDIO_RD_DONE(data), 10, 100);
	*val = FIELD_GET(MDIO_DATA_MASK, data);

	return err;
}

/* negative return value indicates error */
static int brcm_pcie_mdio_write(void __iomem *base, u8 port,
				u8 regad, u16 wrdata)
{
	u32 data;
	int err;

	writel(brcm_pcie_mdio_form_pkt(port, regad, MDIO_CMD_WRITE),
	       base + PCIE_RC_DL_MDIO_ADDR);
	readl(base + PCIE_RC_DL_MDIO_ADDR);
	writel(MDIO_DATA_DONE_MASK | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);

	err = readl_poll_timeout_atomic(base + PCIE_RC_DL_MDIO_WR_DATA, data,
					MDIO_WT_DONE(data), 10, 100);
	return err;
}

/*
 * Configures device for Spread Spectrum Clocking (SSC) mode; a negative
 * return value indicates error.
 */
static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
{
	int pll, ssc;
	int ret;
	u32 tmp;

	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET,
				   SSC_REGS_ADDR);
	if (ret < 0)
		return ret;

	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_CNTL_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_EN_MASK);
	u32p_replace_bits(&tmp, 1, SSC_CNTL_OVRD_VAL_MASK);
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0,
				   SSC_CNTL_OFFSET, tmp);
	if (ret < 0)
		return ret;

	usleep_range(1000, 2000);
	ret = brcm_pcie_mdio_read(pcie->base, MDIO_PORT0,
				  SSC_STATUS_OFFSET, &tmp);
	if (ret < 0)
		return ret;

	ssc = FIELD_GET(SSC_STATUS_SSC_MASK, tmp);
	pll = FIELD_GET(SSC_STATUS_PLL_LOCK_MASK, tmp);

	return ssc && pll ? 0 : -EIO;
}

/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
	u16 lnkctl2 = readw(pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
	u32 lnkcap = readl(pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	u32p_replace_bits(&lnkcap, gen, PCI_EXP_LNKCAP_SLS);
	writel(lnkcap, pcie->base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	u16p_replace_bits(&lnkctl2, gen, PCI_EXP_LNKCTL2_TLS);
	writew(lnkctl2, pcie->base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKCTL2);
}

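/*
 * The BASE/LIMIT window registers hold CPU addresses in 1 MB units, so
 * cpu_addr and size are assumed to be 1 MB aligned.  For example, a 64 MB
 * window at CPU address 0x3_0000_0000 programs base 0x3000 and limit
 * 0x303f (both in MB), with the excess upper bits going to the
 * BASE_HI/LIMIT_HI registers.
 */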
static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
				       u8 win, u64 cpu_addr,
				       u64 pcie_addr, u64 size)
{
	u32 cpu_addr_mb_high, limit_addr_mb_high;
	phys_addr_t cpu_addr_mb, limit_addr_mb;
	int high_addr_shift;
	u32 tmp;

	/* Set the base of the pcie_addr window */
	writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
	writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));

	/* Write the addr base & limit lower bits (in MBs) */
	cpu_addr_mb = cpu_addr / SZ_1M;
	limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;

	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
	u32p_replace_bits(&tmp, cpu_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
	u32p_replace_bits(&tmp, limit_addr_mb,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));

	if (is_bmips(pcie))
		return;

	/* Write the cpu & limit addr upper bits */
	high_addr_shift =
		HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);

	cpu_addr_mb_high = cpu_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_BASE_HI(win));
	u32p_replace_bits(&tmp, cpu_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_HI_BASE_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_HI(win));

	limit_addr_mb_high = limit_addr_mb >> high_addr_shift;
	tmp = readl(pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
	u32p_replace_bits(&tmp, limit_addr_mb_high,
			  PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LIMIT_HI_LIMIT_MASK);
	writel(tmp, pcie->base + PCIE_MEM_WIN0_LIMIT_HI(win));
}

static struct irq_chip brcm_msi_irq_chip = {
	.name            = "BRCM STB PCIe MSI",
	.irq_ack         = irq_chip_ack_parent,
	.irq_mask        = pci_msi_mask_irq,
	.irq_unmask      = pci_msi_unmask_irq,
};

static struct msi_domain_info brcm_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_NO_AFFINITY | MSI_FLAG_MULTI_PCI_MSI,
	.chip	= &brcm_msi_irq_chip,
};

static void brcm_pcie_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;
	struct brcm_msi *msi;
	struct device *dev;
	u32 bit;

	chained_irq_enter(chip, desc);
	msi = irq_desc_get_handler_data(desc);
	dev = msi->dev;

	status = readl(msi->intr_base + MSI_INT_STATUS);
	status >>= msi->legacy_shift;

	for_each_set_bit(bit, &status, msi->nr) {
		int ret;
		ret = generic_handle_domain_irq(msi->inner_domain, bit);
		if (ret)
			dev_dbg(dev, "unexpected MSI\n");
	}

	chained_irq_exit(chip, desc);
}

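/*
 * The MSI message data is the low 16 bits of
 * PCIE_MISC_MSI_DATA_CONFIG_VAL_32 (0x6540) OR'd with the hwirq number,
 * e.g. hwirq 5 yields data 0x6545.
 */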
static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);

	msg->address_lo = lower_32_bits(msi->target_addr);
	msg->address_hi = upper_32_bits(msi->target_addr);
	msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}

static void brcm_msi_ack_irq(struct irq_data *data)
{
	struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
	const int shift_amt = data->hwirq + msi->legacy_shift;

	writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
}

static struct irq_chip brcm_msi_bottom_irq_chip = {
	.name			= "BRCM STB MSI",
	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
	.irq_ack                = brcm_msi_ack_irq,
};

static int brcm_msi_alloc(struct brcm_msi *msi, unsigned int nr_irqs)
{
	int hwirq;

	mutex_lock(&msi->lock);
	hwirq = bitmap_find_free_region(msi->used, msi->nr,
					order_base_2(nr_irqs));
	mutex_unlock(&msi->lock);

	return hwirq;
}

static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq,
			  unsigned int nr_irqs)
{
	mutex_lock(&msi->lock);
	bitmap_release_region(msi->used, hwirq, order_base_2(nr_irqs));
	mutex_unlock(&msi->lock);
}

static int brcm_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct brcm_msi *msi = domain->host_data;
	int hwirq, i;

	hwirq = brcm_msi_alloc(msi, nr_irqs);

	if (hwirq < 0)
		return hwirq;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
				    &brcm_msi_bottom_irq_chip, domain->host_data,
				    handle_edge_irq, NULL, NULL);
	return 0;
}

static void brcm_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct brcm_msi *msi = irq_data_get_irq_chip_data(d);

	brcm_msi_free(msi, d->hwirq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc	= brcm_irq_domain_alloc,
	.free	= brcm_irq_domain_free,
};

static int brcm_allocate_domains(struct brcm_msi *msi)
{
	struct fwnode_handle *fwnode = of_fwnode_handle(msi->np);
	struct device *dev = msi->dev;

	msi->inner_domain = irq_domain_create_linear(NULL, msi->nr, &msi_domain_ops, msi);
	if (!msi->inner_domain) {
		dev_err(dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi->msi_domain = pci_msi_create_irq_domain(fwnode,
						    &brcm_msi_domain_info,
						    msi->inner_domain);
	if (!msi->msi_domain) {
		dev_err(dev, "failed to create MSI domain\n");
		irq_domain_remove(msi->inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static void brcm_free_domains(struct brcm_msi *msi)
{
	irq_domain_remove(msi->msi_domain);
	irq_domain_remove(msi->inner_domain);
}

static void brcm_msi_remove(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi = pcie->msi;

	if (!msi)
		return;
	irq_set_chained_handler_and_data(msi->irq, NULL, NULL);
	brcm_free_domains(msi);
}

static void brcm_msi_set_regs(struct brcm_msi *msi)
{
	u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK :
				BRCM_INT_PCI_MSI_MASK;

	writel(val, msi->intr_base + MSI_INT_MASK_CLR);
	writel(val, msi->intr_base + MSI_INT_CLR);

	/*
	 * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
	 * enable, which we set to 1.
	 */
	writel(lower_32_bits(msi->target_addr) | 0x1,
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
	writel(upper_32_bits(msi->target_addr),
	       msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);

	val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
	writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}

static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
{
	struct brcm_msi *msi;
	int irq, ret;
	struct device *dev = pcie->dev;

	irq = irq_of_parse_and_map(dev->of_node, 1);
	if (irq <= 0) {
		dev_err(dev, "cannot map MSI interrupt\n");
		return -ENODEV;
	}

	msi = devm_kzalloc(dev, sizeof(struct brcm_msi), GFP_KERNEL);
	if (!msi)
		return -ENOMEM;

	mutex_init(&msi->lock);
	msi->dev = dev;
	msi->base = pcie->base;
	msi->np = pcie->np;
	msi->target_addr = pcie->msi_target_addr;
	msi->irq = irq;
	msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;

	/*
	 * Sanity check to make sure that the 'used' bitmap in struct brcm_msi
	 * is large enough.
	 */
	BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);

	if (msi->legacy) {
		msi->intr_base = msi->base + INTR2_CPU_BASE(pcie);
		msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
		msi->legacy_shift = 24;
	} else {
		msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
		msi->nr = BRCM_INT_PCI_MSI_NR;
		msi->legacy_shift = 0;
	}

	ret = brcm_allocate_domains(msi);
	if (ret)
		return ret;

	irq_set_chained_handler_and_data(msi->irq, brcm_pcie_msi_isr, msi);

	brcm_msi_set_regs(msi);
	pcie->msi = msi;

	return 0;
}

/* The controller is capable of serving in both RC and EP roles */
static bool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	u32 val = readl(base + PCIE_MISC_PCIE_STATUS);

	return !!FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK, val);
}

static bool brcm_pcie_link_up(struct brcm_pcie *pcie)
{
	u32 val = readl(pcie->base + PCIE_MISC_PCIE_STATUS);
	u32 dla = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_DL_ACTIVE_MASK, val);
	u32 plu = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK, val);

	return dla && plu;
}

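/*
 * Config accesses below the root bus are indirect: the target bus/devfn
 * is latched in the EXT_CFG_INDEX register, and the device's 4 KB config
 * space then appears at EXT_CFG_DATA.
 */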
static void __iomem *brcm_pcie_map_bus(struct pci_bus *bus,
				       unsigned int devfn, int where)
{
	struct brcm_pcie *pcie = bus->sysdata;
	void __iomem *base = pcie->base;
	int idx;

	/* Accesses to the RC go right to the RC registers if !devfn */
	if (pci_is_root_bus(bus))
		return devfn ? NULL : base + PCIE_ECAM_REG(where);

	/* An access to our HW w/o link-up will cause a CPU Abort */
	if (!brcm_pcie_link_up(pcie))
		return NULL;

	/* For devices, write to the config space index register */
	idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
	writel(idx, base + IDX_ADDR(pcie));
	return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}

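/*
 * Unlike brcm_pcie_map_bus() above, this BMIPS variant latches the full
 * register offset in EXT_CFG_INDEX and accesses the data through the
 * fixed EXT_CFG_DATA register.
 */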
static void __iomem *brcm7425_pcie_map_bus(struct pci_bus *bus,
					   unsigned int devfn, int where)
{
	struct brcm_pcie *pcie = bus->sysdata;
	void __iomem *base = pcie->base;
	int idx;

	/* Accesses to the RC go right to the RC registers if !devfn */
	if (pci_is_root_bus(bus))
		return devfn ? NULL : base + PCIE_ECAM_REG(where);

	/* An access to our HW w/o link-up will cause a CPU Abort */
	if (!brcm_pcie_link_up(pcie))
		return NULL;

	/* For devices, write to the config space index register */
	idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
	writel(idx, base + IDX_ADDR(pcie));
	return base + DATA_ADDR(pcie);
}

static int brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
	u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
	int ret = 0;

	if (pcie->bridge_reset) {
		if (val)
			ret = reset_control_assert(pcie->bridge_reset);
		else
			ret = reset_control_deassert(pcie->bridge_reset);

		if (ret)
			dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
				val ? "assert" : "deassert", ret);

		return ret;
	}

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
	tmp = (tmp & ~mask) | ((val << shift) & mask);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));

	return ret;
}

static int brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
	u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
	tmp = (tmp & ~mask) | ((val << shift) & mask);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));

	return 0;
}

static int brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val)
{
	int ret;

	if (WARN_ONCE(!pcie->perst_reset, "missing PERST# reset controller\n"))
		return -EINVAL;

	if (val)
		ret = reset_control_assert(pcie->perst_reset);
	else
		ret = reset_control_deassert(pcie->perst_reset);

	if (ret)
		dev_err(pcie->dev, "failed to %s 'perst' reset, err=%d\n",
			val ? "assert" : "deassert", ret);
	return ret;
}

static int brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp;

	/* Perst bit has moved and assert value is 0 */
	tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
	writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);

	return 0;
}

static int brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
	u32 tmp;

	tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
	u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
	writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));

	return 0;
}

static int brcm_pcie_post_setup_bcm2712(struct brcm_pcie *pcie)
{
	static const u16 data[] = { 0x50b9, 0xbda1, 0x0094, 0x97b4, 0x5030,
				    0x5030, 0x0007 };
	static const u8 regs[] = { 0x16, 0x17, 0x18, 0x19, 0x1b, 0x1c, 0x1e };
	int ret, i;
	u32 tmp;

	/* Allow a 54MHz (xosc) refclk source */
	ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, SET_ADDR_OFFSET, 0x1600);
	if (ret < 0)
		return ret;

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		ret = brcm_pcie_mdio_write(pcie->base, MDIO_PORT0, regs[i], data[i]);
		if (ret < 0)
			return ret;
	}

	usleep_range(100, 200);

	/*
	 * Set L1SS sub-state timers to avoid lengthy state transitions,
	 * PM clock period is 18.52ns (1/54MHz, round down).
	 */
	tmp = readl(pcie->base + PCIE_RC_PL_PHY_CTL_15);
	tmp &= ~PCIE_RC_PL_PHY_CTL_15_PM_CLK_PERIOD_MASK;
	tmp |= 0x12;
	writel(tmp, pcie->base + PCIE_RC_PL_PHY_CTL_15);

	return 0;
}

static void add_inbound_win(struct inbound_win *b, u8 *count, u64 size,
			    u64 cpu_addr, u64 pci_offset)
{
	b->size = size;
	b->cpu_addr = cpu_addr;
	b->pci_offset = pci_offset;
	(*count)++;
}

static int brcm_pcie_get_inbound_wins(struct brcm_pcie *pcie,
				      struct inbound_win inbound_wins[])
{
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	u64 pci_offset, cpu_addr, size = 0, tot_size = 0;
	struct resource_entry *entry;
	struct device *dev = pcie->dev;
	u64 lowest_pcie_addr = ~(u64)0;
	int ret, i = 0;
	u8 n = 0;

	/*
	 * The HW registers (and PCIe) use order-1 numbering for BARs.  As such,
	 * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
	 */
	struct inbound_win *b_begin = &inbound_wins[1];
	struct inbound_win *b = b_begin;

	/*
	 * STB chips other than the 7712 disable the first inbound window by
	 * default.  Rather than being mapped to system memory, it is mapped
	 * to the internal registers of the SoC.  This feature is deprecated,
	 * has security considerations, and is not implemented in our modern
	 * SoCs.
	 */
	if (pcie->cfg->soc_base != BCM7712)
		add_inbound_win(b++, &n, 0, 0, 0);

	resource_list_for_each_entry(entry, &bridge->dma_ranges) {
		u64 pcie_start = entry->res->start - entry->offset;
		u64 cpu_start = entry->res->start;

		size = resource_size(entry->res);
		tot_size += size;
		if (pcie_start < lowest_pcie_addr)
			lowest_pcie_addr = pcie_start;
		/*
		 * 7712 and newer chips may have many BARs, with each
		 * offering a non-overlapping viewport to system memory.
		 * That being said, each BAR's size must still be a power
		 * of two.
		 */
		if (pcie->cfg->soc_base == BCM7712)
			add_inbound_win(b++, &n, size, cpu_start, pcie_start);

		if (n > pcie->cfg->num_inbound_wins)
			break;
	}

	if (lowest_pcie_addr == ~(u64)0) {
		dev_err(dev, "DT node has no dma-ranges\n");
		return -EINVAL;
	}

	/*
	 * 7712 and newer chips do not have an internal memory mapping system
	 * that enables multiple memory controllers.  As such, we can return
	 * now without doing any special configuration.
	 */
	if (pcie->cfg->soc_base == BCM7712)
		return n;

	ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
						  PCIE_BRCM_MAX_MEMC);
	if (ret <= 0) {
		/* Make an educated guess */
		pcie->num_memc = 1;
		pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
	} else {
		pcie->num_memc = ret;
	}

	/* Each memc is viewed through a "port" that is a power of 2 */
	for (i = 0, size = 0; i < pcie->num_memc; i++)
		size += pcie->memc_size[i];

	/* Our HW mandates that the window size must be a power of 2 */
	size = 1ULL << fls64(size - 1);

	/*
	 * For STB chips, the BAR2 cpu_addr is hardwired to the start
	 * of system memory, so we set it to 0.
	 */
	cpu_addr = 0;
	pci_offset = lowest_pcie_addr;

	/*
	 * We validate the inbound memory view even though we should trust
	 * whatever the device-tree provides. This is because of an HW issue on
	 * early Raspberry Pi 4's revisions (bcm2711). It turns out its
	 * firmware has to dynamically edit dma-ranges due to a bug on the
	 * PCIe controller integration, which prohibits any access above the
	 * lower 3GB of memory. Given this, we decided to keep the dma-ranges
	 * in check, avoiding hard to debug device-tree related issues in the
	 * future:
	 *
	 * The PCIe host controller by design must set the inbound viewport to
	 * be a contiguous arrangement of all of the system's memory.  In
	 * addition, its size must be a power of two.  To further complicate
	 * matters, the viewport must start on a pcie-address that is aligned
	 * on a multiple of its size.  If a portion of the viewport does not
	 * represent system memory -- e.g. 3GB of memory requires a 4GB
	 * viewport -- we can map the outbound memory in or after 3GB and even
	 * though the viewport will overlap the outbound memory the controller
	 * will know to send outbound memory downstream and everything else
	 * upstream.
	 *
	 * For example:
	 *
	 * - The best-case scenario, memory up to 3GB, is to place the inbound
	 *   region in the first 4GB of pcie-space, as some legacy devices can
	 *   only address 32bits. We would also like to put the MSI under 4GB
	 *   as well, since some devices require a 32bit MSI target address.
	 *
	 * - If the system memory is 4GB or larger we cannot start the inbound
	 *   region at location 0 (since we have to allow some space for
	 *   outbound memory @ 3GB). So instead it will start at the 1x
	 *   multiple of its size.
	 */
	if (!size || (pci_offset & (size - 1)) ||
	    (pci_offset < SZ_4G && pci_offset > SZ_2G)) {
		dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
			size, pci_offset);
		return -EINVAL;
	}

	/* Enable inbound window 2, the main inbound window for STB chips */
	add_inbound_win(b++, &n, size, cpu_addr, pci_offset);

	/*
	 * Disable inbound window 3.  On some chips it presents the same
	 * window as #2 but the data appears in a settable endianness.
	 */
	add_inbound_win(b++, &n, 0, 0, 0);

	return n;
}

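/* BAR1-BAR3 config registers are contiguous; BAR4 and up sit in a second bank */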
static u32 brcm_bar_reg_offset(int bar)
{
	if (bar <= 3)
		return PCIE_MISC_RC_BAR1_CONFIG_LO + 8 * (bar - 1);
	else
		return PCIE_MISC_RC_BAR4_CONFIG_LO + 8 * (bar - 4);
}

static u32 brcm_ubus_reg_offset(int bar)
{
	if (bar <= 3)
		return PCIE_MISC_UBUS_BAR1_CONFIG_REMAP + 8 * (bar - 1);
	else
		return PCIE_MISC_UBUS_BAR4_CONFIG_REMAP + 8 * (bar - 4);
}

static void set_inbound_win_registers(struct brcm_pcie *pcie,
				      const struct inbound_win *inbound_wins,
				      u8 num_inbound_wins)
{
	void __iomem *base = pcie->base;
	int i;

	for (i = 1; i <= num_inbound_wins; i++) {
		u64 pci_offset = inbound_wins[i].pci_offset;
		u64 cpu_addr = inbound_wins[i].cpu_addr;
		u64 size = inbound_wins[i].size;
		u32 reg_offset = brcm_bar_reg_offset(i);
		u32 tmp = lower_32_bits(pci_offset);

		u32p_replace_bits(&tmp, brcm_pcie_encode_ibar_size(size),
				  PCIE_MISC_RC_BAR1_CONFIG_LO_SIZE_MASK);

		/* Write low */
		writel_relaxed(tmp, base + reg_offset);
		/* Write high */
		writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);

		/*
		 * Most STB chips:
		 *     Do nothing.
		 * 7712:
		 *     All of their BARs need to be set.
		 */
		if (pcie->cfg->soc_base == BCM7712) {
			/* BUS remap register settings */
			reg_offset = brcm_ubus_reg_offset(i);
			tmp = lower_32_bits(cpu_addr) & ~0xfff;
			tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
			writel_relaxed(tmp, base + reg_offset);
			tmp = upper_32_bits(cpu_addr);
			writel_relaxed(tmp, base + reg_offset + 4);
		}
	}
}

static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
	struct inbound_win inbound_wins[PCIE_BRCM_MAX_INBOUND_WINS];
	void __iomem *base = pcie->base;
	struct pci_host_bridge *bridge;
	struct resource_entry *entry;
	u32 tmp, burst, aspm_support;
	u8 num_out_wins = 0;
	int num_inbound_wins = 0;
	int memc, ret;

	/* Reset the bridge */
	ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
	if (ret)
		return ret;

	/* Ensure that PERST# is asserted; some bootloaders may deassert it. */
	if (pcie->cfg->soc_base == BCM2711) {
		ret = pcie->cfg->perst_set(pcie, 1);
		if (ret) {
			pcie->cfg->bridge_sw_init_set(pcie, 0);
			return ret;
		}
	}

	usleep_range(100, 200);

	/* Take the bridge out of reset */
	ret = pcie->cfg->bridge_sw_init_set(pcie, 0);
	if (ret)
		return ret;

	tmp = readl(base + HARD_DEBUG(pcie));
	if (is_bmips(pcie))
		tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	else
		tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
	writel(tmp, base + HARD_DEBUG(pcie));
	/* Wait for SerDes to be stable */
	usleep_range(100, 200);

	/*
	 * SCB_MAX_BURST_SIZE is a two bit field.  For GENERIC chips it
	 * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
	 * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
	 */
	if (is_bmips(pcie))
		burst = 0x1; /* 256 bytes */
	else if (pcie->cfg->soc_base == BCM2711)
		burst = 0x0; /* 128 bytes */
	else if (pcie->cfg->soc_base == BCM7278)
		burst = 0x3; /* 512 bytes */
	else
		burst = 0x2; /* 512 bytes */

	/*
	 * Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN,
	 * RCB_MPS_MODE, RCB_64B_MODE
	 */
	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
	u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_MPS_MODE_MASK);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_PCIE_RCB_64B_MODE_MASK);
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	num_inbound_wins = brcm_pcie_get_inbound_wins(pcie, inbound_wins);
	if (num_inbound_wins < 0)
		return num_inbound_wins;

	set_inbound_win_registers(pcie, inbound_wins, num_inbound_wins);

	if (!brcm_pcie_rc_mode(pcie)) {
		dev_err(pcie->dev, "PCIe RC controller misconfigured as Endpoint\n");
		return -EINVAL;
	}

	tmp = readl(base + PCIE_MISC_MISC_CTRL);
	for (memc = 0; memc < pcie->num_memc; memc++) {
		u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;

		if (memc == 0)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
		else if (memc == 1)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
		else if (memc == 2)
			u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
	}
	writel(tmp, base + PCIE_MISC_MISC_CTRL);

	/*
	 * We ideally want the MSI target address to be located in the 32bit
	 * addressable memory area. Some devices might depend on it. This is
	 * possible either when the inbound window is located above the lower
	 * 4GB or when the inbound area is smaller than 4GB (taking into
	 * account the rounding-up we're forced to perform).
	 */
	if (inbound_wins[2].pci_offset >= SZ_4G ||
	    (inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB;
	else
		pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;

	/* Don't advertise L0s capability if 'aspm-no-l0s' */
	aspm_support = PCIE_LINK_STATE_L1;
	if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
		aspm_support |= PCIE_LINK_STATE_L0S;
	tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
	u32p_replace_bits(&tmp, aspm_support,
		PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);

	/*
	 * For config space accesses on the RC, show the right class for
	 * a PCIe-PCIe bridge (the default setting is to be EP mode).
	 */
	tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
	u32p_replace_bits(&tmp, 0x060400,
			  PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
	writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);

	bridge = pci_host_bridge_from_priv(pcie);
	resource_list_for_each_entry(entry, &bridge->windows) {
		struct resource *res = entry->res;

		if (resource_type(res) != IORESOURCE_MEM)
			continue;

		if (num_out_wins >= BRCM_NUM_PCIE_OUT_WINS) {
			dev_err(pcie->dev, "too many outbound wins\n");
			return -EINVAL;
		}

		if (is_bmips(pcie)) {
			u64 start = res->start;
			unsigned int j, nwins = resource_size(res) / SZ_128M;

			/* bmips PCIe outbound windows have a 128MB max size */
			if (nwins > BRCM_NUM_PCIE_OUT_WINS)
				nwins = BRCM_NUM_PCIE_OUT_WINS;
			for (j = 0; j < nwins; j++, start += SZ_128M)
				brcm_pcie_set_outbound_win(pcie, j, start,
							   start - entry->offset,
							   SZ_128M);
			break;
		}
		brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
					   res->start - entry->offset,
					   resource_size(res));
		num_out_wins++;
	}

	/* PCIe->SCB endian mode for inbound window */
	tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
	u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
		PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
	writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);

	if (pcie->cfg->post_setup) {
		ret = pcie->cfg->post_setup(pcie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * This extends the timeout period for an access to an internal bus.  This
 * access timeout may occur during L1SS sleep periods, even without the
 * presence of a PCIe access.
 */
static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
{
	/* TIMEOUT register is two registers before RGR1_SW_INIT_1 */
	const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
	u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */

	/* 7712 does not have this (RGR1) timer */
	if (pcie->cfg->soc_base == BCM7712)
		return;

	/* Each unit in timeout register is 1/216,000,000 seconds */
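	/* i.e. 216 ticks per microsecond; 4 s -> 864,000,000, well within 32 bits */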
	writel(216 * timeout_us, pcie->base + REG_OFFSET);
}

static void brcm_config_clkreq(struct brcm_pcie *pcie)
{
	static const char err_msg[] = "invalid 'brcm,clkreq-mode' DT string\n";
	const char *mode = "default";
	u32 clkreq_cntl;
	int ret, tmp;

	ret = of_property_read_string(pcie->np, "brcm,clkreq-mode", &mode);
	if (ret && ret != -EINVAL) {
		dev_err(pcie->dev, err_msg);
		mode = "safe";
	}

	/* Start out assuming safe mode (both mode bits cleared) */
	clkreq_cntl = readl(pcie->base + HARD_DEBUG(pcie));
	clkreq_cntl &= ~PCIE_CLKREQ_MASK;

	if (strcmp(mode, "no-l1ss") == 0) {
		/*
		 * "no-l1ss" -- Provides Clock Power Management, L0s, and
		 * L1, but cannot provide L1 substate (L1SS) power
		 * savings. If the downstream device connected to the RC is
		 * L1SS capable AND the OS enables L1SS, all PCIe traffic
		 * may abruptly halt, potentially hanging the system.
		 */
		clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK;
		/*
		 * We want to un-advertise L1 substates because if the OS
		 * tries to configure the controller into using L1 substate
		 * power savings it may fail or hang when the RC HW is in
		 * "no-l1ss" mode.
		 */
		tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
		u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK);
		writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);

	} else if (strcmp(mode, "default") == 0) {
		/*
		 * "default" -- Provides L0s, L1, and L1SS, but not
		 * compliant to provide Clock Power Management;
		 * specifically, may not be able to meet the Tclron max
		 * timing of 400ns as specified in "Dynamic Clock Control",
		 * section 3.2.5.2.2 of the PCIe spec.  This situation is
		 * atypical and should happen only with older devices.
		 */
		clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK;
		brcm_extend_rbus_timeout(pcie);

	} else {
		/*
		 * "safe" -- No power savings; refclk is driven by RC
		 * unconditionally.
		 */
		if (strcmp(mode, "safe") != 0)
			dev_err(pcie->dev, err_msg);
		mode = "safe";
	}
	writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));

	dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
}

static int brcm_pcie_start_link(struct brcm_pcie *pcie)
{
	struct device *dev = pcie->dev;
	void __iomem *base = pcie->base;
	u16 nlw, cls, lnksta;
	bool ssc_good = false;
	int ret, i;

	/* Limit the generation if specified */
	if (pcie->gen)
		brcm_pcie_set_gen(pcie, pcie->gen);

	/* Unassert the fundamental reset */
	ret = pcie->cfg->perst_set(pcie, 0);
	if (ret)
		return ret;

	/*
	 * Wait for 100ms after PERST# deassertion; see PCIe CEM specification
	 * sections 2.2, PCIe r5.0, 6.6.1.
	 */
	msleep(100);

	/*
	 * Give the RC/EP even more time to wake up, before trying to
	 * configure RC.  Intermittently check status for link-up, up to a
	 * total of 100ms.
	 */
	for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
		msleep(5);

	if (!brcm_pcie_link_up(pcie)) {
		dev_err(dev, "link down\n");
		return -ENODEV;
	}

	brcm_config_clkreq(pcie);

	if (pcie->ssc) {
		ret = brcm_pcie_set_ssc(pcie);
		if (ret == 0)
			ssc_good = true;
		else
			dev_err(dev, "failed attempt to enter ssc mode\n");
	}

	lnksta = readw(base + BRCM_PCIE_CAP_REGS + PCI_EXP_LNKSTA);
	cls = FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta);
	nlw = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
	dev_info(dev, "link up, %s x%u %s\n",
		 pci_speed_string(pcie_link_speed[cls]), nlw,
		 ssc_good ? "(SSC)" : "(!SSC)");

	return 0;
}

static const char * const supplies[] = {
	"vpcie3v3",
	"vpcie3v3aux",
	"vpcie12v",
};

static void *alloc_subdev_regulators(struct device *dev)
{
	const size_t size = sizeof(struct subdev_regulators) +
		sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
	struct subdev_regulators *sr;
	int i;

	sr = devm_kzalloc(dev, size, GFP_KERNEL);
	if (sr) {
		sr->num_supplies = ARRAY_SIZE(supplies);
		for (i = 0; i < ARRAY_SIZE(supplies); i++)
			sr->supplies[i].supply = supplies[i];
	}

	return sr;
}

static int brcm_pcie_add_bus(struct pci_bus *bus)
{
	struct brcm_pcie *pcie = bus->sysdata;
	struct device *dev = &bus->dev;
	struct subdev_regulators *sr;
	int ret;

	if (!bus->parent || !pci_is_root_bus(bus->parent))
		return 0;

	if (dev->of_node) {
		sr = alloc_subdev_regulators(dev);
		if (!sr) {
			dev_info(dev, "Can't allocate regulators for downstream device\n");
			goto no_regulators;
		}

		pcie->sr = sr;

		ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
		if (ret) {
			dev_info(dev, "Did not get regulators, err=%d\n", ret);
			pcie->sr = NULL;
			goto no_regulators;
		}

		ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
		if (ret) {
			dev_err(dev, "Can't enable regulators for downstream device\n");
			regulator_bulk_free(sr->num_supplies, sr->supplies);
			pcie->sr = NULL;
		}
	}

no_regulators:
	brcm_pcie_start_link(pcie);
	return 0;
}

static void brcm_pcie_remove_bus(struct pci_bus *bus)
{
	struct brcm_pcie *pcie = bus->sysdata;
	struct subdev_regulators *sr = pcie->sr;
	struct device *dev = &bus->dev;

	if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
		return;

	if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
		dev_err(dev, "Failed to disable regulators for downstream device\n");
	regulator_bulk_free(sr->num_supplies, sr->supplies);
	pcie->sr = NULL;
}

/* L23 ("L2/L3 Ready") is a low-power PCIe link state */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	int l23, i;
	u32 tmp;

	/* Assert request for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
	writel(tmp, base + PCIE_MISC_PCIE_CTRL);

	/* Wait up to 36 msec for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_STATUS);
	l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
	for (i = 0; i < 15 && !l23; i++) {
		usleep_range(2000, 2400);
		tmp = readl(base + PCIE_MISC_PCIE_STATUS);
		l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
				tmp);
	}

	if (!l23)
		dev_err(pcie->dev, "failed to enter low-power link state\n");
}

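/*
 * Sequences the PHY PWRDN/RESET/DIG_RESET controls: bits are set in
 * ascending order when starting the PHY and cleared in descending order
 * when stopping it, then the final register value is verified.
 */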
static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
{
	static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
	static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
		PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
	const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
	const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
	u32 tmp, combined_mask = 0;
	u32 val;
	void __iomem *base = pcie->base;
	int i, ret;

	for (i = beg; i != end; start ? i++ : i--) {
		val = start ? BIT_MASK(shifts[i]) : 0;
		tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		tmp = (tmp & ~masks[i]) | (val & masks[i]);
		writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
		usleep_range(50, 200);
		combined_mask |= masks[i];
	}

	tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
	val = start ? combined_mask : 0;

	ret = (tmp & combined_mask) == val ? 0 : -EIO;
	if (ret)
		dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));

	return ret;
}

static inline int brcm_phy_start(struct brcm_pcie *pcie)
{
	return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 1) : 0;
}

static inline int brcm_phy_stop(struct brcm_pcie *pcie)
{
	return pcie->cfg->has_phy ? brcm_phy_cntl(pcie, 0) : 0;
}

static int brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	int tmp, ret;

	if (brcm_pcie_link_up(pcie))
		brcm_pcie_enter_l23(pcie);
	/* Assert fundamental reset */
	ret = pcie->cfg->perst_set(pcie, 1);
	if (ret)
		return ret;

	/* Deassert request for L23 in case it was asserted */
	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
	writel(tmp, base + PCIE_MISC_PCIE_CTRL);

	/* Turn off SerDes */
	tmp = readl(base + HARD_DEBUG(pcie));
	u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
	writel(tmp, base + HARD_DEBUG(pcie));

	if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN))
		/* Shutdown PCIe bridge */
		ret = pcie->cfg->bridge_sw_init_set(pcie, 1);

	return ret;
}

static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
{
	bool *ret = data;

	if (device_may_wakeup(&dev->dev)) {
		*ret = true;
		dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
	}
	return (int) *ret;
}

static int brcm_pcie_suspend_noirq(struct device *dev)
{
	struct brcm_pcie *pcie = dev_get_drvdata(dev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
	int ret, rret;

	ret = brcm_pcie_turn_off(pcie);
	if (ret)
		return ret;

	/*
	 * If brcm_phy_stop() returns an error, just dev_err(). If we
	 * return the error it will cause the suspend to fail and this is a
	 * forgivable offense that will probably be erased on resume.
	 */
	if (brcm_phy_stop(pcie))
		dev_err(dev, "Could not stop phy for suspend\n");

	ret = reset_control_rearm(pcie->rescal);
	if (ret) {
		dev_err(dev, "Could not rearm rescal reset\n");
		return ret;
	}

	if (pcie->sr) {
		/*
		 * Now turn off the regulators, but if at least one
		 * downstream device is enabled as a wake-up source, do not
		 * turn off regulators.
		 */
		pcie->ep_wakeup_capable = false;
		pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
			     &pcie->ep_wakeup_capable);
		if (!pcie->ep_wakeup_capable) {
			ret = regulator_bulk_disable(pcie->sr->num_supplies,
						     pcie->sr->supplies);
			if (ret) {
				dev_err(dev, "Could not turn off regulators\n");
				rret = reset_control_reset(pcie->rescal);
				if (rret)
					dev_err(dev, "failed to reset 'rescal' controller ret=%d\n",
						rret);
1606 				return ret;
1607 			}
1608 		}
1609 	}
1610 	clk_disable_unprepare(pcie->clk);
1611 
1612 	return 0;
1613 }
1614 
1615 static int brcm_pcie_resume_noirq(struct device *dev)
1616 {
1617 	struct brcm_pcie *pcie = dev_get_drvdata(dev);
1618 	void __iomem *base;
1619 	u32 tmp;
1620 	int ret, rret;
1621 
1622 	base = pcie->base;
1623 	ret = clk_prepare_enable(pcie->clk);
1624 	if (ret)
1625 		return ret;
1626 
1627 	ret = reset_control_reset(pcie->rescal);
1628 	if (ret)
1629 		goto err_disable_clk;
1630 
1631 	ret = brcm_phy_start(pcie);
1632 	if (ret)
1633 		goto err_reset;
1634 
1635 	/* Take bridge out of reset so we can access the SERDES reg */
1636 	pcie->cfg->bridge_sw_init_set(pcie, 0);
1637 
1638 	/* SERDES_IDDQ = 0 */
1639 	tmp = readl(base + HARD_DEBUG(pcie));
1640 	u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
1641 	writel(tmp, base + HARD_DEBUG(pcie));
1642 
1643 	/* wait for serdes to be stable */
1644 	udelay(100);
1645 
1646 	ret = brcm_pcie_setup(pcie);
1647 	if (ret)
1648 		goto err_reset;
1649 
1650 	if (pcie->sr) {
1651 		if (pcie->ep_wakeup_capable) {
1652 			/*
1653 			 * We are resuming from a suspend.  In the suspend we
1654 			 * did not disable the power supplies, so there is
1655 			 * no need to enable them (and falsely increase their
1656 			 * usage count).
1657 			 */
1658 			pcie->ep_wakeup_capable = false;
1659 		} else {
1660 			ret = regulator_bulk_enable(pcie->sr->num_supplies,
1661 						    pcie->sr->supplies);
1662 			if (ret) {
1663 				dev_err(dev, "Could not turn on regulators\n");
1664 				goto err_reset;
1665 			}
1666 		}
1667 	}
1668 
1669 	ret = brcm_pcie_start_link(pcie);
1670 	if (ret)
1671 		goto err_regulator;
1672 
1673 	if (pcie->msi)
1674 		brcm_msi_set_regs(pcie->msi);
1675 
1676 	return 0;
1677 
1678 err_regulator:
1679 	if (pcie->sr)
1680 		regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
1681 err_reset:
1682 	rret = reset_control_rearm(pcie->rescal);
1683 	if (rret)
1684 		dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
1685 err_disable_clk:
1686 	clk_disable_unprepare(pcie->clk);
1687 	return ret;
1688 }
1689 
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
	brcm_msi_remove(pcie);
	brcm_pcie_turn_off(pcie);
	if (brcm_phy_stop(pcie))
		dev_err(pcie->dev, "Could not stop phy\n");
	if (reset_control_rearm(pcie->rescal))
		dev_err(pcie->dev, "Could not rearm rescal reset\n");
	clk_disable_unprepare(pcie->clk);
}

static void brcm_pcie_remove(struct platform_device *pdev)
{
	struct brcm_pcie *pcie = platform_get_drvdata(pdev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);

	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	__brcm_pcie_remove(pcie);
}

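/*
 * Per-SoC register offset tables, indexed by the logical register IDs
 * (RGR1_SW_INIT_1, EXT_CFG_INDEX, ...) so that shared code can reach
 * SoC-specific register layouts through helpers such as HARD_DEBUG().
 */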
static const int pcie_offsets[] = {
	[RGR1_SW_INIT_1]	= 0x9210,
	[EXT_CFG_INDEX]		= 0x9000,
	[EXT_CFG_DATA]		= 0x8000,
	[PCIE_HARD_DEBUG]	= 0x4204,
	[PCIE_INTR2_CPU_BASE]	= 0x4300,
};

static const int pcie_offsets_bcm7278[] = {
	[RGR1_SW_INIT_1]	= 0xc010,
	[EXT_CFG_INDEX]		= 0x9000,
	[EXT_CFG_DATA]		= 0x8000,
	[PCIE_HARD_DEBUG]	= 0x4204,
	[PCIE_INTR2_CPU_BASE]	= 0x4300,
};

static const int pcie_offsets_bcm7425[] = {
	[RGR1_SW_INIT_1]	= 0x8010,
	[EXT_CFG_INDEX]		= 0x8300,
	[EXT_CFG_DATA]		= 0x8304,
	[PCIE_HARD_DEBUG]	= 0x4204,
	[PCIE_INTR2_CPU_BASE]	= 0x4300,
};

static const int pcie_offsets_bcm7712[] = {
	[RGR1_SW_INIT_1]	= 0x9210,
	[EXT_CFG_INDEX]		= 0x9000,
	[EXT_CFG_DATA]		= 0x8000,
	[PCIE_HARD_DEBUG]	= 0x4304,
	[PCIE_INTR2_CPU_BASE]	= 0x4400,
};

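/*
 * Per-compatible configuration, selected at probe time via the .data
 * pointer of the matching brcm_pcie_match[] entry.
 */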
static const struct pcie_cfg_data generic_cfg = {
	.offsets	= pcie_offsets,
	.soc_base	= GENERIC,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm2711_cfg = {
	.offsets	= pcie_offsets,
	.soc_base	= BCM2711,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

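/*
 * Note: the quirk name suggests the bridge must not be put back into
 * reset at shutdown on BCM2712; post_setup carries the extra
 * SoC-specific initialization this chip needs.
 */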
static const struct pcie_cfg_data bcm2712_cfg = {
	.offsets	= pcie_offsets_bcm7712,
	.soc_base	= BCM7712,
	.perst_set	= brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.post_setup	= brcm_pcie_post_setup_bcm2712,
	.quirks		= CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN,
	.num_inbound_wins = 10,
};

static const struct pcie_cfg_data bcm4908_cfg = {
	.offsets	= pcie_offsets,
	.soc_base	= BCM4908,
	.perst_set	= brcm_pcie_perst_set_4908,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7278_cfg = {
	.offsets	= pcie_offsets_bcm7278,
	.soc_base	= BCM7278,
	.perst_set	= brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7425_cfg = {
	.offsets	= pcie_offsets_bcm7425,
	.soc_base	= BCM7425,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7435_cfg = {
	.offsets	= pcie_offsets,
	.soc_base	= BCM7435,
	.perst_set	= brcm_pcie_perst_set_generic,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 3,
};

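/*
 * BCM7216 shares the BCM7278 register layout but has an external PHY
 * (has_phy), presumably the one driven by brcm_phy_start() and
 * brcm_phy_stop().
 */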
static const struct pcie_cfg_data bcm7216_cfg = {
	.offsets	= pcie_offsets_bcm7278,
	.soc_base	= BCM7278,
	.perst_set	= brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
	.has_phy	= true,
	.num_inbound_wins = 3,
};

static const struct pcie_cfg_data bcm7712_cfg = {
	.offsets	= pcie_offsets_bcm7712,
	.soc_base	= BCM7712,
	.perst_set	= brcm_pcie_perst_set_7278,
	.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
	.num_inbound_wins = 10,
};

static const struct of_device_id brcm_pcie_match[] = {
	{ .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
	{ .compatible = "brcm,bcm2712-pcie", .data = &bcm2712_cfg },
	{ .compatible = "brcm,bcm4908-pcie", .data = &bcm4908_cfg },
	{ .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7216_cfg },
	{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
	{ .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
	{ .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
	{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
	{ .compatible = "brcm,bcm7712-pcie", .data = &bcm7712_cfg },
	{},
};

static struct pci_ops brcm_pcie_ops = {
	.map_bus = brcm_pcie_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
	.add_bus = brcm_pcie_add_bus,
	.remove_bus = brcm_pcie_remove_bus,
};

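/*
 * BCM7425 uses the 32-bit-only generic accessors, presumably because
 * its indirect config-space interface cannot generate sub-word
 * accesses.
 */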
static struct pci_ops brcm7425_pcie_ops = {
	.map_bus = brcm7425_pcie_map_bus,
	.read = pci_generic_config_read32,
	.write = pci_generic_config_write32,
	.add_bus = brcm_pcie_add_bus,
	.remove_bus = brcm_pcie_remove_bus,
};

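/*
 * Bring-up order below matters: the clock must be running before the
 * bridge is released from reset, "swinit" is pulsed before "rescal" is
 * triggered, and the PHY is started only once "rescal" has completed.
 */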
static int brcm_pcie_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct pci_host_bridge *bridge;
	const struct pcie_cfg_data *data;
	struct brcm_pcie *pcie;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie));
	if (!bridge)
		return -ENOMEM;

	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return dev_err_probe(&pdev->dev, -EINVAL,
				     "failed to look up compatible string\n");

	pcie = pci_host_bridge_priv(bridge);
	pcie->dev = &pdev->dev;
	pcie->np = np;
	pcie->cfg = data;

	pcie->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pcie->base))
		return PTR_ERR(pcie->base);

	pcie->clk = devm_clk_get_optional(&pdev->dev, "sw_pcie");
	if (IS_ERR(pcie->clk))
		return PTR_ERR(pcie->clk);

	ret = of_pci_get_max_link_speed(np);
	pcie->gen = (ret < 0) ? 0 : ret;

	pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc");

	pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
	if (IS_ERR(pcie->rescal))
		return PTR_ERR(pcie->rescal);

	pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst");
	if (IS_ERR(pcie->perst_reset))
		return PTR_ERR(pcie->perst_reset);

	pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge");
	if (IS_ERR(pcie->bridge_reset))
		return PTR_ERR(pcie->bridge_reset);

	pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit");
	if (IS_ERR(pcie->swinit_reset))
		return PTR_ERR(pcie->swinit_reset);

	ret = clk_prepare_enable(pcie->clk);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");

	pcie->cfg->bridge_sw_init_set(pcie, 0);

	if (pcie->swinit_reset) {
		ret = reset_control_assert(pcie->swinit_reset);
		if (ret) {
			clk_disable_unprepare(pcie->clk);
			return dev_err_probe(&pdev->dev, ret,
					     "could not assert reset 'swinit'\n");
		}

		/* HW team recommends 1us for proper sync and propagation of reset */
		udelay(1);

		ret = reset_control_deassert(pcie->swinit_reset);
		if (ret) {
			clk_disable_unprepare(pcie->clk);
			return dev_err_probe(&pdev->dev, ret,
					     "could not de-assert reset 'swinit'\n");
		}
	}

	ret = reset_control_reset(pcie->rescal);
	if (ret) {
		clk_disable_unprepare(pcie->clk);
		return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
	}

	ret = brcm_phy_start(pcie);
	if (ret) {
		reset_control_rearm(pcie->rescal);
		clk_disable_unprepare(pcie->clk);
		return ret;
	}

	ret = brcm_pcie_setup(pcie);
	if (ret)
		goto fail;

	pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
	if (pcie->cfg->soc_base == BCM4908 &&
	    pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
		dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
		ret = -ENODEV;
		goto fail;
	}

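	/*
	 * Set up the internal MSI controller only when this node is its
	 * own "msi-parent"; otherwise MSIs are handled by an external
	 * interrupt controller.
	 */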
	if (pci_msi_enabled()) {
		struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);

		if (msi_np == pcie->np)
			ret = brcm_pcie_enable_msi(pcie);

		of_node_put(msi_np);

		if (ret) {
			dev_err(pcie->dev, "probe of internal MSI failed\n");
			goto fail;
		}
	}

	bridge->ops = pcie->cfg->soc_base == BCM7425 ?
				&brcm7425_pcie_ops : &brcm_pcie_ops;
	bridge->sysdata = pcie;

	platform_set_drvdata(pdev, pcie);

	ret = pci_host_probe(bridge);
	if (!ret && !brcm_pcie_link_up(pcie))
		ret = -ENODEV;

	if (ret) {
		brcm_pcie_remove(pdev);
		return ret;
	}

	return 0;

fail:
	__brcm_pcie_remove(pcie);

	return ret;
}

MODULE_DEVICE_TABLE(of, brcm_pcie_match);

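/*
 * The noirq phase is used so that the controller is suspended after,
 * and resumed before, the interrupt handlers of its child devices are
 * active.
 */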
static const struct dev_pm_ops brcm_pcie_pm_ops = {
	.suspend_noirq = brcm_pcie_suspend_noirq,
	.resume_noirq = brcm_pcie_resume_noirq,
};

static struct platform_driver brcm_pcie_driver = {
	.probe = brcm_pcie_probe,
	.remove = brcm_pcie_remove,
	.driver = {
		.name = "brcm-pcie",
		.of_match_table = brcm_pcie_match,
		.pm = &brcm_pcie_pm_ops,
	},
};
module_platform_driver(brcm_pcie_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom STB PCIe RC driver");
MODULE_AUTHOR("Broadcom");
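/*
 * Soft dependency: when the BCM2712 MIP MSI-X controller is built as a
 * module, load it first, presumably so that an external "msi-parent"
 * can be resolved when this driver probes.
 */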
MODULE_SOFTDEP("pre: irq_bcm2712_mip");