xref: /linux/arch/arm/mach-at91/pm.c (revision 098b6e44cbaa2d526d06af90c862d13fb414a0ec)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * arch/arm/mach-at91/pm.c
4  * AT91 Power Management
5  *
6  * Copyright (C) 2005 David Brownell
7  */
8 
9 #include <linux/genalloc.h>
10 #include <linux/io.h>
11 #include <linux/of_address.h>
12 #include <linux/of.h>
13 #include <linux/of_fdt.h>
14 #include <linux/of_platform.h>
15 #include <linux/platform_device.h>
16 #include <linux/parser.h>
17 #include <linux/suspend.h>
18 
19 #include <linux/clk.h>
20 #include <linux/clk/at91_pmc.h>
21 #include <linux/platform_data/atmel.h>
22 
23 #include <asm/cacheflush.h>
24 #include <asm/fncpy.h>
25 #include <asm/system_misc.h>
26 #include <asm/suspend.h>
27 
28 #include "generic.h"
29 #include "pm.h"
30 #include "sam_secure.h"
31 
32 #define BACKUP_DDR_PHY_CALIBRATION	(9)
33 
/**
 * struct at91_pm_bu - AT91 power management backup unit data structure
 * @suspended: true if suspended to backup mode
 * @reserved: reserved
 * @canary: canary data for memory checking after exit from backup mode
 * @resume: resume API
 * @ddr_phy_calibration: DDR PHY calibration data: ZQ0CR0, first 8 words
 * of the memory
 *
 * NOTE(review): this structure lives in securam and is read back by the
 * bootloader after a backup-mode resume (see at91_suspend_finish()), so the
 * field layout is presumably part of a firmware contract — confirm before
 * reordering or resizing fields.
 */
struct at91_pm_bu {
	int suspended;
	unsigned long reserved;
	phys_addr_t canary;
	phys_addr_t resume;
	unsigned long ddr_phy_calibration[BACKUP_DDR_PHY_CALIBRATION];
};
50 
/**
 * struct at91_pm_sfrbu_regs - registers mapping for SFRBU
 * @pswbu: power switch BU control registers
 *
 * The pswbu members hold the register *bit masks* for this SoC (key, ctrl,
 * state, softsw), not register values; see at91_pm_switch_ba_to_auto().
 */
struct at91_pm_sfrbu_regs {
	struct {
		u32 key;
		u32 ctrl;
		u32 state;
		u32 softsw;
	} pswbu;
};
63 
/**
 * enum at91_pm_eth_clk - Ethernet clock indexes
 * @AT91_PM_ETH_PCLK: pclk index
 * @AT91_PM_ETH_HCLK: hclk index
 * @AT91_PM_ETH_MAX_CLK: max index
 *
 * Indexes into struct at91_pm_quirk_eth::clks[].
 */
enum at91_pm_eth_clk {
	AT91_PM_ETH_PCLK,
	AT91_PM_ETH_HCLK,
	AT91_PM_ETH_MAX_CLK,
};
75 
/**
 * enum at91_pm_eth - Ethernet controller indexes
 * @AT91_PM_G_ETH: gigabit Ethernet controller index
 * @AT91_PM_E_ETH: megabit Ethernet controller index
 * @AT91_PM_MAX_ETH: max index
 *
 * Indexes into struct at91_pm_quirks::eth[].
 */
enum at91_pm_eth {
	AT91_PM_G_ETH,
	AT91_PM_E_ETH,
	AT91_PM_MAX_ETH,
};
87 
/**
 * struct at91_pm_quirk_eth - AT91 PM Ethernet quirks
 * @dev: Ethernet device
 * @np: Ethernet device node
 * @clks: Ethernet clocks
 * @modes: power management mode that this quirk applies to
 * @dns_modes: do not suspend modes: stop suspending if Ethernet is configured
 *	       as wakeup source but buggy and no other wakeup source is
 *	       available
 *
 * @dev holds a device reference taken in at91_pm_eth_quirk_is_valid() and is
 * dropped (and reset to NULL) by at91_pm_config_quirks() at the end of the
 * suspend/resume cycle.
 */
struct at91_pm_quirk_eth {
	struct device *dev;
	struct device_node *np;
	struct clk_bulk_data clks[AT91_PM_ETH_MAX_CLK];
	u32 modes;
	u32 dns_modes;
};
105 
/**
 * struct at91_pm_quirks - AT91 PM quirks
 * @eth: Ethernet quirks, indexed by enum at91_pm_eth
 */
struct at91_pm_quirks {
	struct at91_pm_quirk_eth eth[AT91_PM_MAX_ETH];
};
113 
/**
 * struct at91_soc_pm - AT91 SoC power management data structure
 * @config_shdwc_ws: wakeup sources configuration function for SHDWC
 * @config_pmc_ws: wakeup sources configuration function for PMC
 * @ws_ids: wakeup sources of_device_id array
 * @bu: backup unit mapped data (for backup mode)
 * @quirks: PM quirks
 * @data: PM data to be used on last phase of suspend
 * @sfrbu_regs: SFRBU registers mapping
 * @memcs: memory chip select
 */
struct at91_soc_pm {
	int (*config_shdwc_ws)(void __iomem *shdwc, u32 *mode, u32 *polarity);
	int (*config_pmc_ws)(void __iomem *pmc, u32 mode, u32 polarity);
	const struct of_device_id *ws_ids;
	struct at91_pm_bu *bu;
	struct at91_pm_quirks quirks;
	struct at91_pm_data data;
	struct at91_pm_sfrbu_regs sfrbu_regs;
	void *memcs;
};
135 
/**
 * enum at91_pm_iomaps - IOs that needs to be mapped for different PM modes
 * @AT91_PM_IOMAP_SHDWC:	SHDWC controller
 * @AT91_PM_IOMAP_SFRBU:	SFRBU controller
 * @AT91_PM_IOMAP_ETHC:		Ethernet controller
 */
enum at91_pm_iomaps {
	AT91_PM_IOMAP_SHDWC,
	AT91_PM_IOMAP_SFRBU,
	AT91_PM_IOMAP_ETHC,
};

/* Turn an at91_pm_iomaps index into a bitmask. */
#define AT91_PM_IOMAP(name)	BIT(AT91_PM_IOMAP_##name)
149 
/*
 * Global PM state for this SoC. Default modes are standby/ULP0; they are
 * presumably overridden at init time from the "atmel.pm_modes" parameter
 * parsed against the pm_modes table below — the parsing code is outside
 * this chunk, so confirm against the init path.
 */
static struct at91_soc_pm soc_pm = {
	.data = {
		.standby_mode = AT91_PM_STANDBY,
		.suspend_mode = AT91_PM_ULP0,
	},
};
156 
/* Token table mapping PM mode names to AT91_PM_* mode identifiers. */
static const match_table_t pm_modes __initconst = {
	{ AT91_PM_STANDBY,	"standby" },
	{ AT91_PM_ULP0,		"ulp0" },
	{ AT91_PM_ULP0_FAST,    "ulp0-fast" },
	{ AT91_PM_ULP1,		"ulp1" },
	{ AT91_PM_BACKUP,	"backup" },
	{ -1, NULL },
};
165 
/*
 * Raw register accessors for the RAM controller(s) mapped in
 * soc_pm.data.ramc[id]. __raw_* accessors: no byte-swapping, no barriers.
 */
#define at91_ramc_read(id, field) \
	__raw_readl(soc_pm.data.ramc[id] + field)

#define at91_ramc_write(id, field, value) \
	__raw_writel(value, soc_pm.data.ramc[id] + field)
171 
/*
 * Report whether @state is a suspend state this platform supports.
 * Returns 1 for ON/STANDBY/MEM, 0 for anything else.
 */
static int at91_pm_valid_state(suspend_state_t state)
{
	return state == PM_SUSPEND_ON ||
	       state == PM_SUSPEND_STANDBY ||
	       state == PM_SUSPEND_MEM;
}
184 
/*
 * Canary pattern; its address is published via at91_pm_bu::canary.
 * NOTE(review): the consumer (memory check after backup-mode exit) is not
 * visible in this file chunk — confirm against the backup init code.
 */
static int canary = 0xA5A5A5A5;

/*
 * Per-device wakeup programming data:
 * @pmc_fsmr_bit: bit(s) to set in PMC_FSMR for this wakeup source
 * @shdwc_mr_bit: if non-zero, the source is only armed when this bit is
 *                also set in SHDWC.MR
 * @set_polarity: also mirror @pmc_fsmr_bit into the polarity word (FSPR)
 */
struct wakeup_source_info {
	unsigned int pmc_fsmr_bit;
	unsigned int shdwc_mr_bit;
	bool set_polarity;
};

static const struct wakeup_source_info ws_info[] = {
	{ .pmc_fsmr_bit = AT91_PMC_FSTT(10),	.set_polarity = true },
	{ .pmc_fsmr_bit = AT91_PMC_RTCAL,	.shdwc_mr_bit = BIT(17) },
	{ .pmc_fsmr_bit = AT91_PMC_USBAL },
	{ .pmc_fsmr_bit = AT91_PMC_SDMMC_CD },
	{ .pmc_fsmr_bit = AT91_PMC_RTTAL },
	{ .pmc_fsmr_bit = AT91_PMC_RXLP_MCE },
};
201 
/*
 * Per-SoC wakeup source tables. Each entry maps a wakeup-capable device's
 * compatible string to its ws_info[] programming data; one of these tables
 * is installed as soc_pm.ws_ids and walked by at91_pm_config_ws().
 */
static const struct of_device_id sama5d2_ws_ids[] = {
	{ .compatible = "atmel,sama5d2-gem",		.data = &ws_info[0] },
	{ .compatible = "atmel,sama5d2-rtc",		.data = &ws_info[1] },
	{ .compatible = "atmel,sama5d3-udc",		.data = &ws_info[2] },
	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
	{ .compatible = "atmel,sama5d2-sdhci",		.data = &ws_info[3] },
	{ /* sentinel */ }
};

static const struct of_device_id sam9x60_ws_ids[] = {
	{ .compatible = "microchip,sam9x60-rtc",	.data = &ws_info[1] },
	{ .compatible = "atmel,at91rm9200-ohci",	.data = &ws_info[2] },
	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
	{ .compatible = "microchip,sam9x60-rtt",	.data = &ws_info[4] },
	{ .compatible = "cdns,sam9x60-macb",		.data = &ws_info[5] },
	{ /* sentinel */ }
};

static const struct of_device_id sama7_ws_ids[] = {
	{ .compatible = "microchip,sama7d65-rtc",	.data = &ws_info[1] },
	{ .compatible = "microchip,sama7g5-rtc",	.data = &ws_info[1] },
	{ .compatible = "microchip,sama7g5-ohci",	.data = &ws_info[2] },
	{ .compatible = "usb-ohci",			.data = &ws_info[2] },
	{ .compatible = "atmel,at91sam9g45-ehci",	.data = &ws_info[2] },
	{ .compatible = "usb-ehci",			.data = &ws_info[2] },
	{ .compatible = "microchip,sama7d65-sdhci",	.data = &ws_info[3] },
	{ .compatible = "microchip,sama7g5-sdhci",	.data = &ws_info[3] },
	{ .compatible = "microchip,sama7d65-rtt",	.data = &ws_info[4] },
	{ .compatible = "microchip,sama7g5-rtt",	.data = &ws_info[4] },
	{ /* sentinel */ }
};

static const struct of_device_id sam9x7_ws_ids[] = {
	{ .compatible = "microchip,sam9x7-rtc",		.data = &ws_info[1] },
	{ .compatible = "microchip,sam9x7-rtt",		.data = &ws_info[4] },
	{ .compatible = "microchip,sam9x7-gem",		.data = &ws_info[5] },
	{ /* sentinel */ }
};
245 
/*
 * at91_pm_config_ws() - program (or clear) the ULP1 wakeup sources.
 * @pm_mode: target PM mode; anything other than AT91_PM_ULP1 is a no-op
 * @set: true to accumulate and program wakeup sources, false to clear
 *       PMC_FSMR
 *
 * Walks soc_pm.ws_ids and, for every matching platform device that is
 * wakeup-enabled, ORs its PMC_FSMR mode (and optionally polarity) bits.
 * Sources guarded by an SHDWC.MR bit are skipped when that bit is clear.
 *
 * Returns 0 on success, -EPERM when the required controllers are not
 * mapped or no usable wakeup source was found.
 */
static int at91_pm_config_ws(unsigned int pm_mode, bool set)
{
	const struct wakeup_source_info *wsi;
	const struct of_device_id *match;
	struct platform_device *pdev;
	struct device_node *np;
	unsigned int mode = 0, polarity = 0, val = 0;

	if (pm_mode != AT91_PM_ULP1)
		return 0;

	if (!soc_pm.data.pmc || !soc_pm.data.shdwc || !soc_pm.ws_ids)
		return -EPERM;

	if (!set) {
		/* mode is still 0 here: disables all fast-startup sources. */
		writel(mode, soc_pm.data.pmc + AT91_PMC_FSMR);
		return 0;
	}

	/* Let the SoC hook seed mode/polarity from SHDWC (e.g. WUIR). */
	if (soc_pm.config_shdwc_ws)
		soc_pm.config_shdwc_ws(soc_pm.data.shdwc, &mode, &polarity);

	/* SHDWC.MR */
	val = readl(soc_pm.data.shdwc + 0x04);

	/* Loop through defined wakeup sources. */
	for_each_matching_node_and_match(np, soc_pm.ws_ids, &match) {
		pdev = of_find_device_by_node(np);
		if (!pdev)
			continue;

		if (device_may_wakeup(&pdev->dev)) {
			wsi = match->data;

			/* Check if enabled on SHDWC. */
			if (wsi->shdwc_mr_bit && !(val & wsi->shdwc_mr_bit))
				goto put_device;

			mode |= wsi->pmc_fsmr_bit;
			if (wsi->set_polarity)
				polarity |= wsi->pmc_fsmr_bit;
		}

put_device:
		/* Balance the reference taken by of_find_device_by_node(). */
		put_device(&pdev->dev);
	}

	if (mode) {
		if (soc_pm.config_pmc_ws)
			soc_pm.config_pmc_ws(soc_pm.data.pmc, mode, polarity);
	} else {
		pr_err("AT91: PM: no ULP1 wakeup sources found!");
	}

	return mode ? 0 : -EPERM;
}
302 
/*
 * SAMA5D2 hook: seed the PMC wakeup mode/polarity words from the SHDWC
 * Wake-Up Inputs Register (WUIR): low half-word carries the enable bits,
 * high half-word the input polarity. Always succeeds.
 */
static int at91_sama5d2_config_shdwc_ws(void __iomem *shdwc, u32 *mode,
					u32 *polarity)
{
	u32 wuir = readl(shdwc + 0x0c);		/* SHDWC.WUIR */
	u32 inputs_en = wuir & 0x3ff;
	u32 inputs_pol = (wuir >> 16) & 0x3ff;

	*mode |= inputs_en;
	*polarity |= inputs_pol;

	return 0;
}
315 
/*
 * SAMA5D2 hook: commit the accumulated wakeup configuration to the PMC
 * fast-startup mode (FSMR) and polarity (FSPR) registers. Always succeeds.
 */
static int at91_sama5d2_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
{
	writel(mode, pmc + AT91_PMC_FSMR);
	writel(polarity, pmc + AT91_PMC_FSPR);

	return 0;
}
323 
/*
 * SAM9X60 hook: commit the wakeup configuration to PMC_FSMR only.
 * @polarity is intentionally unused (this SoC takes no FSPR write here);
 * the parameter is kept to match the config_pmc_ws callback signature.
 */
static int at91_sam9x60_config_pmc_ws(void __iomem *pmc, u32 mode, u32 polarity)
{
	writel(mode, pmc + AT91_PMC_FSMR);

	return 0;
}
330 
/*
 * at91_pm_eth_quirk_is_valid() - decide whether the Ethernet clock quirk
 * applies to @eth for the currently selected suspend mode.
 *
 * Side effect: on first success this takes a reference on the platform
 * device and caches it in eth->dev; at91_pm_config_quirks() drops it at
 * the end of the suspend/resume cycle.
 *
 * Returns true when the quirk must be applied (device present in DT,
 * quirk enabled for this mode, and device is a wakeup source).
 */
static bool at91_pm_eth_quirk_is_valid(struct at91_pm_quirk_eth *eth)
{
	struct platform_device *pdev;

	/* Interface NA in DT. */
	if (!eth->np)
		return false;

	/* No quirks for this interface and current suspend mode. */
	if (!(eth->modes & BIT(soc_pm.data.mode)))
		return false;

	if (!eth->dev) {
		/* Driver not probed. */
		pdev = of_find_device_by_node(eth->np);
		if (!pdev)
			return false;
		/* put_device(eth->dev) is called at the end of suspend. */
		eth->dev = &pdev->dev;
	}

	/* No quirks if device isn't a wakeup source. */
	if (!device_may_wakeup(eth->dev))
		return false;

	return true;
}
358 
/*
 * at91_pm_config_quirks() - apply (@suspend = true) or revert the Ethernet
 * clock quirks for the current suspend mode.
 *
 * On suspend the affected interfaces' bulk clocks are disabled; on resume
 * they are re-enabled and the device reference taken in
 * at91_pm_eth_quirk_is_valid() is released. If an interface in a
 * "do not suspend" mode is the only wakeup source, suspend is refused
 * (-EPERM) and clocks of already-processed interfaces are restored.
 *
 * Returns 0 on success or a negative errno.
 */
static int at91_pm_config_quirks(bool suspend)
{
	struct at91_pm_quirk_eth *eth;
	int i, j, ret, tmp;

	/*
	 * Ethernet IPs who's device_node pointers are stored into
	 * soc_pm.quirks.eth[].np cannot handle WoL packets while in ULP0, ULP1
	 * or both due to a hardware bug. If they receive WoL packets while in
	 * ULP0 or ULP1 IPs could stop working or the whole system could stop
	 * working. We cannot handle this scenario in the ethernet driver itself
	 * as the driver is common to multiple vendors and also we only know
	 * here, in this file, if we suspend to ULP0 or ULP1 mode. Thus handle
	 * these scenarios here, as quirks.
	 */
	for (i = 0; i < AT91_PM_MAX_ETH; i++) {
		eth = &soc_pm.quirks.eth[i];

		if (!at91_pm_eth_quirk_is_valid(eth))
			continue;

		/*
		 * For modes in dns_modes mask the system blocks if quirk is not
		 * applied but if applied the interface doesn't act at WoL
		 * events. Thus take care to avoid suspending if this interface
		 * is the only configured wakeup source.
		 */
		if (suspend && eth->dns_modes & BIT(soc_pm.data.mode)) {
			int ws_count = 0;
#ifdef CONFIG_PM_SLEEP
			struct wakeup_source *ws;

			/* Count any wakeup source other than this interface. */
			for_each_wakeup_source(ws) {
				if (ws->dev == eth->dev)
					continue;

				ws_count++;
				break;
			}
#endif

			/*
			 * Checking !ws is good for all platforms with issues
			 * even when both G_ETH and E_ETH are available as dns_modes
			 * is populated only on G_ETH interface.
			 */
			if (!ws_count) {
				pr_err("AT91: PM: Ethernet cannot resume from WoL!");
				ret = -EPERM;
				put_device(eth->dev);
				eth->dev = NULL;
				/* No need to revert clock settings for this eth. */
				i--;
				goto clk_unconfigure;
			}
		}

		if (suspend) {
			clk_bulk_disable_unprepare(AT91_PM_ETH_MAX_CLK, eth->clks);
		} else {
			ret = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK,
						      eth->clks);
			if (ret)
				goto clk_unconfigure;
			/*
			 * Release the reference to eth->dev taken in
			 * at91_pm_eth_quirk_is_valid().
			 */
			put_device(eth->dev);
			eth->dev = NULL;
		}
	}

	return 0;

clk_unconfigure:
	/*
	 * In case of resume we reach this point if clk_prepare_enable() failed.
	 * we don't want to revert the previous clk_prepare_enable() for the
	 * other IP.
	 */
	for (j = i; j >= 0; j--) {
		eth = &soc_pm.quirks.eth[j];
		if (suspend) {
			if (!at91_pm_eth_quirk_is_valid(eth))
				continue;

			tmp = clk_bulk_prepare_enable(AT91_PM_ETH_MAX_CLK, eth->clks);
			if (tmp) {
				pr_err("AT91: PM: failed to enable %s clocks\n",
				       j == AT91_PM_G_ETH ? "geth" : "eth");
			}
		}

		/*
		 * Release the reference to eth->dev taken in
		 * at91_pm_eth_quirk_is_valid().
		 */
		put_device(eth->dev);
		eth->dev = NULL;
	}

	return ret;
}
463 
/*
 * Called after processes are frozen, but before we shutdown devices.
 *
 * Selects the effective AT91 PM mode for @state, programs the ULP1 wakeup
 * sources when needed, and records in the backup unit whether this cycle
 * targets backup mode. Returns 0 or a negative errno from
 * at91_pm_config_ws().
 */
static int at91_pm_begin(suspend_state_t state)
{
	int ret;

	switch (state) {
	case PM_SUSPEND_MEM:
		soc_pm.data.mode = soc_pm.data.suspend_mode;
		break;

	case PM_SUSPEND_STANDBY:
		soc_pm.data.mode = soc_pm.data.standby_mode;
		break;

	default:
		/* Unknown state: mark mode invalid; config_ws ignores it. */
		soc_pm.data.mode = -1;
	}

	ret = at91_pm_config_ws(soc_pm.data.mode, true);
	if (ret)
		return ret;

	/* Tell the backup unit whether this is a backup-mode suspend. */
	if (soc_pm.data.mode == AT91_PM_BACKUP)
		soc_pm.bu->suspended = 1;
	else if (soc_pm.bu)
		soc_pm.bu->suspended = 0;

	return 0;
}
495 
/*
 * Verify that all the clocks are correct before entering
 * slow-clock mode.
 *
 * Returns 1 when it is safe to proceed, 0 when USB is still clocked or a
 * programmable clock (PCK0..PCK3) is enabled with a source other than the
 * slow clock.
 */
static int at91_pm_verify_clocks(void)
{
	unsigned long scsr;
	int i;

	scsr = readl(soc_pm.data.pmc + AT91_PMC_SCSR);

	/* USB must not be using PLLB */
	if ((scsr & soc_pm.data.uhp_udp_mask) != 0) {
		pr_err("AT91: PM - Suspend-to-RAM with USB still active\n");
		return 0;
	}

	/* PCK0..PCK3 must be disabled, or configured to use clk32k */
	for (i = 0; i < 4; i++) {
		u32 css;

		if ((scsr & (AT91_PMC_PCK0 << i)) == 0)
			continue;
		css = readl(soc_pm.data.pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS;
		if (css != AT91_PMC_CSS_SLOW) {
			pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css);
			return 0;
		}
	}

	return 1;
}
528 
529 /*
530  * Call this from platform driver suspend() to see how deeply to suspend.
531  * For example, some controllers (like OHCI) need one of the PLL clocks
532  * in order to act as a wakeup source, and those are not available when
533  * going into slow clock mode.
534  *
535  * REVISIT: generalize as clk_will_be_available(clk)?  Other platforms have
536  * the very same problem (but not using at91 main_clk), and it'd be better
537  * to add one generic API rather than lots of platform-specific ones.
538  */
at91_suspend_entering_slow_clock(void)539 int at91_suspend_entering_slow_clock(void)
540 {
541 	return (soc_pm.data.mode >= AT91_PM_ULP0);
542 }
543 EXPORT_SYMBOL(at91_suspend_entering_slow_clock);
544 
545 static void (*at91_suspend_sram_fn)(struct at91_pm_data *);
546 extern void at91_pm_suspend_in_sram(struct at91_pm_data *pm_data);
547 extern u32 at91_pm_suspend_in_sram_sz;
548 
/*
 * at91_suspend_finish() - cpu_suspend() finisher: final work before the
 * SRAM suspend routine powers the system down.
 * @val: unused (cpu_suspend() callback argument, passed as 0)
 *
 * For backup mode with a DDR PHY, stashes sanitized ZQ calibration data
 * and the first memory words into the backup unit (securam) so the
 * bootloader can restore them after recalibration. Then flushes caches
 * and jumps to the SRAM-resident suspend code. Always returns 0.
 */
static int at91_suspend_finish(unsigned long val)
{
	/* SYNOPSYS workaround to fix a bug in the calibration logic */
	unsigned char modified_fix_code[] = {
		0x00, 0x01, 0x01, 0x06, 0x07, 0x0c, 0x06, 0x07, 0x0b, 0x18,
		0x0a, 0x0b, 0x0c, 0x0d, 0x0d, 0x0a, 0x13, 0x13, 0x12, 0x13,
		0x14, 0x15, 0x15, 0x12, 0x18, 0x19, 0x19, 0x1e, 0x1f, 0x14,
		0x1e, 0x1f,
	};
	unsigned int tmp, index;
	int i;

	if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
		/*
		 * Bootloader will perform DDR recalibration and will try to
		 * restore the ZQ0SR0 with the value saved here. But the
		 * calibration is buggy and restoring some values from ZQ0SR0
		 * is forbidden and risky thus we need to provide processed
		 * values for these.
		 */
		tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);

		/* Store pull-down output impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] = modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDO_OFF;

		/* Store pull-up output impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PUO_OFF;

		/* Store pull-down on-die termination impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SR0_PDODT_OFF;

		/* Store pull-up on-die termination impedance select. */
		index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
		soc_pm.bu->ddr_phy_calibration[0] |= modified_fix_code[index] << DDR3PHY_ZQ0SRO_PUODT_OFF;

		/*
		 * The 1st 8 words of memory might get corrupted in the process
		 * of DDR PHY recalibration; it is saved here in securam and it
		 * will be restored later, after recalibration, by bootloader
		 */
		for (i = 1; i < BACKUP_DDR_PHY_CALIBRATION; i++)
			soc_pm.bu->ddr_phy_calibration[i] =
				*((unsigned int *)soc_pm.memcs + (i - 1));
	}

	flush_cache_all();
	outer_disable();

	/* Tail call into SRAM: does not return before wakeup/poweroff. */
	at91_suspend_sram_fn(&soc_pm.data);

	return 0;
}
604 
605 /**
606  * at91_pm_switch_ba_to_auto() - Configure Backup Unit Power Switch
607  * to automatic/hardware mode.
608  *
609  * The Backup Unit Power Switch can be managed either by software or hardware.
610  * Enabling hardware mode allows the automatic transition of power between
611  * VDDANA (or VDDIN33) and VDDBU (or VBAT, respectively), based on the
612  * availability of these power sources.
613  *
614  * If the Backup Unit Power Switch is already in automatic mode, no action is
615  * required. If it is in software-controlled mode, it is switched to automatic
616  * mode to enhance safety and eliminate the need for toggling between power
617  * sources.
618  */
at91_pm_switch_ba_to_auto(void)619 static void at91_pm_switch_ba_to_auto(void)
620 {
621 	unsigned int offset = offsetof(struct at91_pm_sfrbu_regs, pswbu);
622 	unsigned int val;
623 
624 	/* Just for safety. */
625 	if (!soc_pm.data.sfrbu)
626 		return;
627 
628 	val = readl(soc_pm.data.sfrbu + offset);
629 
630 	/* Already on auto/hardware. */
631 	if (!(val & soc_pm.sfrbu_regs.pswbu.ctrl))
632 		return;
633 
634 	val &= ~soc_pm.sfrbu_regs.pswbu.ctrl;
635 	val |= soc_pm.sfrbu_regs.pswbu.key;
636 	writel(val, soc_pm.data.sfrbu + offset);
637 }
638 
/*
 * at91_pm_suspend() - run the low-level suspend path for the selected mode.
 *
 * Backup mode goes through cpu_suspend() (full CPU state save); all other
 * modes call the finisher directly. After a backup-mode wakeup the SRAM
 * suspend routine must be re-copied because SRAM contents are lost.
 */
static void at91_pm_suspend(suspend_state_t state)
{
	if (soc_pm.data.mode == AT91_PM_BACKUP) {
		at91_pm_switch_ba_to_auto();

		cpu_suspend(0, at91_suspend_finish);

		/* The SRAM is lost between suspend cycles */
		at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
					     &at91_pm_suspend_in_sram,
					     at91_pm_suspend_in_sram_sz);

		if (IS_ENABLED(CONFIG_SOC_SAMA7D65)) {
			/*
			 * SHDWC.SR — read-only status register; presumably a
			 * read-to-clear of wakeup status. NOTE(review):
			 * confirm intent against the SAMA7D65 datasheet.
			 */
			readl(soc_pm.data.shdwc + 0x08);
		}
	} else {
		at91_suspend_finish(0);
	}

	/* Re-enable the outer cache disabled in at91_suspend_finish(). */
	outer_resume();
}
661 
/*
 * STANDBY mode has *all* drivers suspended; ignores irqs not marked as 'wakeup'
 * event sources; and reduces DRAM power.  But otherwise it's identical to
 * PM_SUSPEND_ON: cpu idle, and nothing fancy done with main or cpu clocks.
 *
 * AT91_PM_ULP0 is like STANDBY plus slow clock mode, so drivers must
 * suspend more deeply, the master clock switches to the clk32k and turns off
 * the main oscillator
 *
 * AT91_PM_BACKUP turns off the whole SoC after placing the DDR in self refresh
 */
static int at91_pm_enter(suspend_state_t state)
{
	int ret;

	ret = at91_pm_config_quirks(true);
	if (ret)
		return ret;

	switch (state) {
	case PM_SUSPEND_MEM:
	case PM_SUSPEND_STANDBY:
		/*
		 * Ensure that clocks are in a valid state.
		 */
		if (soc_pm.data.mode >= AT91_PM_ULP0 &&
		    !at91_pm_verify_clocks())
			goto error;

		at91_pm_suspend(state);

		break;

	case PM_SUSPEND_ON:
		cpu_do_idle();
		break;

	default:
		pr_debug("AT91: PM - bogus suspend state %d\n", state);
		goto error;
	}

	/*
	 * Intentional fall-through: "error" is the common exit path; both
	 * success and failure revert the quirks and report success to the
	 * suspend core.
	 */
error:
	at91_pm_config_quirks(false);
	return 0;
}
708 
/*
 * Called right prior to thawing processes.
 *
 * Clears the ULP1 wakeup-source configuration programmed in at91_pm_begin().
 */
static void at91_pm_end(void)
{
	at91_pm_config_ws(soc_pm.data.mode, false);
}
716 
717 
/* Hooks registered with the suspend core for this platform. */
static const struct platform_suspend_ops at91_pm_ops = {
	.valid	= at91_pm_valid_state,
	.begin	= at91_pm_begin,
	.enter	= at91_pm_enter,
	.end	= at91_pm_end,
};

/* Its platform_data is set to the per-SoC standby routine by at91_dt_ramc(). */
static struct platform_device at91_cpuidle_device = {
	.name = "cpuidle-at91",
};
728 
/*
 * The AT91RM9200 goes into self-refresh mode with this command, and will
 * terminate self-refresh automatically on the next SDRAM access.
 *
 * Self-refresh mode is exited as soon as a memory access is made, but we don't
 * know for sure when that happens. However, we need to restore the low-power
 * mode if it was enabled before going idle. Restoring low-power mode while
 * still in self-refresh is "not recommended", but seems to work.
 */
static void at91rm9200_standby(void)
{
	/*
	 * Must run from a cache-line-aligned block so no SDRAM fetch occurs
	 * between entering self-refresh and WFI:
	 *   - CP15 c7,c10,4: drain write buffer
	 *   - store 1 to SDRAMC_SRR: enter self-refresh
	 *   - CP15 c7,c0,4: wait for interrupt
	 */
	asm volatile(
		"b    1f\n\t"
		".align    5\n\t"
		"1:  mcr    p15, 0, %0, c7, c10, 4\n\t"
		"    str    %2, [%1, %3]\n\t"
		"    mcr    p15, 0, %0, c7, c0, 4\n\t"
		:
		: "r" (0), "r" (soc_pm.data.ramc[0]),
		  "r" (1), "r" (AT91_MC_SDRAMC_SRR));
}
750 
751 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
752  * remember.
753  */
at91_ddr_standby(void)754 static void at91_ddr_standby(void)
755 {
756 	/* Those two values allow us to delay self-refresh activation
757 	 * to the maximum. */
758 	u32 lpr0, lpr1 = 0;
759 	u32 mdr, saved_mdr0, saved_mdr1 = 0;
760 	u32 saved_lpr0, saved_lpr1 = 0;
761 
762 	/* LPDDR1 --> force DDR2 mode during self-refresh */
763 	saved_mdr0 = at91_ramc_read(0, AT91_DDRSDRC_MDR);
764 	if ((saved_mdr0 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
765 		mdr = saved_mdr0 & ~AT91_DDRSDRC_MD;
766 		mdr |= AT91_DDRSDRC_MD_DDR2;
767 		at91_ramc_write(0, AT91_DDRSDRC_MDR, mdr);
768 	}
769 
770 	if (soc_pm.data.ramc[1]) {
771 		saved_lpr1 = at91_ramc_read(1, AT91_DDRSDRC_LPR);
772 		lpr1 = saved_lpr1 & ~AT91_DDRSDRC_LPCB;
773 		lpr1 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
774 		saved_mdr1 = at91_ramc_read(1, AT91_DDRSDRC_MDR);
775 		if ((saved_mdr1 & AT91_DDRSDRC_MD) == AT91_DDRSDRC_MD_LOW_POWER_DDR) {
776 			mdr = saved_mdr1 & ~AT91_DDRSDRC_MD;
777 			mdr |= AT91_DDRSDRC_MD_DDR2;
778 			at91_ramc_write(1, AT91_DDRSDRC_MDR, mdr);
779 		}
780 	}
781 
782 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
783 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
784 	lpr0 |= AT91_DDRSDRC_LPCB_SELF_REFRESH;
785 
786 	/* self-refresh mode now */
787 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
788 	if (soc_pm.data.ramc[1])
789 		at91_ramc_write(1, AT91_DDRSDRC_LPR, lpr1);
790 
791 	cpu_do_idle();
792 
793 	at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr0);
794 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
795 	if (soc_pm.data.ramc[1]) {
796 		at91_ramc_write(0, AT91_DDRSDRC_MDR, saved_mdr1);
797 		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
798 	}
799 }
800 
sama5d3_ddr_standby(void)801 static void sama5d3_ddr_standby(void)
802 {
803 	u32 lpr0;
804 	u32 saved_lpr0;
805 
806 	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
807 	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
808 	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
809 
810 	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
811 
812 	cpu_do_idle();
813 
814 	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
815 }
816 
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
 * remember.
 *
 * SAM9 standby: place the SDRAM controller(s) in self-refresh around a WFI
 * and restore the saved low-power register(s) afterwards.
 */
static void at91sam9_sdram_standby(void)
{
	u32 lpr0, lpr1 = 0;
	u32 saved_lpr0, saved_lpr1 = 0;

	if (soc_pm.data.ramc[1]) {
		saved_lpr1 = at91_ramc_read(1, AT91_SDRAMC_LPR);
		lpr1 = saved_lpr1 & ~AT91_SDRAMC_LPCB;
		lpr1 |= AT91_SDRAMC_LPCB_SELF_REFRESH;
	}

	saved_lpr0 = at91_ramc_read(0, AT91_SDRAMC_LPR);
	lpr0 = saved_lpr0 & ~AT91_SDRAMC_LPCB;
	lpr0 |= AT91_SDRAMC_LPCB_SELF_REFRESH;

	/* self-refresh mode now */
	at91_ramc_write(0, AT91_SDRAMC_LPR, lpr0);
	if (soc_pm.data.ramc[1])
		at91_ramc_write(1, AT91_SDRAMC_LPR, lpr1);

	cpu_do_idle();

	at91_ramc_write(0, AT91_SDRAMC_LPR, saved_lpr0);
	if (soc_pm.data.ramc[1])
		at91_ramc_write(1, AT91_SDRAMC_LPR, saved_lpr1);
}
846 
/*
 * SAMA7G5 standby: enable RAM self-refresh-on-idle and slow the CPU clock
 * around a WFI, restoring both registers afterwards.
 */
static void sama7g5_standby(void)
{
	int pwrtmg, ratio;

	pwrtmg = readl(soc_pm.data.ramc[0] + UDDRC_PWRCTL);
	ratio = readl(soc_pm.data.pmc + AT91_PMC_RATIO);

	/*
	 * Place RAM into self-refresh after a maximum idle clocks. The maximum
	 * idle clocks is configured by bootloader in
	 * UDDRC_PWRMGT.SELFREF_TO_X32.
	 */
	writel(pwrtmg | UDDRC_PWRCTL_SELFREF_EN,
	       soc_pm.data.ramc[0] + UDDRC_PWRCTL);
	/* Divide CPU clock by 16. */
	writel(ratio & ~AT91_PMC_RATIO_RATIO, soc_pm.data.pmc + AT91_PMC_RATIO);

	cpu_do_idle();

	/* Restore previous configuration. */
	writel(ratio, soc_pm.data.pmc + AT91_PMC_RATIO);
	writel(pwrtmg, soc_pm.data.ramc[0] + UDDRC_PWRCTL);
}
870 
/*
 * Per-RAM-controller data:
 * @idle: standby routine installed as cpuidle platform_data
 * @memctrl: AT91_MEMCTRL_* identifier copied to soc_pm.data.memctrl
 */
struct ramc_info {
	void (*idle)(void);
	unsigned int memctrl;
};

static const struct ramc_info ramc_infos[] __initconst = {
	{ .idle = at91rm9200_standby, .memctrl = AT91_MEMCTRL_MC},
	{ .idle = at91sam9_sdram_standby, .memctrl = AT91_MEMCTRL_SDRAMC},
	{ .idle = at91_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
	{ .idle = sama5d3_ddr_standby, .memctrl = AT91_MEMCTRL_DDRSDR},
	{ .idle = sama7g5_standby, },
};

/* RAM controller DT match table; .data selects the ramc_infos[] entry. */
static const struct of_device_id ramc_ids[] __initconst = {
	{ .compatible = "atmel,at91rm9200-sdramc", .data = &ramc_infos[0] },
	{ .compatible = "atmel,at91sam9260-sdramc", .data = &ramc_infos[1] },
	{ .compatible = "atmel,at91sam9g45-ddramc", .data = &ramc_infos[2] },
	{ .compatible = "atmel,sama5d3-ddramc", .data = &ramc_infos[3] },
	{ .compatible = "microchip,sama7g5-uddrc", .data = &ramc_infos[4], },
	{ /*sentinel*/ }
};

/* DDR PHY DT match table (optional on most SoCs; see at91_dt_ramc()). */
static const struct of_device_id ramc_phy_ids[] __initconst = {
	{ .compatible = "microchip,sama7g5-ddr3phy", },
	{ /* Sentinel. */ },
};
897 
/*
 * at91_dt_ramc() - locate and iomap the RAM controller(s) (and optional
 * DDR PHY) from the device tree.
 * @phy_mandatory: fail with -ENODEV if no DDR PHY node is found
 *
 * Fills soc_pm.data.ramc[]/ramc_phy and soc_pm.data.memctrl, and installs
 * the first matching standby routine as the cpuidle device's platform
 * data. On failure all ramc mappings made so far are unmapped.
 *
 * Returns 0 on success or a negative errno.
 */
static __init int at91_dt_ramc(bool phy_mandatory)
{
	struct device_node *np;
	const struct of_device_id *of_id;
	int idx = 0;
	void *standby = NULL;
	const struct ramc_info *ramc;
	int ret;

	for_each_matching_node_and_match(np, ramc_ids, &of_id) {
		soc_pm.data.ramc[idx] = of_iomap(np, 0);
		if (!soc_pm.data.ramc[idx]) {
			pr_err("unable to map ramc[%d] cpu registers\n", idx);
			ret = -ENOMEM;
			of_node_put(np);
			goto unmap_ramc;
		}

		ramc = of_id->data;
		if (ramc) {
			/* Keep the standby routine of the first controller. */
			if (!standby)
				standby = ramc->idle;
			soc_pm.data.memctrl = ramc->memctrl;
		}

		idx++;
	}

	if (!idx) {
		pr_err("unable to find compatible ram controller node in dtb\n");
		ret = -ENODEV;
		goto unmap_ramc;
	}

	/* Lookup for DDR PHY node, if any. */
	for_each_matching_node_and_match(np, ramc_phy_ids, &of_id) {
		soc_pm.data.ramc_phy = of_iomap(np, 0);
		if (!soc_pm.data.ramc_phy) {
			pr_err("unable to map ramc phy cpu registers\n");
			ret = -ENOMEM;
			of_node_put(np);
			goto unmap_ramc;
		}
	}

	if (phy_mandatory && !soc_pm.data.ramc_phy) {
		pr_err("DDR PHY is mandatory!\n");
		ret = -ENODEV;
		goto unmap_ramc;
	}

	if (!standby) {
		pr_warn("ramc no standby function available\n");
		return 0;
	}

	at91_cpuidle_device.dev.platform_data = standby;

	return 0;

unmap_ramc:
	while (idx)
		iounmap(soc_pm.data.ramc[--idx]);

	return ret;
}
964 
/*
 * at91rm9200_idle() - idle hook for AT91RM9200.
 *
 * Disable the processor clock.  The processor will be automatically
 * re-enabled by an interrupt or by a reset.
 */
static void at91rm9200_idle(void)
{
	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
}
973 
/*
 * at91sam9_idle() - idle hook for SAM9: gate the processor clock, then
 * wait for interrupt.
 */
static void at91sam9_idle(void)
{
	writel(AT91_PMC_PCK, soc_pm.data.pmc + AT91_PMC_SCDR);
	cpu_do_idle();
}
979 
/*
 * at91_pm_sram_init() - allocate SRAM and copy the suspend routine into it.
 *
 * Finds the "mmio-sram" pool, carves out at91_pm_suspend_in_sram_sz bytes,
 * maps it executable and copies at91_pm_suspend_in_sram there, setting
 * at91_suspend_sram_fn. On any failure a warning is printed and
 * at91_suspend_sram_fn stays NULL.
 */
static void __init at91_pm_sram_init(void)
{
	struct gen_pool *sram_pool;
	phys_addr_t sram_pbase;
	unsigned long sram_base;
	struct platform_device *pdev = NULL;

	for_each_compatible_node_scoped(node, NULL, "mmio-sram") {
		pdev = of_find_device_by_node(node);
		if (pdev)
			break;
	}

	if (!pdev) {
		pr_warn("%s: failed to find sram device!\n", __func__);
		return;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: sram pool unavailable!\n", __func__);
		goto out_put_device;
	}

	sram_base = gen_pool_alloc(sram_pool, at91_pm_suspend_in_sram_sz);
	if (!sram_base) {
		pr_warn("%s: unable to alloc sram!\n", __func__);
		goto out_put_device;
	}

	sram_pbase = gen_pool_virt_to_phys(sram_pool, sram_base);
	/* Needs an executable mapping: fncpy'd code runs from here. */
	at91_suspend_sram_fn = __arm_ioremap_exec(sram_pbase,
					at91_pm_suspend_in_sram_sz, false);
	if (!at91_suspend_sram_fn) {
		pr_warn("SRAM: Could not map\n");
		goto out_put_device;
	}

	/* Copy the pm suspend handler to SRAM */
	at91_suspend_sram_fn = fncpy(at91_suspend_sram_fn,
			&at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz);
	return;

out_put_device:
	/* Drop the reference taken by of_find_device_by_node(). */
	put_device(&pdev->dev);
	return;
}
1027 
at91_is_pm_mode_active(int pm_mode)1028 static bool __init at91_is_pm_mode_active(int pm_mode)
1029 {
1030 	return (soc_pm.data.standby_mode == pm_mode ||
1031 		soc_pm.data.suspend_mode == pm_mode);
1032 }
1033 
at91_pm_backup_scan_memcs(unsigned long node,const char * uname,int depth,void * data)1034 static int __init at91_pm_backup_scan_memcs(unsigned long node,
1035 					    const char *uname, int depth,
1036 					    void *data)
1037 {
1038 	const char *type;
1039 	const __be32 *reg;
1040 	int *located = data;
1041 	int size;
1042 
1043 	/* Memory node already located. */
1044 	if (*located)
1045 		return 0;
1046 
1047 	type = of_get_flat_dt_prop(node, "device_type", NULL);
1048 
1049 	/* We are scanning "memory" nodes only. */
1050 	if (!type || strcmp(type, "memory"))
1051 		return 0;
1052 
1053 	reg = of_get_flat_dt_prop(node, "reg", &size);
1054 	if (reg) {
1055 		soc_pm.memcs = __va((phys_addr_t)be32_to_cpu(*reg));
1056 		*located = 1;
1057 	}
1058 
1059 	return 0;
1060 }
1061 
/*
 * at91_pm_backup_init() - set up the backup unit area in securam
 *
 * Allocates a struct at91_pm_bu from the securam gen_pool and seeds it
 * with the physical addresses of the canary and of cpu_resume(), used
 * when exiting backup mode.  On SoCs with a DDR PHY (ramc_phy set) it
 * also locates the memory chip-select via the flat device tree.
 *
 * Returns 0 on success (or when backup mode is not requested), -EPERM
 * on SoCs without backup support, -ENODEV/-ENOMEM on lookup or
 * allocation failure.
 */
static int __init at91_pm_backup_init(void)
{
	struct gen_pool *sram_pool;
	struct device_node *np;
	struct platform_device *pdev;
	int ret = -ENODEV, located = 0;

	/* Backup mode is only implemented on these SoC families. */
	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2) &&
	    !IS_ENABLED(CONFIG_SOC_SAMA7G5) &&
	    !IS_ENABLED(CONFIG_SOC_SAMA7D65))
		return -EPERM;

	/* Nothing to prepare unless backup mode was actually selected. */
	if (!at91_is_pm_mode_active(AT91_PM_BACKUP))
		return 0;

	np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
	if (!np)
		return ret;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev) {
		pr_warn("%s: failed to find securam device!\n", __func__);
		return ret;
	}

	sram_pool = gen_pool_get(&pdev->dev, NULL);
	if (!sram_pool) {
		pr_warn("%s: securam pool unavailable!\n", __func__);
		goto securam_fail;
	}

	soc_pm.bu = (void *)gen_pool_alloc(sram_pool, sizeof(struct at91_pm_bu));
	if (!soc_pm.bu) {
		pr_warn("%s: unable to alloc securam!\n", __func__);
		ret = -ENOMEM;
		goto securam_fail;
	}

	soc_pm.bu->suspended = 0;
	soc_pm.bu->canary = __pa_symbol(&canary);
	soc_pm.bu->resume = __pa_symbol(cpu_resume);
	if (soc_pm.data.ramc_phy) {
		/* The DDR PHY path needs the memory chip-select address. */
		of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
		/*
		 * NOTE(review): on this failure path the securam chunk held
		 * in soc_pm.bu is not returned to the pool and soc_pm.bu
		 * stays set — confirm this is intentional (init-once path).
		 */
		if (!located)
			goto securam_fail;
	}

	return 0;

securam_fail:
	put_device(&pdev->dev);
	return ret;
}
1116 
at91_pm_secure_init(void)1117 static void __init at91_pm_secure_init(void)
1118 {
1119 	int suspend_mode;
1120 	struct arm_smccc_res res;
1121 
1122 	suspend_mode = soc_pm.data.suspend_mode;
1123 
1124 	res = sam_smccc_call(SAMA5_SMC_SIP_SET_SUSPEND_MODE,
1125 			     suspend_mode, 0);
1126 	if (res.a0 == 0) {
1127 		pr_info("AT91: Secure PM: suspend mode set to %s\n",
1128 			pm_modes[suspend_mode].pattern);
1129 		soc_pm.data.mode = suspend_mode;
1130 		return;
1131 	}
1132 
1133 	pr_warn("AT91: Secure PM: %s mode not supported !\n",
1134 		pm_modes[suspend_mode].pattern);
1135 
1136 	res = sam_smccc_call(SAMA5_SMC_SIP_GET_SUSPEND_MODE, 0, 0);
1137 	if (res.a0 == 0) {
1138 		pr_warn("AT91: Secure PM: failed to get default mode\n");
1139 		soc_pm.data.mode = -1;
1140 		return;
1141 	}
1142 
1143 	pr_info("AT91: Secure PM: using default suspend mode %s\n",
1144 		pm_modes[suspend_mode].pattern);
1145 
1146 	soc_pm.data.suspend_mode = res.a1;
1147 	soc_pm.data.mode = soc_pm.data.suspend_mode;
1148 }
/* SHDWC compatibles probed when a PM mode needs the shutdown controller. */
static const struct of_device_id atmel_shdwc_ids[] = {
	{ .compatible = "atmel,sama5d2-shdwc" },
	{ .compatible = "microchip,sam9x60-shdwc" },
	{ .compatible = "microchip,sama7g5-shdwc" },
	{ /* sentinel. */ }
};
1155 
/* GMAC (GEM) compatibles probed when ULP modes use the Ethernet quirk. */
static const struct of_device_id gmac_ids[] __initconst = {
	{ .compatible = "atmel,sama5d3-gem" },
	{ .compatible = "atmel,sama5d2-gem" },
	{ .compatible = "atmel,sama5d29-gem" },
	{ .compatible = "microchip,sama7g5-gem" },
	{ },
};
1163 
/* EMAC (MACB) compatibles probed when ULP modes use the Ethernet quirk. */
static const struct of_device_id emac_ids[] __initconst = {
	{ .compatible = "atmel,sama5d3-macb" },
	{ .compatible = "microchip,sama7g5-emac" },
	{ },
};
1169 
/*
 * Replaces _mode_to_replace with a supported mode that doesn't depend
 * on controller pointed by _map_bitmask
 * @_maps: u32 array containing AT91_PM_IOMAP() flags and indexed by AT91
 * PM mode
 * @_map_bitmask: AT91_PM_IOMAP() bitmask; if _mode_to_replace depends on
 * controller represented by _map_bitmask, _mode_to_replace needs to be
 * updated
 * @_mode_to_replace: standby_mode or suspend_mode that need to be
 * updated
 * @_mode_to_check: standby_mode or suspend_mode; this is needed here
 * to avoid having standby_mode and suspend_mode set with the same AT91
 * PM mode
 *
 * Fallback candidates are ULP0 (preferred, when ULP0 itself does not
 * need the controller) and STANDBY, which never needs an extra iomap.
 */
#define AT91_PM_REPLACE_MODE(_maps, _map_bitmask, _mode_to_replace,	\
			     _mode_to_check)				\
	do {								\
		if (((_maps)[(_mode_to_replace)]) & (_map_bitmask)) {	\
			int _mode_to_use, _mode_complementary;		\
			/* Use ULP0 if it doesn't need _map_bitmask. */	\
			if (!((_maps)[AT91_PM_ULP0] & (_map_bitmask))) {\
				_mode_to_use = AT91_PM_ULP0;		\
				_mode_complementary = AT91_PM_STANDBY;	\
			} else {					\
				/* Otherwise STANDBY is the only option. */\
				_mode_to_use = AT91_PM_STANDBY;		\
				_mode_complementary = AT91_PM_STANDBY;	\
			}						\
									\
			if ((_mode_to_check) != _mode_to_use)		\
				(_mode_to_replace) = _mode_to_use;	\
			else						\
				(_mode_to_replace) = _mode_complementary;\
		}							\
	} while (0)
1204 
/*
 * Replaces standby and suspend modes with default supported modes:
 * ULP0 and STANDBY.
 * @_maps: u32 array indexed by AT91 PM mode containing AT91_PM_IOMAP()
 * flags
 * @_map: controller specific name; standby and suspend mode need to be
 * replaced in order to not depend on this controller
 *
 * Each replacement passes the other mode as _mode_to_check so that the
 * two never collapse onto the same AT91 PM mode.
 */
#define AT91_PM_REPLACE_MODES(_maps, _map)				\
	do {								\
		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
				     (soc_pm.data.standby_mode),	\
				     (soc_pm.data.suspend_mode));	\
		AT91_PM_REPLACE_MODE((_maps), BIT(AT91_PM_IOMAP_##_map),\
				     (soc_pm.data.suspend_mode),	\
				     (soc_pm.data.standby_mode));	\
	} while (0)
1222 
at91_pm_get_eth_clks(struct device_node * np,struct clk_bulk_data * clks)1223 static int __init at91_pm_get_eth_clks(struct device_node *np,
1224 				       struct clk_bulk_data *clks)
1225 {
1226 	clks[AT91_PM_ETH_PCLK].clk = of_clk_get_by_name(np, "pclk");
1227 	if (IS_ERR(clks[AT91_PM_ETH_PCLK].clk))
1228 		return PTR_ERR(clks[AT91_PM_ETH_PCLK].clk);
1229 
1230 	clks[AT91_PM_ETH_HCLK].clk = of_clk_get_by_name(np, "hclk");
1231 	if (IS_ERR(clks[AT91_PM_ETH_HCLK].clk))
1232 		return PTR_ERR(clks[AT91_PM_ETH_HCLK].clk);
1233 
1234 	return 0;
1235 }
1236 
at91_pm_eth_clks_empty(struct clk_bulk_data * clks)1237 static int __init at91_pm_eth_clks_empty(struct clk_bulk_data *clks)
1238 {
1239 	return IS_ERR(clks[AT91_PM_ETH_PCLK].clk) ||
1240 	       IS_ERR(clks[AT91_PM_ETH_HCLK].clk);
1241 }
1242 
/*
 * at91_pm_modes_init() - map the controllers the selected PM modes need
 * @maps: per-mode bitmaps of AT91_PM_IOMAP() flags
 * @len: number of entries in @maps
 *
 * Prepares backup mode, then iomaps the SHDWC/SFRBU and fetches the
 * Ethernet clocks when the chosen standby/suspend modes depend on them.
 * Whenever a required controller is missing, AT91_PM_REPLACE_MODES()
 * downgrades the affected mode(s), and mappings that turned out to be
 * unnecessary are undone at the end.
 */
static void __init at91_pm_modes_init(const u32 *maps, int len)
{
	struct at91_pm_quirk_eth *gmac = &soc_pm.quirks.eth[AT91_PM_G_ETH];
	struct at91_pm_quirk_eth *emac = &soc_pm.quirks.eth[AT91_PM_E_ETH];
	struct device_node *np;
	int ret;

	/* Fall back to ULP0 when backup mode cannot be prepared. */
	ret = at91_pm_backup_init();
	if (ret) {
		if (soc_pm.data.standby_mode == AT91_PM_BACKUP)
			soc_pm.data.standby_mode = AT91_PM_ULP0;
		if (soc_pm.data.suspend_mode == AT91_PM_BACKUP)
			soc_pm.data.suspend_mode = AT91_PM_ULP0;
	}

	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC)) {
		np = of_find_matching_node(NULL, atmel_shdwc_ids);
		if (!np) {
			pr_warn("%s: failed to find shdwc!\n", __func__);
			AT91_PM_REPLACE_MODES(maps, SHDWC);
		} else {
			soc_pm.data.shdwc = of_iomap(np, 0);
			of_node_put(np);
		}
	}

	if (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
	    maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU)) {
		np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-sfrbu");
		if (!np) {
			pr_warn("%s: failed to find sfrbu!\n", __func__);
			AT91_PM_REPLACE_MODES(maps, SFRBU);
		} else {
			soc_pm.data.sfrbu = of_iomap(np, 0);
			of_node_put(np);
		}
	}

	/*
	 * ULP modes may use the Ethernet controllers as wakeup sources.
	 * Try GMAC first, then EMAC; note the jump into the else branch
	 * below when only an EMAC is present.
	 */
	if ((at91_is_pm_mode_active(AT91_PM_ULP1) ||
	     at91_is_pm_mode_active(AT91_PM_ULP0) ||
	     at91_is_pm_mode_active(AT91_PM_ULP0_FAST)) &&
	    (maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(ETHC) ||
	     maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(ETHC))) {
		np = of_find_matching_node(NULL, gmac_ids);
		if (!np) {
			np = of_find_matching_node(NULL, emac_ids);
			if (np)
				goto get_emac_clks;
			AT91_PM_REPLACE_MODES(maps, ETHC);
			goto unmap_unused_nodes;
		} else {
			gmac->np = np;
			at91_pm_get_eth_clks(np, gmac->clks);
		}

		np = of_find_matching_node(NULL, emac_ids);
		if (!np) {
			/* No EMAC and no usable GMAC clocks: drop ETHC modes. */
			if (at91_pm_eth_clks_empty(gmac->clks))
				AT91_PM_REPLACE_MODES(maps, ETHC);
		} else {
get_emac_clks:
			emac->np = np;
			ret = at91_pm_get_eth_clks(np, emac->clks);
			if (ret && at91_pm_eth_clks_empty(gmac->clks)) {
				/* Neither MAC is usable: release both nodes. */
				of_node_put(gmac->np);
				of_node_put(emac->np);
				gmac->np = NULL;
				emac->np = NULL;
			}
		}
	}

unmap_unused_nodes:
	/* Unmap all unnecessary. */
	if (soc_pm.data.shdwc &&
	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SHDWC) ||
	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SHDWC))) {
		iounmap(soc_pm.data.shdwc);
		soc_pm.data.shdwc = NULL;
	}

	if (soc_pm.data.sfrbu &&
	    !(maps[soc_pm.data.standby_mode] & AT91_PM_IOMAP(SFRBU) ||
	      maps[soc_pm.data.suspend_mode] & AT91_PM_IOMAP(SFRBU))) {
		iounmap(soc_pm.data.sfrbu);
		soc_pm.data.sfrbu = NULL;
	}

	return;
}
1334 
/*
 * struct pmc_info - per-SoC PMC characteristics cached at PM init
 * @uhp_udp_mask: USB host/device clock mask (0 when the SoC has none)
 * @mckr: offset of the master clock register within the PMC
 * @version: PMC register layout revision (AT91_PMC_V1 or AT91_PMC_V2)
 * @mcks: master clock count/index for PMC v2 SoCs — presumably used by
 * the suspend code outside this view; TODO confirm exact semantics
 */
struct pmc_info {
	unsigned long uhp_udp_mask;
	unsigned long mckr;
	unsigned long version;
	unsigned long mcks;
};
1341 
/* PMC descriptions referenced by the .data entries of atmel_pmc_ids[]. */
static const struct pmc_info pmc_infos[] __initconst = {
	{
		.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},

	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{	.uhp_udp_mask = 0,
		.mckr = 0x30,
		.version = AT91_PMC_V1,
	},
	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
		.mckr = 0x28,
		.version = AT91_PMC_V2,
	},
	{
		.mckr = 0x28,
		.version = AT91_PMC_V2,
		.mcks = 4,
	},
	{
		.uhp_udp_mask = AT91SAM926x_PMC_UHP,
		.mckr = 0x28,
		.version = AT91_PMC_V2,
		.mcks = 9,
	},
};
1380 
/* Match table binding each PMC compatible to its pmc_infos[] entry. */
static const struct of_device_id atmel_pmc_ids[] __initconst = {
	{ .compatible = "atmel,at91rm9200-pmc", .data = &pmc_infos[0] },
	{ .compatible = "atmel,at91sam9260-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9261-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9263-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9g45-pmc", .data = &pmc_infos[2] },
	{ .compatible = "atmel,at91sam9n12-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,at91sam9rl-pmc", .data = &pmc_infos[3] },
	{ .compatible = "atmel,at91sam9x5-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d3-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d4-pmc", .data = &pmc_infos[1] },
	{ .compatible = "atmel,sama5d2-pmc", .data = &pmc_infos[1] },
	{ .compatible = "microchip,sam9x60-pmc", .data = &pmc_infos[4] },
	{ .compatible = "microchip,sam9x7-pmc", .data = &pmc_infos[4] },
	{ .compatible = "microchip,sama7d65-pmc", .data = &pmc_infos[6] },
	{ .compatible = "microchip,sama7g5-pmc", .data = &pmc_infos[5] },
	{ /* sentinel */ },
};
1399 
at91_pm_modes_validate(const int * modes,int len)1400 static void __init at91_pm_modes_validate(const int *modes, int len)
1401 {
1402 	u8 i, standby = 0, suspend = 0;
1403 	int mode;
1404 
1405 	for (i = 0; i < len; i++) {
1406 		if (standby && suspend)
1407 			break;
1408 
1409 		if (modes[i] == soc_pm.data.standby_mode && !standby) {
1410 			standby = 1;
1411 			continue;
1412 		}
1413 
1414 		if (modes[i] == soc_pm.data.suspend_mode && !suspend) {
1415 			suspend = 1;
1416 			continue;
1417 		}
1418 	}
1419 
1420 	if (!standby) {
1421 		if (soc_pm.data.suspend_mode == AT91_PM_STANDBY)
1422 			mode = AT91_PM_ULP0;
1423 		else
1424 			mode = AT91_PM_STANDBY;
1425 
1426 		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1427 			pm_modes[soc_pm.data.standby_mode].pattern,
1428 			pm_modes[mode].pattern);
1429 		soc_pm.data.standby_mode = mode;
1430 	}
1431 
1432 	if (!suspend) {
1433 		if (soc_pm.data.standby_mode == AT91_PM_ULP0)
1434 			mode = AT91_PM_STANDBY;
1435 		else
1436 			mode = AT91_PM_ULP0;
1437 
1438 		pr_warn("AT91: PM: %s mode not supported! Using %s.\n",
1439 			pm_modes[soc_pm.data.suspend_mode].pattern,
1440 			pm_modes[mode].pattern);
1441 		soc_pm.data.suspend_mode = mode;
1442 	}
1443 }
1444 
/*
 * at91_pm_init() - common PM setup shared by all SoC entry points
 * @pm_idle: optional idle callback installed as arm_pm_idle
 *
 * Registers the cpuidle device when a standby hook was provided, maps
 * the PMC and caches its per-SoC layout from atmel_pmc_ids, relocates
 * the suspend handler to SRAM and, when that succeeded, registers the
 * suspend ops.
 */
static void __init at91_pm_init(void (*pm_idle)(void))
{
	struct device_node *pmc_np;
	const struct of_device_id *of_id;
	const struct pmc_info *pmc;

	if (at91_cpuidle_device.dev.platform_data)
		platform_device_register(&at91_cpuidle_device);

	pmc_np = of_find_matching_node_and_match(NULL, atmel_pmc_ids, &of_id);
	soc_pm.data.pmc = of_iomap(pmc_np, 0);
	of_node_put(pmc_np);
	if (!soc_pm.data.pmc) {
		pr_err("AT91: PM not supported, PMC not found\n");
		return;
	}

	/* Cache the matched SoC's PMC register layout. */
	pmc = of_id->data;
	soc_pm.data.uhp_udp_mask = pmc->uhp_udp_mask;
	soc_pm.data.pmc_mckr_offset = pmc->mckr;
	soc_pm.data.pmc_version = pmc->version;
	soc_pm.data.pmc_mcks = pmc->mcks;

	if (pm_idle)
		arm_pm_idle = pm_idle;

	at91_pm_sram_init();

	/* Suspend is only offered when the SRAM handler is in place. */
	if (at91_suspend_sram_fn) {
		suspend_set_ops(&at91_pm_ops);
		pr_info("AT91: PM: standby: %s, suspend: %s\n",
			pm_modes[soc_pm.data.standby_mode].pattern,
			pm_modes[soc_pm.data.suspend_mode].pattern);
	} else {
		pr_info("AT91: PM not supported, due to no SRAM allocated\n");
	}
}
1482 
/* SoC entry point: PM setup for AT91RM9200. */
void __init at91rm9200_pm_init(void)
{
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_AT91RM9200))
		return;

	/*
	 * Force STANDBY and ULP0 mode to avoid calling
	 * at91_pm_modes_validate() which may increase booting time.
	 * Platform supports anyway only STANDBY and ULP0 modes.
	 */
	soc_pm.data.standby_mode = AT91_PM_STANDBY;
	soc_pm.data.suspend_mode = AT91_PM_ULP0;

	ret = at91_dt_ramc(false);
	if (ret)
		return;

	/*
	 * AT91RM9200 SDRAM low-power mode cannot be used with self-refresh.
	 */
	at91_ramc_write(0, AT91_MC_SDRAMC_LPR, 0);

	at91_pm_init(at91rm9200_idle);
}
1509 
sam9x60_pm_init(void)1510 void __init sam9x60_pm_init(void)
1511 {
1512 	static const int modes[] __initconst = {
1513 		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
1514 	};
1515 	static const int iomaps[] __initconst = {
1516 		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC),
1517 	};
1518 	int ret;
1519 
1520 	if (!IS_ENABLED(CONFIG_SOC_SAM9X60))
1521 		return;
1522 
1523 	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
1524 	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
1525 	ret = at91_dt_ramc(false);
1526 	if (ret)
1527 		return;
1528 
1529 	at91_pm_init(NULL);
1530 
1531 	soc_pm.ws_ids = sam9x60_ws_ids;
1532 	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
1533 }
1534 
/* SoC entry point: PM setup for SAM9X7 (STANDBY and ULP0 only). */
void __init sam9x7_pm_init(void)
{
	static const int modes[] __initconst = {
		AT91_PM_STANDBY, AT91_PM_ULP0,
	};
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_SAM9X7))
		return;

	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
	ret = at91_dt_ramc(false);
	if (ret)
		return;

	at91_pm_init(NULL);

	/* SoC specific wakeup-source table and PMC configuration hook. */
	soc_pm.ws_ids = sam9x7_ws_ids;
	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;
}
1555 
/* SoC entry point: PM setup for the AT91SAM9 family. */
void __init at91sam9_pm_init(void)
{
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_AT91SAM9))
		return;

	/*
	 * Force STANDBY and ULP0 mode to avoid calling
	 * at91_pm_modes_validate() which may increase booting time.
	 * Platform supports anyway only STANDBY and ULP0 modes.
	 */
	soc_pm.data.standby_mode = AT91_PM_STANDBY;
	soc_pm.data.suspend_mode = AT91_PM_ULP0;

	ret = at91_dt_ramc(false);
	if (ret)
		return;

	at91_pm_init(at91sam9_idle);
}
1577 
/* SoC entry point: common PM setup for SAMA5 (no ULP1/backup modes). */
void __init sama5_pm_init(void)
{
	static const int modes[] __initconst = {
		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST,
	};
	static const u32 iomaps[] __initconst = {
		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
	};
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_SAMA5))
		return;

	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
	ret = at91_dt_ramc(false);
	if (ret)
		return;

	at91_pm_init(NULL);

	/* Quirks applies to ULP0, ULP0 fast and ULP1 modes. */
	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
						 BIT(AT91_PM_ULP0_FAST) |
						 BIT(AT91_PM_ULP1);
	/* Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup source. */
	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
						     BIT(AT91_PM_ULP0_FAST);
}
1608 
/* SoC entry point: PM setup for SAMA5D2, including backup mode. */
void __init sama5d2_pm_init(void)
{
	static const int modes[] __initconst = {
		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP0_FAST, AT91_PM_ULP1,
		AT91_PM_BACKUP,
	};
	static const u32 iomaps[] __initconst = {
		[AT91_PM_ULP0]		= AT91_PM_IOMAP(ETHC),
		[AT91_PM_ULP0_FAST]	= AT91_PM_IOMAP(ETHC),
		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SHDWC) |
					  AT91_PM_IOMAP(ETHC),
		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SHDWC) |
					  AT91_PM_IOMAP(SFRBU),
	};
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_SAMA5D2))
		return;

	/* Secure world owns suspend: delegate via SMC and skip local setup. */
	if (IS_ENABLED(CONFIG_ATMEL_SECURE_PM)) {
		pr_warn("AT91: Secure PM: ignoring standby mode\n");
		at91_pm_secure_init();
		return;
	}

	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));
	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
	ret = at91_dt_ramc(false);
	if (ret)
		return;

	at91_pm_init(NULL);

	/* SoC specific wakeup-source tables and configuration hooks. */
	soc_pm.ws_ids = sama5d2_ws_ids;
	soc_pm.config_shdwc_ws = at91_sama5d2_config_shdwc_ws;
	soc_pm.config_pmc_ws = at91_sama5d2_config_pmc_ws;

	/* SFRBU power-switch register layout (key and bit positions). */
	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
	soc_pm.sfrbu_regs.pswbu.state = BIT(3);

	/* Quirk applies to ULP0, ULP0 fast and ULP1 modes. */
	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP0) |
						 BIT(AT91_PM_ULP0_FAST) |
						 BIT(AT91_PM_ULP1);
	/*
	 * Do not suspend in ULP0, ULP0 fast if GETH is the only wakeup
	 * source.
	 */
	soc_pm.quirks.eth[AT91_PM_G_ETH].dns_modes = BIT(AT91_PM_ULP0) |
						     BIT(AT91_PM_ULP0_FAST);
}
1662 
/* SoC entry point: PM setup for the SAMA7 family. */
void __init sama7_pm_init(void)
{
	static const int modes[] __initconst = {
		AT91_PM_STANDBY, AT91_PM_ULP0, AT91_PM_ULP1, AT91_PM_BACKUP,
	};
	static const u32 iomaps[] __initconst = {
		[AT91_PM_ULP0]		= AT91_PM_IOMAP(SFRBU),
		[AT91_PM_ULP1]		= AT91_PM_IOMAP(SFRBU) |
					  AT91_PM_IOMAP(SHDWC) |
					  AT91_PM_IOMAP(ETHC),
		[AT91_PM_BACKUP]	= AT91_PM_IOMAP(SFRBU) |
					  AT91_PM_IOMAP(SHDWC),
	};
	int ret;

	if (!IS_ENABLED(CONFIG_SOC_SAMA7))
		return;

	at91_pm_modes_validate(modes, ARRAY_SIZE(modes));

	/*
	 * Unlike other SoCs, the RAM controller is mapped before
	 * at91_pm_modes_init(): at91_pm_backup_init() (called from it)
	 * reads soc_pm.data.ramc_phy.
	 */
	ret = at91_dt_ramc(true);
	if (ret)
		return;

	at91_pm_modes_init(iomaps, ARRAY_SIZE(iomaps));
	at91_pm_init(NULL);

	/* SoC specific wakeup-source table and PMC configuration hook. */
	soc_pm.ws_ids = sama7_ws_ids;
	soc_pm.config_pmc_ws = at91_sam9x60_config_pmc_ws;

	/* SFRBU power-switch register layout (key and bit positions). */
	soc_pm.sfrbu_regs.pswbu.key = (0x4BD20C << 8);
	soc_pm.sfrbu_regs.pswbu.ctrl = BIT(0);
	soc_pm.sfrbu_regs.pswbu.softsw = BIT(1);
	soc_pm.sfrbu_regs.pswbu.state = BIT(2);

	/* Quirks applies to ULP1 for both Ethernet interfaces. */
	soc_pm.quirks.eth[AT91_PM_E_ETH].modes = BIT(AT91_PM_ULP1);
	soc_pm.quirks.eth[AT91_PM_G_ETH].modes = BIT(AT91_PM_ULP1);
}
1702 
at91_pm_modes_select(char * str)1703 static int __init at91_pm_modes_select(char *str)
1704 {
1705 	char *s;
1706 	substring_t args[MAX_OPT_ARGS];
1707 	int standby, suspend;
1708 
1709 	if (!str)
1710 		return 0;
1711 
1712 	s = strsep(&str, ",");
1713 	standby = match_token(s, pm_modes, args);
1714 	if (standby < 0)
1715 		return 0;
1716 
1717 	suspend = match_token(str, pm_modes, args);
1718 	if (suspend < 0)
1719 		return 0;
1720 
1721 	soc_pm.data.standby_mode = standby;
1722 	soc_pm.data.suspend_mode = suspend;
1723 
1724 	return 0;
1725 }
1726 early_param("atmel.pm_modes", at91_pm_modes_select);
1727