1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2016 Socionext Inc.
4 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
5 */
6
7 #include <linux/bitfield.h>
8 #include <linux/bits.h>
9 #include <linux/iopoll.h>
10 #include <linux/module.h>
11 #include <linux/mmc/host.h>
12 #include <linux/mmc/mmc.h>
13 #include <linux/of.h>
14 #include <linux/platform_device.h>
15 #include <linux/reset.h>
16
17 #include "sdhci-pltfm.h"
18
/* HRS - Host Register Set (specific to Cadence) */
#define SDHCI_CDNS_HRS04		0x10		/* PHY access port */
#define   SDHCI_CDNS_HRS04_ACK			BIT(26)
#define   SDHCI_CDNS_HRS04_RD			BIT(25)
#define   SDHCI_CDNS_HRS04_WR			BIT(24)
#define   SDHCI_CDNS_HRS04_RDATA		GENMASK(23, 16)
#define   SDHCI_CDNS_HRS04_WDATA		GENMASK(15, 8)
#define   SDHCI_CDNS_HRS04_ADDR			GENMASK(5, 0)

#define SDHCI_CDNS_HRS06		0x18		/* eMMC control */
#define   SDHCI_CDNS_HRS06_TUNE_UP		BIT(15)
#define   SDHCI_CDNS_HRS06_TUNE			GENMASK(13, 8)
#define   SDHCI_CDNS_HRS06_MODE			GENMASK(2, 0)
#define   SDHCI_CDNS_HRS06_MODE_SD		0x0
#define   SDHCI_CDNS_HRS06_MODE_MMC_SDR		0x2
#define   SDHCI_CDNS_HRS06_MODE_MMC_DDR		0x3
#define   SDHCI_CDNS_HRS06_MODE_MMC_HS200	0x4
#define   SDHCI_CDNS_HRS06_MODE_MMC_HS400	0x5
#define   SDHCI_CDNS_HRS06_MODE_MMC_HS400ES	0x6

/* Read block gap */
#define SDHCI_CDNS_HRS37		0x94		/* interface mode select */
#define   SDHCI_CDNS_HRS37_MODE_DS		0x0
#define   SDHCI_CDNS_HRS37_MODE_HS		0x1
#define   SDHCI_CDNS_HRS37_MODE_UDS_SDR12	0x8
#define   SDHCI_CDNS_HRS37_MODE_UDS_SDR25	0x9
#define   SDHCI_CDNS_HRS37_MODE_UDS_SDR50	0xa
#define   SDHCI_CDNS_HRS37_MODE_UDS_SDR104	0xb
#define   SDHCI_CDNS_HRS37_MODE_UDS_DDR50	0xc
#define   SDHCI_CDNS_HRS37_MODE_MMC_LEGACY	0x20
#define   SDHCI_CDNS_HRS37_MODE_MMC_SDR		0x21
#define   SDHCI_CDNS_HRS37_MODE_MMC_DDR		0x22
#define   SDHCI_CDNS_HRS37_MODE_MMC_HS200	0x23
#define   SDHCI_CDNS_HRS37_MODE_MMC_HS400	0x24
#define   SDHCI_CDNS_HRS37_MODE_MMC_HS400ES	0x25
#define SDHCI_CDNS_HRS38		0x98		/* Read block gap coefficient */
#define   SDHCI_CDNS_HRS38_BLKGAP_MAX		0xf

/* SRS - Slot Register Set (SDHCI-compatible), at this offset from HRS base */
#define SDHCI_CDNS_SRS_BASE		0x200

/* PHY register addresses, written through the HRS04 access port */
#define SDHCI_CDNS_PHY_DLY_SD_HS	0x00
#define SDHCI_CDNS_PHY_DLY_SD_DEFAULT	0x01
#define SDHCI_CDNS_PHY_DLY_UHS_SDR12	0x02
#define SDHCI_CDNS_PHY_DLY_UHS_SDR25	0x03
#define SDHCI_CDNS_PHY_DLY_UHS_SDR50	0x04
#define SDHCI_CDNS_PHY_DLY_UHS_DDR50	0x05
#define SDHCI_CDNS_PHY_DLY_EMMC_LEGACY	0x06
#define SDHCI_CDNS_PHY_DLY_EMMC_SDR	0x07
#define SDHCI_CDNS_PHY_DLY_EMMC_DDR	0x08
#define SDHCI_CDNS_PHY_DLY_SDCLK	0x0b
#define SDHCI_CDNS_PHY_DLY_HSMMC	0x0c
#define SDHCI_CDNS_PHY_DLY_STROBE	0x0d

/*
 * The tuned val register is 6 bit-wide, but not the whole of the range is
 * available. The range 0-42 seems to be available (then 43 wraps around to 0)
 * but I am not quite sure if it is official. Use only 0 to 39 for safety.
 */
#define SDHCI_CDNS_MAX_TUNING_LOOP	40
80
/* One PHY register setting parsed from DT: target address and value */
struct sdhci_cdns_phy_param {
	u8 addr;
	u8 data;
};

struct sdhci_cdns_priv {
	void __iomem *hrs_addr;		/* base of the Cadence HRS registers */
	void __iomem *ctl_addr;		/* write control */
	spinlock_t wrlock;		/* write lock */
	bool enhanced_strobe;		/* cached HS400ES state from mmc_ios */
	/* register write hook; the Elba variant overrides the plain writel() */
	void (*priv_writel)(struct sdhci_cdns_priv *priv, u32 val, void __iomem *reg);
	struct reset_control *rst_hw;	/* optional eMMC hardware reset control */
	unsigned int nr_phy_params;	/* number of entries in phy_params[] */
	struct sdhci_cdns_phy_param phy_params[];	/* flexible array, sized at probe */
};

/* Maps a DT property name to the PHY register it configures */
struct sdhci_cdns_phy_cfg {
	const char *property;
	u8 addr;
};

/* Per-compatible data: optional platform init hook plus SDHCI platform data */
struct sdhci_cdns_drv_data {
	int (*init)(struct platform_device *pdev);
	const struct sdhci_pltfm_data pltfm_data;
};
106
/* All PHY delay properties this driver knows how to parse from DT */
static const struct sdhci_cdns_phy_cfg sdhci_cdns_phy_cfgs[] = {
	{ "cdns,phy-input-delay-sd-highspeed", SDHCI_CDNS_PHY_DLY_SD_HS, },
	{ "cdns,phy-input-delay-legacy", SDHCI_CDNS_PHY_DLY_SD_DEFAULT, },
	{ "cdns,phy-input-delay-sd-uhs-sdr12", SDHCI_CDNS_PHY_DLY_UHS_SDR12, },
	{ "cdns,phy-input-delay-sd-uhs-sdr25", SDHCI_CDNS_PHY_DLY_UHS_SDR25, },
	{ "cdns,phy-input-delay-sd-uhs-sdr50", SDHCI_CDNS_PHY_DLY_UHS_SDR50, },
	{ "cdns,phy-input-delay-sd-uhs-ddr50", SDHCI_CDNS_PHY_DLY_UHS_DDR50, },
	{ "cdns,phy-input-delay-mmc-highspeed", SDHCI_CDNS_PHY_DLY_EMMC_SDR, },
	{ "cdns,phy-input-delay-mmc-ddr", SDHCI_CDNS_PHY_DLY_EMMC_DDR, },
	{ "cdns,phy-dll-delay-sdclk", SDHCI_CDNS_PHY_DLY_SDCLK, },
	{ "cdns,phy-dll-delay-sdclk-hsmmc", SDHCI_CDNS_PHY_DLY_HSMMC, },
	{ "cdns,phy-dll-delay-strobe", SDHCI_CDNS_PHY_DLY_STROBE, },
};
120
/*
 * Default register write: a plain writel().  @priv is unused here but the
 * signature must match the ->priv_writel() hook, which the Elba variant
 * implements with additional byte-lane control.
 */
static inline void cdns_writel(struct sdhci_cdns_priv *priv, u32 val,
			       void __iomem *reg)
{
	writel(val, reg);
}
126
/*
 * Write one PHY register through the HRS04 access port.
 *
 * Handshake: wait for ACK to be deasserted, latch address/data, assert WR
 * and wait for ACK, then deassert WR and wait for ACK to drop again.
 * Returns 0 on success or a -ETIMEDOUT style error from the polls.
 */
static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv,
				    u8 addr, u8 data)
{
	void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS04;
	u32 tmp;
	int ret;

	/* Wait until the access port is idle (ACK deasserted) */
	ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
				 0, 10);
	if (ret)
		return ret;

	/* Latch address and write data first, then pulse WR separately */
	tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) |
	      FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr);
	priv->priv_writel(priv, tmp, reg);

	tmp |= SDHCI_CDNS_HRS04_WR;
	priv->priv_writel(priv, tmp, reg);

	/* Controller raises ACK once the PHY write is accepted */
	ret = readl_poll_timeout(reg, tmp, tmp & SDHCI_CDNS_HRS04_ACK, 0, 10);
	if (ret)
		return ret;

	/* Complete the handshake: clear WR and wait for ACK to drop */
	tmp &= ~SDHCI_CDNS_HRS04_WR;
	priv->priv_writel(priv, tmp, reg);

	ret = readl_poll_timeout(reg, tmp, !(tmp & SDHCI_CDNS_HRS04_ACK),
				 0, 10);

	return ret;
}
158
sdhci_cdns_phy_param_count(struct device_node * np)159 static unsigned int sdhci_cdns_phy_param_count(struct device_node *np)
160 {
161 unsigned int count = 0;
162 int i;
163
164 for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++)
165 if (of_property_present(np, sdhci_cdns_phy_cfgs[i].property))
166 count++;
167
168 return count;
169 }
170
sdhci_cdns_phy_param_parse(struct device_node * np,struct sdhci_cdns_priv * priv)171 static void sdhci_cdns_phy_param_parse(struct device_node *np,
172 struct sdhci_cdns_priv *priv)
173 {
174 struct sdhci_cdns_phy_param *p = priv->phy_params;
175 u32 val;
176 int ret, i;
177
178 for (i = 0; i < ARRAY_SIZE(sdhci_cdns_phy_cfgs); i++) {
179 ret = of_property_read_u32(np, sdhci_cdns_phy_cfgs[i].property,
180 &val);
181 if (ret)
182 continue;
183
184 p->addr = sdhci_cdns_phy_cfgs[i].addr;
185 p->data = val;
186 p++;
187 }
188 }
189
sdhci_cdns_phy_init(struct sdhci_cdns_priv * priv)190 static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv)
191 {
192 int ret, i;
193
194 for (i = 0; i < priv->nr_phy_params; i++) {
195 ret = sdhci_cdns_write_phy_reg(priv, priv->phy_params[i].addr,
196 priv->phy_params[i].data);
197 if (ret)
198 return ret;
199 }
200
201 return 0;
202 }
203
/* Return the Cadence-specific private data attached to @host. */
static void *sdhci_cdns_priv(struct sdhci_host *host)
{
	return sdhci_pltfm_priv(sdhci_priv(host));
}
210
static unsigned int sdhci_cdns_get_timeout_clock(struct sdhci_host *host)
{
	/*
	 * Cadence's spec says the Timeout Clock Frequency is the same as the
	 * Base Clock Frequency, so just report the base clock rate.
	 */
	return host->max_clk;
}
219
/* Select the eMMC speed mode via the MODE field of HRS06. */
static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode)
{
	void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
	u32 val;

	/* Read-modify-write so the other HRS06 fields are preserved */
	val = readl(reg);
	val = (val & ~SDHCI_CDNS_HRS06_MODE) |
	      FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode);
	priv->priv_writel(priv, val, reg);
}
230
sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv * priv)231 static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv)
232 {
233 u32 tmp;
234
235 tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06);
236 return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp);
237 }
238
/*
 * Latch a tuning value into the HRS06 TUNE field and ask the controller to
 * apply it.  Returns 0 on success, -EINVAL if @val does not fit the 6-bit
 * field, or a poll-timeout error if TUNE_UP never clears.
 */
static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val)
{
	struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
	void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06;
	u32 tmp;
	int i, ret;

	if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val)))
		return -EINVAL;

	tmp = readl(reg);
	tmp &= ~SDHCI_CDNS_HRS06_TUNE;
	tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val);

	/*
	 * Workaround for IP errata:
	 * The IP6116 SD/eMMC PHY design has a timing issue on receive data
	 * path. Send tune request twice.
	 */
	for (i = 0; i < 2; i++) {
		/* TUNE_UP is cleared by the controller once the value is taken */
		tmp |= SDHCI_CDNS_HRS06_TUNE_UP;
		priv->priv_writel(priv, tmp, reg);

		ret = readl_poll_timeout(reg, tmp,
					 !(tmp & SDHCI_CDNS_HRS06_TUNE_UP),
					 0, 1);
		if (ret)
			return ret;
	}

	return 0;
}
271
272 /**
273 * sdhci_cdns_tune_blkgap() - tune multi-block read gap
274 * @mmc: MMC host
275 *
276 * Tune delay used in multi block read. To do so,
277 * try sending multi-block read command with incremented gap, unless
278 * it succeeds.
279 *
280 * Return: error code
281 */
sdhci_cdns_tune_blkgap(struct mmc_host * mmc)282 static int sdhci_cdns_tune_blkgap(struct mmc_host *mmc)
283 {
284 struct sdhci_host *host = mmc_priv(mmc);
285 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
286 struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host);
287 void __iomem *hrs37_reg = priv->hrs_addr + SDHCI_CDNS_HRS37;
288 void __iomem *hrs38_reg = priv->hrs_addr + SDHCI_CDNS_HRS38;
289 int ret;
290 u32 gap;
291
292 /* Currently only needed in HS200 mode */
293 if (host->timing != MMC_TIMING_MMC_HS200)
294 return 0;
295
296 writel(SDHCI_CDNS_HRS37_MODE_MMC_HS200, hrs37_reg);
297
298 for (gap = 0; gap <= SDHCI_CDNS_HRS38_BLKGAP_MAX; gap++) {
299 writel(gap, hrs38_reg);
300 ret = mmc_read_tuning(mmc, 512, 32);
301 if (!ret)
302 break;
303 }
304
305 dev_dbg(mmc_dev(mmc), "read block gap tune %s, gap %d\n", ret ? "failed" : "OK", gap);
306 return ret;
307 }
308
/*
 * In SD mode, software must not use the hardware tuning and instead perform
 * an almost identical procedure to eMMC.
 *
 * Sweep every allowed tuning value, track the longest streak of values for
 * which the tuning command succeeds, then settle on the midpoint of that
 * streak for maximum margin.  Finish with read block gap tuning (which is a
 * no-op outside HS200).
 */
static int sdhci_cdns_execute_tuning(struct sdhci_host *host, u32 opcode)
{
	int cur_streak = 0;	/* length of the current run of good values */
	int max_streak = 0;	/* longest run seen so far */
	int end_of_streak = 0;	/* last tuning index of the longest run */
	int i;
	int ret;

	/*
	 * Do not execute tuning for UHS_SDR50 or UHS_DDR50.
	 * The delay is set by probe, based on the DT properties.
	 */
	if (host->timing != MMC_TIMING_MMC_HS200 &&
	    host->timing != MMC_TIMING_UHS_SDR104)
		return 0;

	for (i = 0; i < SDHCI_CDNS_MAX_TUNING_LOOP; i++) {
		if (sdhci_cdns_set_tune_val(host, i) ||
		    mmc_send_tuning(host->mmc, opcode, NULL)) { /* bad */
			cur_streak = 0;
		} else { /* good */
			cur_streak++;
			if (cur_streak > max_streak) {
				max_streak = cur_streak;
				end_of_streak = i;
			}
		}
	}

	if (!max_streak) {
		dev_err(mmc_dev(host->mmc), "no tuning point found\n");
		return -EIO;
	}

	/* Pick the middle of the best streak */
	ret = sdhci_cdns_set_tune_val(host, end_of_streak - max_streak / 2);
	if (ret)
		return ret;

	return sdhci_cdns_tune_blkgap(host->mmc);
}
353
/*
 * Map the MMC timing onto the Cadence HRS06 mode and program it.  Any
 * non-eMMC timing selects MODE_SD and additionally falls through to the
 * generic SDHCI UHS signaling handler.
 */
static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host,
					 unsigned int timing)
{
	struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
	u32 mode;

	switch (timing) {
	case MMC_TIMING_MMC_HS:
		mode = SDHCI_CDNS_HRS06_MODE_MMC_SDR;
		break;
	case MMC_TIMING_MMC_DDR52:
		mode = SDHCI_CDNS_HRS06_MODE_MMC_DDR;
		break;
	case MMC_TIMING_MMC_HS200:
		mode = SDHCI_CDNS_HRS06_MODE_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		/* HS400 with enhanced strobe uses a distinct HRS06 mode */
		if (priv->enhanced_strobe)
			mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400ES;
		else
			mode = SDHCI_CDNS_HRS06_MODE_MMC_HS400;
		break;
	default:
		mode = SDHCI_CDNS_HRS06_MODE_SD;
		break;
	}

	sdhci_cdns_set_emmc_mode(priv, mode);

	/* For SD, fall back to the default handler */
	if (mode == SDHCI_CDNS_HRS06_MODE_SD)
		sdhci_set_uhs_signaling(host, timing);
}
387
/* Elba control register bits [6:3] are byte-lane enables */
#define ELBA_BYTE_ENABLE_MASK(x)	((x) << 3)

/*
 * The Pensando Elba SoC explicitly controls byte-lane enabling on writes
 * which includes writes to the HRS registers.  The write lock (wrlock)
 * ensures the byte-lane enable write (via ctl_addr) and the data write
 * form an atomic pair that cannot be interleaved with another writer.
 */
static void elba_priv_writel(struct sdhci_cdns_priv *priv, u32 val,
			     void __iomem *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->wrlock, flags);
	/* Enable the byte lanes for a full 32-bit write before the data */
	writel(GENMASK(7, 3), priv->ctl_addr);
	writel(val, reg);
	spin_unlock_irqrestore(&priv->wrlock, flags);
}
407
/* 32-bit SRS register write, routed through the lane-aware writel */
static void elba_write_l(struct sdhci_host *host, u32 val, int reg)
{
	elba_priv_writel(sdhci_cdns_priv(host), val, host->ioaddr + reg);
}
412
/*
 * 16-bit SRS register write: enable only the two byte lanes covered by the
 * register's offset within its 32-bit word, then issue the writew() under
 * the same lock so the pair is atomic.
 */
static void elba_write_w(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
	u32 shift = reg & GENMASK(1, 0);	/* byte offset within the word */
	unsigned long flags;
	u32 byte_enables;

	byte_enables = GENMASK(1, 0) << shift;	/* two adjacent lanes */
	spin_lock_irqsave(&priv->wrlock, flags);
	writel(ELBA_BYTE_ENABLE_MASK(byte_enables), priv->ctl_addr);
	writew(val, host->ioaddr + reg);
	spin_unlock_irqrestore(&priv->wrlock, flags);
}
426
/*
 * 8-bit SRS register write: enable only the single byte lane addressed by
 * the register's offset within its 32-bit word, then issue the writeb()
 * under the same lock so the pair is atomic.
 */
static void elba_write_b(struct sdhci_host *host, u8 val, int reg)
{
	struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
	u32 shift = reg & GENMASK(1, 0);	/* byte offset within the word */
	unsigned long flags;
	u32 byte_enables;

	byte_enables = BIT(0) << shift;		/* a single lane */
	spin_lock_irqsave(&priv->wrlock, flags);
	writel(ELBA_BYTE_ENABLE_MASK(byte_enables), priv->ctl_addr);
	writeb(val, host->ioaddr + reg);
	spin_unlock_irqrestore(&priv->wrlock, flags);
}
440
/* Elba needs lane-aware write accessors; reads use the SDHCI defaults */
static const struct sdhci_ops sdhci_elba_ops = {
	.write_l = elba_write_l,
	.write_w = elba_write_w,
	.write_b = elba_write_b,
	.set_clock = sdhci_set_clock,
	.get_timeout_clock = sdhci_cdns_get_timeout_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
};
451
/*
 * Elba platform init hook: map the byte-lane control register (the second
 * MMIO resource), switch the register write path to the lane-aware variant,
 * and start with all byte lanes enabled.
 */
static int elba_drv_init(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
	void __iomem *ioaddr;

	host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA;
	spin_lock_init(&priv->wrlock);

	/* Byte-lane control register */
	ioaddr = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(ioaddr))
		return PTR_ERR(ioaddr);

	priv->ctl_addr = ioaddr;
	priv->priv_writel = elba_priv_writel;
	/* Initial state: all four byte lanes enabled */
	writel(ELBA_BYTE_ENABLE_MASK(0xf), priv->ctl_addr);

	return 0;
}
472
/* Default ops: standard accessors plus Cadence tuning/UHS handling */
static const struct sdhci_ops sdhci_cdns_ops = {
	.set_clock = sdhci_set_clock,
	.get_timeout_clock = sdhci_cdns_get_timeout_clock,
	.set_bus_width = sdhci_set_bus_width,
	.reset = sdhci_reset,
	.platform_execute_tuning = sdhci_cdns_execute_tuning,
	.set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
};
481
/* Per-SoC variants selected via the of_device_id match table below */
static const struct sdhci_cdns_drv_data sdhci_cdns_uniphier_drv_data = {
	.pltfm_data = {
		.ops = &sdhci_cdns_ops,
		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	},
};

static const struct sdhci_cdns_drv_data sdhci_elba_drv_data = {
	.init = elba_drv_init,
	.pltfm_data = {
		.ops = &sdhci_elba_ops,
	},
};

static const struct sdhci_cdns_drv_data sdhci_eyeq_drv_data = {
	.pltfm_data = {
		.ops = &sdhci_cdns_ops,
		.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	},
};

/* Fallback when the matched entry carries no .data */
static const struct sdhci_cdns_drv_data sdhci_cdns_drv_data = {
	.pltfm_data = {
		.ops = &sdhci_cdns_ops,
	},
};
508
sdhci_cdns_hs400_enhanced_strobe(struct mmc_host * mmc,struct mmc_ios * ios)509 static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
510 struct mmc_ios *ios)
511 {
512 struct sdhci_host *host = mmc_priv(mmc);
513 struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);
514 u32 mode;
515
516 priv->enhanced_strobe = ios->enhanced_strobe;
517
518 mode = sdhci_cdns_get_emmc_mode(priv);
519
520 if (mode == SDHCI_CDNS_HRS06_MODE_MMC_HS400 && ios->enhanced_strobe)
521 sdhci_cdns_set_emmc_mode(priv,
522 SDHCI_CDNS_HRS06_MODE_MMC_HS400ES);
523
524 if (mode == SDHCI_CDNS_HRS06_MODE_MMC_HS400ES && !ios->enhanced_strobe)
525 sdhci_cdns_set_emmc_mode(priv,
526 SDHCI_CDNS_HRS06_MODE_MMC_HS400);
527 }
528
/* Pulse the eMMC hardware reset line with comfortable timing margins. */
static void sdhci_cdns_mmc_hw_reset(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_cdns_priv *priv = sdhci_cdns_priv(host);

	dev_dbg(mmc_dev(host->mmc), "emmc hardware reset\n");

	reset_control_assert(priv->rst_hw);
	/* For eMMC, minimum is 1us but give it 3us for good measure */
	udelay(3);

	reset_control_deassert(priv->rst_hw);
	/* For eMMC, minimum is 200us but give it 300us for good measure */
	usleep_range(300, 1000);
}
544
/*
 * Probe: enable the bus clock, allocate the SDHCI host with a private area
 * sized for the PHY parameters found in DT, set up the Cadence register
 * layout (HRS at the base, SRS at +0x200), run the optional per-SoC init
 * hook, program the PHY delays, and register the host.
 */
static int sdhci_cdns_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	const struct sdhci_cdns_drv_data *data;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_cdns_priv *priv;
	struct clk *clk;
	unsigned int nr_phy_params;
	int ret;
	struct device *dev = &pdev->dev;
	static const u16 version = SDHCI_SPEC_400 << SDHCI_SPEC_VER_SHIFT;

	clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	data = of_device_get_match_data(dev);
	if (!data)
		data = &sdhci_cdns_drv_data;

	/* Size the flexible phy_params[] array for the properties in DT */
	nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
	host = sdhci_pltfm_init(pdev, &data->pltfm_data,
				struct_size(priv, phy_params, nr_phy_params));
	if (IS_ERR(host))
		return PTR_ERR(host);

	pltfm_host = sdhci_priv(host);
	pltfm_host->clk = clk;

	priv = sdhci_pltfm_priv(pltfm_host);
	priv->nr_phy_params = nr_phy_params;
	priv->hrs_addr = host->ioaddr;
	priv->enhanced_strobe = false;
	priv->priv_writel = cdns_writel;
	/* The SDHCI-compatible SRS registers live past the Cadence HRS block */
	host->ioaddr += SDHCI_CDNS_SRS_BASE;
	host->mmc_host_ops.hs400_enhanced_strobe =
				sdhci_cdns_hs400_enhanced_strobe;
	if (data->init) {
		ret = data->init(pdev);
		if (ret)
			return ret;
	}
	sdhci_enable_v4_mode(host);
	/* Read capabilities with the spec version forced to 4.00 */
	__sdhci_read_caps(host, &version, NULL, NULL);

	sdhci_get_of_property(pdev);

	ret = mmc_of_parse(host->mmc);
	if (ret)
		return ret;

	sdhci_cdns_phy_param_parse(dev->of_node, priv);

	ret = sdhci_cdns_phy_init(priv);
	if (ret)
		return ret;

	if (host->mmc->caps & MMC_CAP_HW_RESET) {
		/* The reset line is optional; NULL rst_hw means "not wired" */
		priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL);
		if (IS_ERR(priv->rst_hw))
			return dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
					     "reset controller error\n");
		if (priv->rst_hw)
			host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset;
	}

	return sdhci_add_host(host);
}
613
/*
 * System resume: re-enable the bus clock, reprogram the PHY delay registers
 * (their state is not retained across suspend), then resume the SDHCI core.
 * The clock is unwound on any failure.
 */
static int sdhci_cdns_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_cdns_priv *priv = sdhci_pltfm_priv(pltfm_host);
	int ret;

	ret = clk_prepare_enable(pltfm_host->clk);
	if (ret)
		return ret;

	ret = sdhci_cdns_phy_init(priv);
	if (ret)
		goto disable_clk;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	return 0;

disable_clk:
	clk_disable_unprepare(pltfm_host->clk);

	return ret;
}
640
static DEFINE_SIMPLE_DEV_PM_OPS(sdhci_cdns_pm_ops, sdhci_pltfm_suspend, sdhci_cdns_resume);

/* Entries without .data fall back to the generic sdhci_cdns_drv_data */
static const struct of_device_id sdhci_cdns_match[] = {
	{
		.compatible = "socionext,uniphier-sd4hc",
		.data = &sdhci_cdns_uniphier_drv_data,
	},
	{
		.compatible = "amd,pensando-elba-sd4hc",
		.data = &sdhci_elba_drv_data,
	},
	{
		.compatible = "mobileye,eyeq-sd4hc",
		.data = &sdhci_eyeq_drv_data,
	},
	{ .compatible = "cdns,sd4hc" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdhci_cdns_match);
660
static struct platform_driver sdhci_cdns_driver = {
	.driver = {
		.name = "sdhci-cdns",
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		.pm = pm_sleep_ptr(&sdhci_cdns_pm_ops),
		.of_match_table = sdhci_cdns_match,
	},
	.probe = sdhci_cdns_probe,
	/* Generic pltfm teardown is sufficient; all resources are devm */
	.remove = sdhci_pltfm_remove,
};
module_platform_driver(sdhci_cdns_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("Cadence SD/SDIO/eMMC Host Controller Driver");
MODULE_LICENSE("GPL");
676