// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>

#include <soc/qcom/ice.h>

#include "sdhci-cqhci.h"
#include "sdhci-pltfm.h"
#include "cqhci.h"

#define CORE_MCI_VERSION 0x50
#define CORE_VERSION_MAJOR_SHIFT 28
#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK 0xff

#define CORE_MCI_GENERICS 0x70
#define SWITCHABLE_SIGNALING_VOLTAGE BIT(29)

#define HC_MODE_EN 0x1
#define CORE_POWER 0x0
#define CORE_SW_RST BIT(7)
#define FF_CLK_SW_RST_DIS BIT(13)

#define CORE_PWRCTL_BUS_OFF BIT(0)
#define CORE_PWRCTL_BUS_ON BIT(1)
#define CORE_PWRCTL_IO_LOW BIT(2)
#define CORE_PWRCTL_IO_HIGH BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS BIT(0)
#define CORE_PWRCTL_BUS_FAIL BIT(1)
#define CORE_PWRCTL_IO_SUCCESS BIT(2)
#define CORE_PWRCTL_IO_FAIL BIT(3)
#define REQ_BUS_OFF BIT(0)
#define REQ_BUS_ON BIT(1)
#define REQ_IO_LOW BIT(2)
#define REQ_IO_HIGH BIT(3)
#define INT_MASK 0xf
#define MAX_PHASES 16
#define CORE_DLL_LOCK BIT(7)
#define CORE_DDR_DLL_LOCK BIT(11)
#define CORE_DLL_EN BIT(16)
#define CORE_CDR_EN BIT(17)
#define CORE_CK_OUT_EN BIT(18)
#define CORE_CDR_EXT_EN BIT(19)
#define CORE_DLL_PDN BIT(29)
#define CORE_DLL_RST BIT(30)
#define CORE_CMD_DAT_TRACK_SEL BIT(0)

#define CORE_DDR_CAL_EN BIT(0)
#define CORE_FLL_CYCLE_CNT BIT(18)
#define CORE_DLL_CLOCK_DISABLE BIT(21)

#define DLL_USR_CTL_POR_VAL 0x10800
#define ENABLE_DLL_LOCK_STATUS BIT(26)
#define FINE_TUNE_MODE_EN BIT(27)
#define BIAS_OK_SIGNAL BIT(29)

#define DLL_CONFIG_3_LOW_FREQ_VAL 0x08
#define DLL_CONFIG_3_HIGH_FREQ_VAL 0x10

#define CORE_VENDOR_SPEC_POR_VAL 0xa9c
#define CORE_CLK_PWRSAVE BIT(1)
#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
#define CORE_HC_MCLK_SEL_MASK (3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN BIT(15)
#define CORE_IO_PAD_PWR_SWITCH BIT(16)
#define CORE_HC_SELECT_IN_EN BIT(18)
#define CORE_HC_SELECT_IN_SDR50 (4 << 19)
#define CORE_HC_SELECT_IN_HS400 (6 << 19)
#define CORE_HC_SELECT_IN_MASK (7 << 19)

#define CORE_3_0V_SUPPORT BIT(25)
#define CORE_1_8V_SUPPORT BIT(26)
#define CORE_VOLT_SUPPORT (CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0 0x130
#define CORE_SW_TRIG_FULL_CALIB BIT(16)
#define CORE_HW_AUTOCAL_ENA BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1 0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
#define CORE_TIMER_ENA BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
#define CORE_CDC_OFFSET_CFG 0x14C
#define CORE_CSR_CDC_DELAY_CFG 0x150
#define CORE_CDC_SLAVE_DDA_CFG 0x160
#define CORE_CSR_CDC_STATUS0 0x164
#define CORE_CALIBRATION_DONE BIT(0)

#define CORE_CDC_ERROR_CODE_MASK 0x7000000

#define CORE_CSR_CDC_GEN_CFG 0x178
#define CORE_CDC_SWITCH_BYPASS_OFF BIT(0)
#define CORE_CDC_SWITCH_RC_EN BIT(1)

#define CORE_CDC_T4_DLY_SEL BIT(0)
#define CORE_CMDIN_RCLK_EN BIT(1)
#define CORE_START_CDC_TRAFFIC BIT(6)

#define CORE_PWRSAVE_DLL BIT(3)

#define DDR_CONFIG_POR_VAL 0x80040873

#define INVALID_TUNING_PHASE -1
#define SDHCI_MSM_MIN_CLOCK 400000
#define CORE_FREQ_100MHZ (100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT 20
#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT 24
#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS 50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS 5000

/* Max load for eMMC Vdd supply */
#define MMC_VMMC_MAX_LOAD_UA 570000

/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA 325000

/* Max load for SD Vdd supply */
#define SD_VMMC_MAX_LOAD_UA 800000

/* Max load for SD Vdd-io supply */
#define SD_VQMMC_MAX_LOAD_UA 22000

#define msm_host_readl(msm_host, host, offset) \
        msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
        msm_host->var_ops->msm_writel_relaxed(val, host, offset)
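
/*
 * Illustrative sketch (not part of the driver logic): the accessor macros
 * above dispatch through var_ops so that one code path serves both register
 * layouts. On pre-v5 ("MCI") hosts the power-control registers live in the
 * separate core_mem region, while on v5+ they sit in the main SDHCI region,
 * so the same call resolves differently:
 *
 *         msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status);
 *         //   pre-v5: readl_relaxed(msm_host->core_mem + 0xdc)
 *         //   v5+:    readl_relaxed(host->ioaddr + 0x240)
 */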

/* CQHCI vendor specific registers */
#define CQHCI_VENDOR_CFG1 0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13)

struct sdhci_msm_offset {
        u32 core_hc_mode;
        u32 core_mci_data_cnt;
        u32 core_mci_status;
        u32 core_mci_fifo_cnt;
        u32 core_mci_version;
        u32 core_generics;
        u32 core_testbus_config;
        u32 core_testbus_sel2_bit;
        u32 core_testbus_ena;
        u32 core_testbus_sel2;
        u32 core_pwrctl_status;
        u32 core_pwrctl_mask;
        u32 core_pwrctl_clear;
        u32 core_pwrctl_ctl;
        u32 core_sdcc_debug_reg;
        u32 core_dll_config;
        u32 core_dll_status;
        u32 core_vendor_spec;
        u32 core_vendor_spec_adma_err_addr0;
        u32 core_vendor_spec_adma_err_addr1;
        u32 core_vendor_spec_func2;
        u32 core_vendor_spec_capabilities0;
        u32 core_ddr_200_cfg;
        u32 core_vendor_spec3;
        u32 core_dll_config_2;
        u32 core_dll_config_3;
        u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
        u32 core_ddr_config;
        u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
        .core_mci_data_cnt = 0x35c,
        .core_mci_status = 0x324,
        .core_mci_fifo_cnt = 0x308,
        .core_mci_version = 0x318,
        .core_generics = 0x320,
        .core_testbus_config = 0x32c,
        .core_testbus_sel2_bit = 3,
        .core_testbus_ena = (1 << 31),
        .core_testbus_sel2 = (1 << 3),
        .core_pwrctl_status = 0x240,
        .core_pwrctl_mask = 0x244,
        .core_pwrctl_clear = 0x248,
        .core_pwrctl_ctl = 0x24c,
        .core_sdcc_debug_reg = 0x358,
        .core_dll_config = 0x200,
        .core_dll_status = 0x208,
        .core_vendor_spec = 0x20c,
        .core_vendor_spec_adma_err_addr0 = 0x214,
        .core_vendor_spec_adma_err_addr1 = 0x218,
        .core_vendor_spec_func2 = 0x210,
        .core_vendor_spec_capabilities0 = 0x21c,
        .core_ddr_200_cfg = 0x224,
        .core_vendor_spec3 = 0x250,
        .core_dll_config_2 = 0x254,
        .core_dll_config_3 = 0x258,
        .core_ddr_config = 0x25c,
        .core_dll_usr_ctl = 0x388,
};

static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
        .core_hc_mode = 0x78,
        .core_mci_data_cnt = 0x30,
        .core_mci_status = 0x34,
        .core_mci_fifo_cnt = 0x44,
        .core_mci_version = 0x050,
        .core_generics = 0x70,
        .core_testbus_config = 0x0cc,
        .core_testbus_sel2_bit = 4,
        .core_testbus_ena = (1 << 3),
        .core_testbus_sel2 = (1 << 4),
        .core_pwrctl_status = 0xdc,
        .core_pwrctl_mask = 0xe0,
        .core_pwrctl_clear = 0xe4,
        .core_pwrctl_ctl = 0xe8,
        .core_sdcc_debug_reg = 0x124,
        .core_dll_config = 0x100,
        .core_dll_status = 0x108,
        .core_vendor_spec = 0x10c,
        .core_vendor_spec_adma_err_addr0 = 0x114,
        .core_vendor_spec_adma_err_addr1 = 0x118,
        .core_vendor_spec_func2 = 0x110,
        .core_vendor_spec_capabilities0 = 0x11c,
        .core_ddr_200_cfg = 0x184,
        .core_vendor_spec3 = 0x1b0,
        .core_dll_config_2 = 0x1b4,
        .core_ddr_config_old = 0x1b8,
        .core_ddr_config = 0x1bc,
};

struct sdhci_msm_variant_ops {
        u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
        void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
                        u32 offset);
};

/*
 * From V5, register spaces have changed. Wrap this info in a structure
 * and choose the data structure based on version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
        bool mci_removed;
        bool restore_dll_config;
        const struct sdhci_msm_variant_ops *var_ops;
        const struct sdhci_msm_offset *offset;
};

struct sdhci_msm_host {
        struct platform_device *pdev;
        void __iomem *core_mem; /* MSM SDCC mapped address */
        int pwr_irq;            /* power irq */
        struct clk *bus_clk;    /* SDHC bus voter clock */
        struct clk *xo_clk;     /* TCXO clk needed for FLL feature of cm_dll */
        /* core, iface, cal and sleep clocks */
        struct clk_bulk_data bulk_clks[4];
#ifdef CONFIG_MMC_CRYPTO
        struct qcom_ice *ice;
#endif
        unsigned long clk_rate;
        struct mmc_host *mmc;
        bool use_14lpp_dll_reset;
        bool tuning_done;
        bool calibration_done;
        u8 saved_tuning_phase;
        bool use_cdclp533;
        u32 curr_pwr_state;
        u32 curr_io_level;
        wait_queue_head_t pwr_irq_wait;
        bool pwr_irq_flag;
        u32 caps_0;
        bool mci_removed;
        bool restore_dll_config;
        const struct sdhci_msm_variant_ops *var_ops;
        const struct sdhci_msm_offset *offset;
        bool use_cdr;
        u32 transfer_mode;
        bool updated_ddr_cfg;
        bool uses_tassadar_dll;
        u32 dll_config;
        u32 ddr_config;
        bool vqmmc_enabled;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        return msm_host->offset;
}

/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
                u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
                u32 offset)
{
        return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
                struct sdhci_host *host, u32 offset)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

        writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
                struct sdhci_host *host, u32 offset)
{
        writel_relaxed(val, host->ioaddr + offset);
}

static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host,
                                                    unsigned int clock,
                                                    unsigned int timing)
{
        /*
         * The SDHC requires the internal clock frequency to be double the
         * actual clock that will be set for DDR mode. The controller
         * uses the faster clock (100/400 MHz) for some of its parts and
         * sends the actual required clock (50/200 MHz) to the card.
         */
        if (timing == MMC_TIMING_UHS_DDR50 ||
            timing == MMC_TIMING_MMC_DDR52 ||
            (timing == MMC_TIMING_MMC_HS400 &&
             clock == MMC_HS200_MAX_DTR) ||
            host->flags & SDHCI_HS400_TUNING)
                return 2;
        return 1;
}
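
/*
 * Worked example (illustrative): MMC_TIMING_MMC_DDR52 at a 52 MHz bus clock
 * returns 2, so the internal clock is programmed to 104 MHz while the card
 * still sees 52 MHz. Plain HS200 at 200 MHz returns 1; with HS400 tuning in
 * progress the multiplier is 2, giving the doubled 400 MHz internal clock.
 */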

static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
                                            unsigned int clock,
                                            unsigned int timing)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct clk *core_clk = msm_host->bulk_clks[0].clk;
        unsigned long achieved_rate;
        unsigned int desired_rate;
        unsigned int mult;
        int rc;

        mult = msm_get_clock_mult_for_bus_mode(host, clock, timing);
        desired_rate = clock * mult;
        rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
        if (rc) {
                pr_err("%s: Failed to set clock at rate %u at timing %d\n",
                       mmc_hostname(host->mmc), desired_rate, timing);
                return;
        }

        /*
         * Qualcomm clock drivers by default round clock _up_ if they can't
         * make the requested rate. This is not good for SD. Yell if we
         * encounter it.
         */
        achieved_rate = clk_get_rate(core_clk);
        if (achieved_rate > desired_rate)
                pr_warn("%s: Card appears overclocked; req %u Hz, actual %lu Hz\n",
                        mmc_hostname(host->mmc), desired_rate, achieved_rate);
        host->mmc->actual_clock = achieved_rate / mult;

        /* Stash the rate we requested to use in sdhci_msm_runtime_resume() */
        msm_host->clk_rate = desired_rate;

        pr_debug("%s: Setting clock at rate %lu at timing %d\n",
                 mmc_hostname(host->mmc), achieved_rate, timing);
}
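
/*
 * Worked example (illustrative, rates assumed): for DDR52 at 52 MHz the
 * doubled rate of 104 MHz is requested; if the clock driver delivers exactly
 * that, mmc->actual_clock is reported as 104 MHz / 2 = 52 MHz, i.e. the rate
 * the card actually sees. An achieved rate above the request would trip the
 * overclock warning above.
 */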

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
        u32 wait_cnt = 50;
        u8 ck_out_en;
        struct mmc_host *mmc = host->mmc;
        const struct sdhci_msm_offset *msm_offset =
                        sdhci_priv_msm_offset(host);

        /* Poll for CK_OUT_EN bit. max. poll time = 50us */
        ck_out_en = !!(readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config) & CORE_CK_OUT_EN);

        while (ck_out_en != poll) {
                if (--wait_cnt == 0) {
                        dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
                                mmc_hostname(mmc), poll);
                        return -ETIMEDOUT;
                }
                udelay(1);

                ck_out_en = !!(readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config) & CORE_CK_OUT_EN);
        }

        return 0;
}

static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
        int rc;
        static const u8 grey_coded_phase_table[] = {
                0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
                0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
        };
        unsigned long flags;
        u32 config;
        struct mmc_host *mmc = host->mmc;
        const struct sdhci_msm_offset *msm_offset =
                        sdhci_priv_msm_offset(host);

        if (phase > 0xf)
                return -EINVAL;

        spin_lock_irqsave(&host->lock, flags);

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
        config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
        rc = msm_dll_poll_ck_out_en(host, 0);
        if (rc)
                goto err_out;

        /*
         * Write the selected DLL clock output phase (0 ... 15)
         * to CDR_SELEXT bit field of DLL_CONFIG register.
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~CDR_SELEXT_MASK;
        config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CK_OUT_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
        rc = msm_dll_poll_ck_out_en(host, 1);
        if (rc)
                goto err_out;

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CDR_EN;
        config &= ~CORE_CDR_EXT_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
        goto out;

err_out:
        dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
                mmc_hostname(mmc), phase);
out:
        spin_unlock_irqrestore(&host->lock, flags);
        return rc;
}
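
/*
 * Worked example (illustrative): phase 5 maps to grey_coded_phase_table[5]
 * == 0x7, which lands in CDR_SELEXT (bits 23:20 of DLL_CONFIG), i.e.
 * config = (config & ~CDR_SELEXT_MASK) | (0x7 << CDR_SELEXT_SHIFT). The
 * Gray coding means adjacent phases differ in only one bit of the field.
 */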

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */

static int msm_find_most_appropriate_phase(struct sdhci_host *host,
                                           u8 *phase_table, u8 total_phases)
{
        int ret;
        u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
        u8 phases_per_row[MAX_PHASES] = { 0 };
        int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
        int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
        bool phase_0_found = false, phase_15_found = false;
        struct mmc_host *mmc = host->mmc;

        if (!total_phases || (total_phases > MAX_PHASES)) {
                dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
                        mmc_hostname(mmc), total_phases);
                return -EINVAL;
        }

        for (cnt = 0; cnt < total_phases; cnt++) {
                ranges[row_index][col_index] = phase_table[cnt];
                phases_per_row[row_index] += 1;
                col_index++;

                if ((cnt + 1) == total_phases) {
                        continue;
                /* check if next phase in phase_table is consecutive or not */
                } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
                        row_index++;
                        col_index = 0;
                }
        }

        if (row_index >= MAX_PHASES)
                return -EINVAL;

        /* Check if phase-0 is present in first valid window? */
        if (!ranges[0][0]) {
                phase_0_found = true;
                phase_0_raw_index = 0;
                /* Check if cycle exists between 2 valid windows */
                for (cnt = 1; cnt <= row_index; cnt++) {
                        if (phases_per_row[cnt]) {
                                for (i = 0; i < phases_per_row[cnt]; i++) {
                                        if (ranges[cnt][i] == 15) {
                                                phase_15_found = true;
                                                phase_15_raw_index = cnt;
                                                break;
                                        }
                                }
                        }
                }
        }

        /* If 2 valid windows form cycle then merge them as single window */
        if (phase_0_found && phase_15_found) {
                /* number of phases in row where phase 0 is present */
                u8 phases_0 = phases_per_row[phase_0_raw_index];
                /* number of phases in row where phase 15 is present */
                u8 phases_15 = phases_per_row[phase_15_raw_index];

                if (phases_0 + phases_15 >= MAX_PHASES)
                        /*
                         * If there are more than 1 phase windows then total
                         * number of phases in both the windows should not be
                         * more than or equal to MAX_PHASES.
                         */
                        return -EINVAL;

                /* Merge 2 cyclic windows */
                i = phases_15;
                for (cnt = 0; cnt < phases_0; cnt++) {
                        ranges[phase_15_raw_index][i] =
                                ranges[phase_0_raw_index][cnt];
                        if (++i >= MAX_PHASES)
                                break;
                }

                phases_per_row[phase_0_raw_index] = 0;
                phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
        }

        for (cnt = 0; cnt <= row_index; cnt++) {
                if (phases_per_row[cnt] > curr_max) {
                        curr_max = phases_per_row[cnt];
                        selected_row_index = cnt;
                }
        }

        i = (curr_max * 3) / 4;
        if (i)
                i--;

        ret = ranges[selected_row_index][i];

        if (ret >= MAX_PHASES) {
                ret = -EINVAL;
                dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
                        mmc_hostname(mmc), ret);
        }

        return ret;
}
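
/*
 * Worked example (illustrative): for a passing-phase table
 * {0, 1, 2, 13, 14, 15}, the windows {0, 1, 2} and {13, 14, 15} wrap around
 * phase 15/0 and are merged into the single 6-phase window
 * {13, 14, 15, 0, 1, 2}; i = (6 * 3) / 4 - 1 = 3 then selects phase 0, i.e.
 * the point 3/4 of the way into the merged window.
 */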

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
        u32 mclk_freq = 0, config;
        const struct sdhci_msm_offset *msm_offset =
                        sdhci_priv_msm_offset(host);

        /* Program the MCLK value to MCLK_FREQ bit field */
        if (host->clock <= 112000000)
                mclk_freq = 0;
        else if (host->clock <= 125000000)
                mclk_freq = 1;
        else if (host->clock <= 137000000)
                mclk_freq = 2;
        else if (host->clock <= 150000000)
                mclk_freq = 3;
        else if (host->clock <= 162000000)
                mclk_freq = 4;
        else if (host->clock <= 175000000)
                mclk_freq = 5;
        else if (host->clock <= 187000000)
                mclk_freq = 6;
        else if (host->clock <= 200000000)
                mclk_freq = 7;

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config &= ~CMUX_SHIFT_PHASE_MASK;
        config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}
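
/*
 * Worked example (illustrative): host->clock == 200 MHz falls into the last
 * bucket, so 7 is written to the MCLK_FREQ field (bits 26:24 of DLL_CONFIG);
 * 150 MHz would select 3.
 */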

/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int wait_cnt = 50;
        unsigned long flags, xo_clk = 0;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                        msm_host->offset;

        if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
                xo_clk = clk_get_rate(msm_host->xo_clk);

        spin_lock_irqsave(&host->lock, flags);

        /*
         * Make sure that clock is always enabled when DLL
         * tuning is in progress. Keeping PWRSAVE ON may
         * turn off the clock.
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_CLK_PWRSAVE;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        if (msm_host->dll_config)
                writel_relaxed(msm_host->dll_config,
                               host->ioaddr + msm_offset->core_dll_config);

        if (msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config &= ~CORE_CK_OUT_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config |= CORE_DLL_CLOCK_DISABLE;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
        }

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_RST;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_PDN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        if (!msm_host->dll_config)
                msm_cm_dll_set_freq(host);

        if (msm_host->use_14lpp_dll_reset &&
            !IS_ERR_OR_NULL(msm_host->xo_clk)) {
                u32 mclk_freq = 0;

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= CORE_FLL_CYCLE_CNT;
                if (config)
                        mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
                                        xo_clk);
                else
                        mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
                                        xo_clk);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= ~(0xFF << 10);
                config |= mclk_freq << 10;

                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
                /* wait for 5us before enabling DLL clock */
                udelay(5);
        }
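
        /*
         * Worked example (illustrative, 19.2 MHz TCXO assumed): with
         * CORE_FLL_CYCLE_CNT clear and host->clock == 192 MHz, the block
         * above computes mclk_freq = DIV_ROUND_CLOSEST_ULL(192 MHz * 4,
         * 19.2 MHz) = 40 and programs it into bits 17:10 of DLL_CONFIG_2;
         * with the bit set, the multiplier is 8 and the result doubles to 80.
         */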

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config &= ~CORE_DLL_RST;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config &= ~CORE_DLL_PDN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        if (msm_host->use_14lpp_dll_reset) {
                if (!msm_host->dll_config)
                        msm_cm_dll_set_freq(host);
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_2);
                config &= ~CORE_DLL_CLOCK_DISABLE;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_2);
        }

        /*
         * Configure DLL user control register to enable DLL status.
         * This setting is applicable to SDCC v5.1 onwards only.
         */
        if (msm_host->uses_tassadar_dll) {
                config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
                        ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_usr_ctl);

                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config_3);
                config &= ~0xFF;
                if (msm_host->clk_rate < 150000000)
                        config |= DLL_CONFIG_3_LOW_FREQ_VAL;
                else
                        config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config_3);
        }

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_DLL_EN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr +
                        msm_offset->core_dll_config);
        config |= CORE_CK_OUT_EN;
        writel_relaxed(config, host->ioaddr +
                        msm_offset->core_dll_config);

        /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
        while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
                 CORE_DLL_LOCK)) {
                /* max. wait of 50 us for LOCK bit to be set */
                if (--wait_cnt == 0) {
                        dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
                                mmc_hostname(mmc));
                        spin_unlock_irqrestore(&host->lock, flags);
                        return -ETIMEDOUT;
                }
                udelay(1);
        }

        spin_unlock_irqrestore(&host->lock, flags);
        return 0;
}

static void msm_hc_select_default(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                        msm_host->offset;

        if (!msm_host->use_cdclp533) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec3);
                config &= ~CORE_PWRSAVE_DLL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec3);
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_MCLK_SEL_MASK;
        config |= CORE_HC_MCLK_SEL_DFLT;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        /*
         * Disable HC_SELECT_IN to be able to use the UHS mode select
         * configuration from Host Control2 register for all other
         * modes.
         * Write 0 to the HC_SELECT_IN and HC_SELECT_IN_EN fields
         * in VENDOR_SPEC_FUNC.
         */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_SELECT_IN_EN;
        config &= ~CORE_HC_SELECT_IN_MASK;
        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

        /*
         * Make sure above writes impacting free running MCLK are completed
         * before changing the clk_rate at GCC.
         */
        wmb();
}

static void msm_hc_select_hs400(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_ios ios = host->mmc->ios;
        u32 config, dll_lock;
        int rc;
        const struct sdhci_msm_offset *msm_offset =
                        msm_host->offset;

        /* Select the divided clock (free running MCLK/2) */
        config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
        config &= ~CORE_HC_MCLK_SEL_MASK;
        config |= CORE_HC_MCLK_SEL_HS400;

        writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
        /*
         * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
         * register
         */
        if ((msm_host->tuning_done || ios.enhanced_strobe) &&
            !msm_host->calibration_done) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec);
                config |= CORE_HC_SELECT_IN_HS400;
                config |= CORE_HC_SELECT_IN_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec);
        }
        if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
                /*
                 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
                 * core_dll_status to be set. This should get set
                 * within 15 us at 200 MHz.
                 */
                rc = readl_relaxed_poll_timeout(host->ioaddr +
                                msm_offset->core_dll_status,
                                dll_lock,
                                (dll_lock &
                                 (CORE_DLL_LOCK |
                                  CORE_DDR_DLL_LOCK)), 10,
                                1000);
                if (rc == -ETIMEDOUT)
                        pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
                               mmc_hostname(host->mmc), dll_lock);
        }
        /*
         * Make sure above writes impacting free running MCLK are completed
         * before changing the clk_rate at GCC.
         */
        wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general all timing modes are
 * controlled via UHS mode select in Host Control2 register.
 * The eMMC specific HS200/HS400 modes don't have their respective modes
 * defined here, hence we use these values.
 *
 * HS200 - SDR104 (Since they both are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *         Initially SDR104 - when tuning is required as HS200
 *         Then when switching to DDR @ 400MHz (HS400) we use
 *         the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for DLL depending on the mode.
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
        struct mmc_ios ios = host->mmc->ios;

        if (ios.timing == MMC_TIMING_MMC_HS400 ||
            host->flags & SDHCI_HS400_TUNING)
                msm_hc_select_hs400(host);
        else
                msm_hc_select_default(host);
}

static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        u32 config, calib_done;
        int ret;
        const struct sdhci_msm_offset *msm_offset =
                        msm_host->offset;

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * Retuning in HS400 (DDR mode) will fail, just reset the
         * tuning block and restore the saved tuning phase.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        /* Set the selected phase in delay line hw block */
        ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
        if (ret)
                goto out;

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
        config |= CORE_CMD_DAT_TRACK_SEL;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config &= ~CORE_CDC_T4_DLY_SEL;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
        config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
        config |= CORE_CDC_SWITCH_RC_EN;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config &= ~CORE_START_CDC_TRAFFIC;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

        /* Perform CDC Register Initialization Sequence */

        writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
        writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
        writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
        writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
        writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
        writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
        writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
        writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

        /* CDC HW Calibration */

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config |= CORE_SW_TRIG_FULL_CALIB;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config &= ~CORE_SW_TRIG_FULL_CALIB;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
        config |= CORE_HW_AUTOCAL_ENA;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

        config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
        config |= CORE_TIMER_ENA;
        writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

        ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
                                         calib_done,
                                         (calib_done & CORE_CALIBRATION_DONE),
                                         1, 50);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CDC calibration was not completed\n",
                       mmc_hostname(host->mmc), __func__);
                goto out;
        }

        ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
                        & CORE_CDC_ERROR_CODE_MASK;
        if (ret) {
                pr_err("%s: %s: CDC error code %d\n",
                       mmc_hostname(host->mmc), __func__, ret);
                ret = -EINVAL;
                goto out;
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
        config |= CORE_START_CDC_TRAFFIC;
        writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
        struct mmc_host *mmc = host->mmc;
        u32 dll_status, config, ddr_cfg_offset;
        int ret;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        const struct sdhci_msm_offset *msm_offset =
                        sdhci_priv_msm_offset(host);

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * The core_ddr_config register currently defaults to the desired
         * configuration on reset. Reprogram the power-on reset (POR) value
         * here in case a bootloader has modified it. If this changes in the
         * future, the desired values will need to be programmed
         * appropriately.
         */
        if (msm_host->updated_ddr_cfg)
                ddr_cfg_offset = msm_offset->core_ddr_config;
        else
                ddr_cfg_offset = msm_offset->core_ddr_config_old;
        writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

        if (mmc->ios.enhanced_strobe) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_ddr_200_cfg);
                config |= CORE_CMDIN_RCLK_EN;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_ddr_200_cfg);
        }

        config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
        config |= CORE_DDR_CAL_EN;
        writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

        ret = readl_relaxed_poll_timeout(host->ioaddr +
                                         msm_offset->core_dll_status,
                                         dll_status,
                                         (dll_status & CORE_DDR_DLL_LOCK),
                                         10, 1000);

        if (ret == -ETIMEDOUT) {
                pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
                       mmc_hostname(host->mmc), __func__);
                goto out;
        }

        /*
         * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
         * When MCLK is gated OFF, it is not gated for less than 0.5us
         * and MCLK must be switched on for at least 1us before DATA
         * starts coming. Controllers with 14lpp and later tech DLL cannot
         * guarantee the above requirement. So PWRSAVE_DLL should not be
         * turned on for host controllers using this DLL.
         */
        if (!msm_host->use_14lpp_dll_reset) {
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_vendor_spec3);
                config |= CORE_PWRSAVE_DLL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_vendor_spec3);
        }

        /*
         * Drain writebuffer to ensure above DLL calibration
         * and PWRSAVE DLL is enabled.
         */
        wmb();
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_host *mmc = host->mmc;
        int ret;
        u32 config;
        const struct sdhci_msm_offset *msm_offset =
                        msm_host->offset;

        pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

        /*
         * Retuning in HS400 (DDR mode) will fail, just reset the
         * tuning block and restore the saved tuning phase.
         */
        ret = msm_init_cm_dll(host);
        if (ret)
                goto out;

        if (!mmc->ios.enhanced_strobe) {
                /* Set the selected phase in delay line hw block */
                ret = msm_config_cm_dll_phase(host,
                                              msm_host->saved_tuning_phase);
                if (ret)
                        goto out;
                config = readl_relaxed(host->ioaddr +
                                msm_offset->core_dll_config);
                config |= CORE_CMD_DAT_TRACK_SEL;
                writel_relaxed(config, host->ioaddr +
                                msm_offset->core_dll_config);
        }

        if (msm_host->use_cdclp533)
                ret = sdhci_msm_cdclp533_calibration(host);
        else
                ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
        pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
                 __func__, ret);
        return ret;
}

static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
        struct mmc_ios *ios = &host->mmc->ios;

        if (ios->timing == MMC_TIMING_UHS_SDR50 &&
            host->flags & SDHCI_SDR50_NEEDS_TUNING)
                return true;

        /*
         * Tuning is required for SDR104, HS200 and HS400 modes, and only
         * when the clock frequency is greater than 100 MHz in these modes.
         */
        if (host->clock <= CORE_FREQ_100MHZ ||
            !(ios->timing == MMC_TIMING_MMC_HS400 ||
              ios->timing == MMC_TIMING_MMC_HS200 ||
              ios->timing == MMC_TIMING_UHS_SDR104) ||
            ios->enhanced_strobe)
                return false;

        return true;
}
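
/*
 * Illustrative outcomes (clocks assumed): HS200 at 200 MHz and SDR104 at
 * 208 MHz need tuning; DDR52 at 52 MHz, any mode at or below 100 MHz, and
 * HS400 with enhanced strobe do not.
 */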

static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int ret;

        /*
         * SDR DLL comes into picture only for timing modes which need
         * tuning.
         */
        if (!sdhci_msm_is_tuning_needed(host))
                return 0;

        /* Reset the tuning block */
        ret = msm_init_cm_dll(host);
        if (ret)
                return ret;

        /* Restore the tuning block */
        ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

        return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
        const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
        u32 config, oldconfig = readl_relaxed(host->ioaddr +
                                              msm_offset->core_dll_config);

        config = oldconfig;
        if (enable) {
                config |= CORE_CDR_EN;
                config &= ~CORE_CDR_EXT_EN;
        } else {
                config &= ~CORE_CDR_EN;
                config |= CORE_CDR_EXT_EN;
        }

        if (config != oldconfig) {
                writel_relaxed(config, host->ioaddr +
                               msm_offset->core_dll_config);
        }
}

static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        struct sdhci_host *host = mmc_priv(mmc);
        int tuning_seq_cnt = 10;
        u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
        int rc;
        struct mmc_ios ios = host->mmc->ios;
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        const struct sdhci_msm_offset *msm_offset = msm_host->offset;
        u32 config;

        if (!sdhci_msm_is_tuning_needed(host)) {
                msm_host->use_cdr = false;
                sdhci_msm_set_cdr(host, false);
                return 0;
        }

        /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
        msm_host->use_cdr = true;

        /*
         * Clear tuning_done flag before tuning to ensure proper
         * HS400 settings.
         */
        msm_host->tuning_done = 0;

        if (ios.timing == MMC_TIMING_UHS_SDR50 &&
            host->flags & SDHCI_SDR50_NEEDS_TUNING) {
                config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
                config &= ~CORE_HC_SELECT_IN_MASK;
                config |= CORE_HC_SELECT_IN_EN | CORE_HC_SELECT_IN_SDR50;
                writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
        }

        /*
         * HS400 tuning in HS200 timing requires:
         * - select MCLK/2 in VENDOR_SPEC
         * - program MCLK to 400MHz (or nearest supported) in GCC
         */
        if (host->flags & SDHCI_HS400_TUNING) {
                sdhci_msm_hc_select_mode(host);
                msm_set_clock_rate_for_bus_mode(host, ios.clock, ios.timing);
                host->flags &= ~SDHCI_HS400_TUNING;
        }

retry:
        /* First of all reset the tuning block */
        rc = msm_init_cm_dll(host);
        if (rc)
                return rc;

        phase = 0;
        do {
                /* Set the phase in delay line hw block */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        return rc;

                rc = mmc_send_tuning(mmc, opcode, NULL);
                if (!rc) {
                        /* Tuning is successful at this tuning point */
                        tuned_phases[tuned_phase_cnt++] = phase;
                        dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
                                mmc_hostname(mmc), phase);
                }
        } while (++phase < ARRAY_SIZE(tuned_phases));

        if (tuned_phase_cnt) {
                if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
                        /*
                         * All phases valid is _almost_ as bad as no phases
                         * valid. Probably all phases are not really reliable
                         * but we didn't detect where the unreliable place is.
                         * That means we'll essentially be guessing and hoping
                         * we get a good phase. Better to try a few times.
                         */
                        dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
                                mmc_hostname(mmc));
                        if (--tuning_seq_cnt) {
                                tuned_phase_cnt = 0;
                                goto retry;
                        }
                }

                rc = msm_find_most_appropriate_phase(host, tuned_phases,
                                                     tuned_phase_cnt);
                if (rc < 0)
                        return rc;
                else
                        phase = rc;

                /*
                 * Finally set the selected phase in delay
                 * line hw block.
                 */
                rc = msm_config_cm_dll_phase(host, phase);
                if (rc)
                        return rc;
                msm_host->saved_tuning_phase = phase;
                dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
                        mmc_hostname(mmc), phase);
        } else {
                if (--tuning_seq_cnt)
                        goto retry;
                /* Tuning failed */
                dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
                        mmc_hostname(mmc));
                rc = -EIO;
        }

        if (!rc)
                msm_host->tuning_done = true;
        return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * a fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        int ret;

        if (host->clock > CORE_FREQ_100MHZ &&
            (msm_host->tuning_done || ios->enhanced_strobe) &&
            !msm_host->calibration_done) {
                ret = sdhci_msm_hs400_dll_calibration(host);
                if (!ret)
                        msm_host->calibration_done = true;
                else
                        pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
                               mmc_hostname(host->mmc), ret);
        }
}
1341
sdhci_msm_set_uhs_signaling(struct sdhci_host * host,unsigned int uhs)1342 static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
1343 unsigned int uhs)
1344 {
1345 struct mmc_host *mmc = host->mmc;
1346 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1347 struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
1348 u16 ctrl_2;
1349 u32 config;
1350 const struct sdhci_msm_offset *msm_offset =
1351 msm_host->offset;
1352
1353 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1354 /* Select Bus Speed Mode for host */
1355 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1356 switch (uhs) {
1357 case MMC_TIMING_UHS_SDR12:
1358 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
1359 break;
1360 case MMC_TIMING_UHS_SDR25:
1361 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
1362 break;
1363 case MMC_TIMING_UHS_SDR50:
1364 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
1365 break;
1366 case MMC_TIMING_MMC_HS400:
1367 case MMC_TIMING_MMC_HS200:
1368 case MMC_TIMING_UHS_SDR104:
1369 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
1370 break;
1371 case MMC_TIMING_UHS_DDR50:
1372 case MMC_TIMING_MMC_DDR52:
1373 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
1374 break;
1375 }
1376
1377 /*
1378 * When clock frequency is less than 100MHz, the feedback clock must be
1379 * provided and DLL must not be used so that tuning can be skipped. To
1380 * provide feedback clock, the mode selection can be any value less
1381 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
1382 */
1383 if (host->clock <= CORE_FREQ_100MHZ) {
1384 if (uhs == MMC_TIMING_MMC_HS400 ||
1385 uhs == MMC_TIMING_MMC_HS200 ||
1386 uhs == MMC_TIMING_UHS_SDR104)
1387 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
1388 /*
1389 * DLL is not required for clock <= 100MHz
1390 * Thus, make sure DLL it is disabled when not required
1391 */
1392 config = readl_relaxed(host->ioaddr +
1393 msm_offset->core_dll_config);
1394 config |= CORE_DLL_RST;
1395 writel_relaxed(config, host->ioaddr +
1396 msm_offset->core_dll_config);
1397
1398 config = readl_relaxed(host->ioaddr +
1399 msm_offset->core_dll_config);
1400 config |= CORE_DLL_PDN;
1401 writel_relaxed(config, host->ioaddr +
1402 msm_offset->core_dll_config);
1403
1404 /*
1405 * The DLL needs to be restored and CDCLP533 recalibrated
1406 * when the clock frequency is set back to 400MHz.
1407 */
1408 msm_host->calibration_done = false;
1409 }
1410
1411 dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
1412 mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
1413 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
1414
1415 if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
1416 sdhci_msm_hs400(host, &mmc->ios);
1417 }

static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
{
        struct platform_device *pdev = msm_host->pdev;
        int ret;

        if (level)
                ret = pinctrl_pm_select_default_state(&pdev->dev);
        else
                ret = pinctrl_pm_select_sleep_state(&pdev->dev);

        return ret;
}

static void msm_config_vmmc_regulator(struct mmc_host *mmc, bool hpm)
{
        int load;

        if (!hpm)
                load = 0;
        else if (!mmc->card)
                load = max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA);
        else if (mmc_card_mmc(mmc->card))
                load = MMC_VMMC_MAX_LOAD_UA;
        else if (mmc_card_sd(mmc->card))
                load = SD_VMMC_MAX_LOAD_UA;
        else
                return;

        regulator_set_load(mmc->supply.vmmc, load);
}
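
/*
 * Worked example (illustrative): before any card has enumerated
 * (!mmc->card), the worst case of the two supplies is requested:
 * max(MMC_VMMC_MAX_LOAD_UA, SD_VMMC_MAX_LOAD_UA) = 800000 uA. Once an eMMC
 * card is detected, the load drops to 570000 uA.
 */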

static void msm_config_vqmmc_regulator(struct mmc_host *mmc, bool hpm)
{
        int load;

        if (!hpm)
                load = 0;
        else if (!mmc->card)
                load = max(MMC_VQMMC_MAX_LOAD_UA, SD_VQMMC_MAX_LOAD_UA);
        else if (mmc_card_sd(mmc->card))
                load = SD_VQMMC_MAX_LOAD_UA;
        else
                return;

        regulator_set_load(mmc->supply.vqmmc, load);
}

static int sdhci_msm_set_vmmc(struct sdhci_msm_host *msm_host,
                              struct mmc_host *mmc, bool hpm)
{
        if (IS_ERR(mmc->supply.vmmc))
                return 0;

        msm_config_vmmc_regulator(mmc, hpm);

        return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}

static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
                            struct mmc_host *mmc, bool level)
{
        int ret;
        struct mmc_ios ios;

        if (msm_host->vqmmc_enabled == level)
                return 0;

        msm_config_vqmmc_regulator(mmc, level);

        if (level) {
                /* Set the IO voltage regulator to default voltage level */
                if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
                        ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330;
                else if (msm_host->caps_0 & CORE_1_8V_SUPPORT)
                        ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180;

                if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
                        ret = mmc_regulator_set_vqmmc(mmc, &ios);
                        if (ret < 0) {
                                dev_err(mmc_dev(mmc), "%s: vqmmc set voltage failed: %d\n",
                                        mmc_hostname(mmc), ret);
                                goto out;
                        }
                }
                ret = regulator_enable(mmc->supply.vqmmc);
        } else {
                ret = regulator_disable(mmc->supply.vqmmc);
        }

        if (ret)
                dev_err(mmc_dev(mmc), "%s: vqmmc %sable failed: %d\n",
                        mmc_hostname(mmc), level ? "en" : "dis", ret);
        else
                msm_host->vqmmc_enabled = level;
out:
        return ret;
}

static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host,
                                 struct mmc_host *mmc, bool hpm)
{
        int load, ret;

        load = hpm ? MMC_VQMMC_MAX_LOAD_UA : 0;
        ret = regulator_set_load(mmc->supply.vqmmc, load);
        if (ret)
                dev_err(mmc_dev(mmc), "%s: vqmmc set load failed: %d\n",
                        mmc_hostname(mmc), ret);
        return ret;
}

static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host,
                               struct mmc_host *mmc, bool level)
{
        int ret;
        bool always_on;

        if (IS_ERR(mmc->supply.vqmmc) ||
            (mmc->ios.power_mode == MMC_POWER_UNDEFINED))
                return 0;
        /*
         * For eMMC, don't turn off Vqmmc; instead just configure it in LPM
         * and HPM modes by setting the corresponding load.
         *
         * Until eMMC is initialized (i.e. always_on == 0), just turn on/off
         * Vqmmc. Vqmmc gets turned off only if init fails and mmc_power_off
         * gets invoked. Once eMMC is initialized (i.e. always_on == 1),
         * Vqmmc should remain ON, so just set the load instead of turning it
         * off/on.
         */
        always_on = !mmc_card_is_removable(mmc) &&
                    mmc->card && mmc_card_mmc(mmc->card);

        if (always_on)
                ret = msm_config_vqmmc_mode(msm_host, mmc, level);
        else
                ret = msm_toggle_vqmmc(msm_host, mmc, level);

        return ret;
}
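
/*
 * Decision sketch (illustrative): a non-removable eMMC that has already
 * enumerated only has its Vqmmc load switched between HPM and LPM, while a
 * removable SD card, or an eMMC that has not yet initialized, has Vqmmc
 * actually enabled or disabled instead.
 */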

static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
        init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
                struct sdhci_msm_host *msm_host)
{
        wake_up(&msm_host->pwr_irq_wait);
}

/*
 * sdhci_msm_check_power_status should be called after register writes
 * which can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW.
 * The state to which the register writes will change the IO lines should be
 * passed as the argument req_type. This API will check whether the IO line's
 * state is already the expected state and will wait for the power irq only
 * if a power irq is expected to be triggered based on the current and
 * expected IO line states.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_host *mmc = host->mmc;
        bool done = false;
        u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
        const struct sdhci_msm_offset *msm_offset =
                        msm_host->offset;

        pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
                 mmc_hostname(host->mmc), __func__, req_type,
                 msm_host->curr_pwr_state, msm_host->curr_io_level);

        /*
         * The power interrupt will not be generated for signal voltage
         * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
         * Since sdhci-msm-v5, this bit has been removed and SW must consider
         * it as always set.
         */
        if (!msm_host->mci_removed)
                val = msm_host_readl(msm_host, host,
                                msm_offset->core_generics);
        if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
            !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
                return;
        }

        /*
         * The IRQ for request type IO High/LOW will be generated when -
         * there is a state change in 1.8V enable bit (bit 3) of
         * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
         * which indicates 3.3V IO voltage. So, when MMC core layer tries
         * to set it to 3.3V before card detection happens, the
         * IRQ doesn't get triggered as there is no state change in this bit.
         * The driver already handles this case by changing the IO voltage
         * level to high as part of controller power up sequence. Hence, check
         * for host->pwr to handle a case where IO voltage high request is
         * issued even before controller power up.
         */
        if ((req_type & REQ_IO_HIGH) && !host->pwr) {
                pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
                         mmc_hostname(host->mmc), req_type);
                return;
        }
        if ((req_type & msm_host->curr_pwr_state) ||
            (req_type & msm_host->curr_io_level))
                done = true;
        /*
         * This is needed here to handle cases where register writes will
         * not change the current bus state or io level of the controller.
         * In this case, no power irq will be triggered and we should
         * not wait.
         */
        if (!done) {
                if (!wait_event_timeout(msm_host->pwr_irq_wait,
                                msm_host->pwr_irq_flag,
                                msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
                        dev_warn(&msm_host->pdev->dev,
                                 "%s: pwr_irq for req: (%d) timed out\n",
                                 mmc_hostname(host->mmc), req_type);
        }

        if ((req_type & REQ_BUS_ON) && mmc->card && !mmc->ops->get_cd(mmc)) {
                sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
                host->pwr = 0;
        }

        pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
                 __func__, req_type);
}

static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        const struct sdhci_msm_offset *msm_offset =
                        msm_host->offset;

        pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
               mmc_hostname(host->mmc),
               msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
               msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
               msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}

static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
        struct mmc_host *mmc = host->mmc;
        u32 irq_status, irq_ack = 0;
        int retry = 10, ret;
        u32 pwr_state = 0, io_level = 0;
        u32 config;
        const struct sdhci_msm_offset *msm_offset = msm_host->offset;

        irq_status = msm_host_readl(msm_host, host,
                        msm_offset->core_pwrctl_status);
        irq_status &= INT_MASK;

        msm_host_writel(msm_host, irq_status, host,
                        msm_offset->core_pwrctl_clear);

        /*
         * There is a rare HW scenario where the first clear pulse could be
         * lost when the actual reset and the clear/read of the status
         * register happen at the same time. Hence, retry at least 10 times
         * to make sure the status register is cleared. Otherwise, this will
         * result in a spurious power IRQ, causing system instability.
         */
        while (irq_status & msm_host_readl(msm_host, host,
                                msm_offset->core_pwrctl_status)) {
                if (retry == 0) {
                        pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
                               mmc_hostname(host->mmc), irq_status);
                        sdhci_msm_dump_pwr_ctrl_regs(host);
                        WARN_ON(1);
                        break;
                }
                msm_host_writel(msm_host, irq_status, host,
                                msm_offset->core_pwrctl_clear);
                retry--;
                udelay(10);
        }

	if ((irq_status & CORE_PWRCTL_BUS_ON) && mmc->card &&
	    !mmc->ops->get_cd(mmc)) {
		msm_host_writel(msm_host, CORE_PWRCTL_BUS_FAIL, host,
				msm_offset->core_pwrctl_ctl);
		return;
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}

	if (pwr_state) {
		ret = sdhci_msm_set_vmmc(msm_host, mmc,
					 pwr_state & REQ_BUS_ON);
		if (!ret)
			ret = sdhci_msm_set_vqmmc(msm_host, mmc,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			ret = sdhci_msm_set_pincfg(msm_host,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
		else
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
	}

	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW)
		io_level = REQ_IO_LOW;

	if (irq_status & CORE_PWRCTL_IO_HIGH)
		io_level = REQ_IO_HIGH;

	if (io_level)
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;

	if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) {
		ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "%s: IO_level setting failed(%d). signal_voltage: %d, vdd: %d irq_status: 0x%08x\n",
				mmc_hostname(mmc), ret,
				mmc->ios.signal_voltage, mmc->ios.vdd,
				irq_status);
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		}
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back to this register whether it succeeded. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If we don't have info regarding the voltage levels supported by
	 * the regulators, don't change the IO PAD PWR SWITCH.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;
		/*
		 * We should unset the IO PAD PWR switch only if the register
		 * write can set the IO lines high and the regulator also
		 * switches to 3 V. Else, we should keep the IO PAD PWR
		 * switch set. This is applicable to certain targets where
		 * the eMMC vccq supply is only 1.8V. On such targets, even
		 * during REQ_IO_HIGH, the IO PAD PWR switch must be kept set
		 * to reflect the actual regulator voltage. This way, during
		 * initialization of controllers with only 1.8V, we will set
		 * the IO PAD bit without waiting for a REQ_IO_LOW.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
		    (msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
			 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}

static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}

static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

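	/*
	 * clk_round_rate() with ULONG_MAX asks the clock framework for the
	 * highest rate the core clock can supply; that rate is reported to
	 * the SDHCI core as the maximum bus clock.
	 */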
	return clk_round_rate(core_clk, ULONG_MAX);
}

static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}

/*
 * __sdhci_msm_set_clock - sdhci_msm clock control.
 *
 * Description:
 * The MSM controller does not use the internal divider and
 * instead directly controls the GCC clock as per
 * HW recommendation.
 **/
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * The MSM controller does not use a clock divider.
	 * Thus read SDHCI_CLOCK_CONTROL and only enable the
	 * clock with no divider value programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}

/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;

	if (!clock) {
		host->mmc->actual_clock = msm_host->clk_rate = 0;
		goto out;
	}

	sdhci_msm_hc_select_mode(host);

	msm_set_clock_rate_for_bus_mode(host, ios.clock, ios.timing);
out:
	__sdhci_msm_set_clock(host, clock);
}

/*****************************************************************************\
 *                                                                           *
 *                    Inline Crypto Engine (ICE) support                     *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_MMC_CRYPTO

static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops; /* forward decl */

static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
			      struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct blk_crypto_profile *profile = &mmc->crypto_profile;
	struct device *dev = mmc_dev(mmc);
	struct qcom_ice *ice;
	union cqhci_crypto_capabilities caps;
	union cqhci_crypto_cap_entry cap;
	int err;
	int i;

	if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
		return 0;

	ice = devm_of_qcom_ice_get(dev);
	if (ice == ERR_PTR(-EOPNOTSUPP)) {
		dev_warn(dev, "Disabling inline encryption support\n");
		ice = NULL;
	}

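	/*
	 * A missing or unsupported ICE simply disables inline encryption;
	 * it is not a probe failure. Any other error (e.g. probe deferral)
	 * is propagated to the caller.
	 */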
	if (IS_ERR_OR_NULL(ice))
		return PTR_ERR_OR_ZERO(ice);

	if (qcom_ice_get_supported_key_type(ice) != BLK_CRYPTO_KEY_TYPE_RAW) {
		dev_warn(dev, "Wrapped keys not supported. Disabling inline encryption support.\n");
		return 0;
	}

	msm_host->ice = ice;

	/* Initialize the blk_crypto_profile */

	caps.reg_val = cpu_to_le32(cqhci_readl(cq_host, CQHCI_CCAP));

	/* The number of keyslots supported is (CFGC+1) */
	err = devm_blk_crypto_profile_init(dev, profile, caps.config_count + 1);
	if (err)
		return err;

	profile->ll_ops = sdhci_msm_crypto_ops;
	profile->max_dun_bytes_supported = 4;
	profile->key_types_supported = BLK_CRYPTO_KEY_TYPE_RAW;
	profile->dev = dev;

	/*
	 * Currently this driver only supports AES-256-XTS. All known versions
	 * of ICE support it, but to be safe make sure it is really declared in
	 * the crypto capability registers. The crypto capability registers
	 * also give the supported data unit size(s).
	 */
	for (i = 0; i < caps.num_crypto_cap; i++) {
		cap.reg_val = cpu_to_le32(cqhci_readl(cq_host,
						      CQHCI_CRYPTOCAP +
						      i * sizeof(__le32)));
		if (cap.algorithm_id == CQHCI_CRYPTO_ALG_AES_XTS &&
		    cap.key_size == CQHCI_CRYPTO_KEY_SIZE_256)
			profile->modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] |=
				cap.sdus_mask * 512;
	}

	mmc->caps2 |= MMC_CAP2_CRYPTO;
	return 0;
}

static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
	if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
		qcom_ice_enable(msm_host->ice);
}

static int sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
	if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
		return qcom_ice_resume(msm_host->ice);

	return 0;
}

static int sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
{
	if (msm_host->mmc->caps2 & MMC_CAP2_CRYPTO)
		return qcom_ice_suspend(msm_host->ice);

	return 0;
}

static inline struct sdhci_msm_host *
sdhci_msm_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
	struct mmc_host *mmc = mmc_from_crypto_profile(profile);
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return msm_host;
}

/*
 * Program a key into a QC ICE keyslot. QC ICE requires a QC-specific SCM call
 * for this; it doesn't support the standard way.
 */
static int sdhci_msm_ice_keyslot_program(struct blk_crypto_profile *profile,
					 const struct blk_crypto_key *key,
					 unsigned int slot)
{
	struct sdhci_msm_host *msm_host =
		sdhci_msm_host_from_crypto_profile(profile);

	return qcom_ice_program_key(msm_host->ice, slot, key);
}

static int sdhci_msm_ice_keyslot_evict(struct blk_crypto_profile *profile,
				       const struct blk_crypto_key *key,
				       unsigned int slot)
{
	struct sdhci_msm_host *msm_host =
		sdhci_msm_host_from_crypto_profile(profile);

	return qcom_ice_evict_key(msm_host->ice, slot);
}

static const struct blk_crypto_ll_ops sdhci_msm_crypto_ops = {
	.keyslot_program	= sdhci_msm_ice_keyslot_program,
	.keyslot_evict		= sdhci_msm_ice_keyslot_evict,
};

#else /* CONFIG_MMC_CRYPTO */

static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
				     struct cqhci_host *cq_host)
{
	return 0;
}

static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
}

static inline int
sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
	return 0;
}

static inline int
sdhci_msm_ice_suspend(struct sdhci_msm_host *msm_host)
{
	return 0;
}
#endif /* !CONFIG_MMC_CRYPTO */

/*****************************************************************************\
 *                                                                           *
 *                     MSM Command Queue Engine (CQE)                        *
 *                                                                           *
\*****************************************************************************/

static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

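	/*
	 * sdhci_cqe_irq() returns false when the CQE is not active; in that
	 * case the raw intmask is handed back so the SDHCI core performs its
	 * normal (legacy) interrupt handling. Otherwise the interrupt
	 * belongs to the CQE and is forwarded to cqhci.
	 */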
	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
	return 0;
}

static void sdhci_msm_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_cqe_enable(mmc);
	sdhci_msm_ice_enable(msm_host);
}

static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 ctrl;

	/*
	 * When CQE is halted, the legacy SDHCI path operates only
	 * on 16-byte descriptors in 64-bit mode.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 16;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * During CQE command transfers, the command complete bit gets
	 * latched. So s/w should clear the command complete interrupt status
	 * when CQE is either halted or disabled. Otherwise an unexpected
	 * SDHCI legacy interrupt gets triggered when CQE is halted/disabled.
	 */
	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
	ctrl |= SDHCI_INT_RESPONSE;
	sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);

	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_cqe_disable(mmc, recovery);
}

static void sdhci_msm_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	u32 count, start = 15;

	__sdhci_set_timeout(host, cmd);
	count = sdhci_readb(host, SDHCI_TIMEOUT_CONTROL);
	/*
	 * Update the software timeout value if it is less than the hardware
	 * data timeout value. The Qcom SoC hardware data timeout value is
	 * calculated as 4 * MCLK * 2^(count + 13), where MCLK = 1 / host->clock.
	 */
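	/*
	 * A worked example of the check below (values are illustrative):
	 * with host->clock = 25 MHz, 10 * host->clock = 250,000,000, and
	 * count = 13 gives 1 << (13 + 15) = 268,435,456, so the condition
	 * holds. The corresponding hardware timeout,
	 * 4 * 2^(13 + 13) / 25e6 ~= 10.7 s, exceeds 10 s, so the software
	 * timeout is raised to 22 s to ensure it cannot expire before the
	 * hardware timer does.
	 */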
	if (cmd && cmd->data && host->clock > 400000 &&
	    host->clock <= 50000000 &&
	    ((1 << (count + start)) > (10 * host->clock)))
		host->data_timeout = 22LL * NSEC_PER_SEC;
}

static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
	.enable		= sdhci_msm_cqe_enable,
	.disable	= sdhci_msm_cqe_disable,
#ifdef CONFIG_MMC_CRYPTO
	.uses_custom_crypto_profile = true,
#endif
};

static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
				  struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	u32 cqcfg;
	int ret;

	/*
	 * When CQE is halted, the SDHC operates only on 16-byte ADMA
	 * descriptors, so ensure the ADMA table is allocated for 16-byte
	 * descriptors.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->alloc_desc_sz = 16;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host)) {
		ret = PTR_ERR(cq_host);
		dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
		goto cleanup;
	}

	msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
	cq_host->ops = &sdhci_msm_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;

	ret = sdhci_msm_ice_init(msm_host, cq_host);
	if (ret)
		goto cleanup;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret) {
		dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
			mmc_hostname(host->mmc), ret);
		goto cleanup;
	}

	/* Disable CQE reset due to the CQE enable signal */
	cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
	cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
	cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);

	/*
	 * The SDHC expects 12-byte ADMA descriptors until CQE is enabled.
	 * So limit desc_sz to 12 so that the data commands sent during card
	 * initialization (before CQE gets enabled) get executed without any
	 * issues.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 12;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	dev_info(&pdev->dev, "%s: CQE init: success\n",
		 mmc_hostname(host->mmc));
	return ret;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}

/*
 * Platform specific register write functions. This is so that, if any
 * register write needs to be followed up by platform specific actions,
 * they can be added here. These functions can go to sleep when writes
 * to certain registers are done.
 * These functions rely on sdhci_set_ios not using a spinlock.
 */
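/*
 * For example, writing 0 to SDHCI_POWER_CONTROL is classified below as a
 * REQ_BUS_OFF request: the register write itself triggers a power IRQ, and
 * the caller then blocks in sdhci_msm_check_power_status() until the IRQ
 * handler acknowledges the bus-off transition (or the wait times out).
 */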
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    !mmc_op_tuning(SDHCI_GET_CMD(val)))
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power irq, ensure
		 * all previous register writes are complete by this point.
		 */
		mb();
	}
	return req_type;
}

/* This function may sleep */
static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writew_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

/* This function may sleep */
static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);

	writeb_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct regulator *supply = mmc->supply.vqmmc;
	u32 caps = 0, config;
	struct sdhci_host *host = mmc_priv(mmc);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (regulator_is_supported_voltage(supply, 1700000, 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (regulator_is_supported_voltage(supply, 2700000, 3600000))
			caps |= CORE_3_0V_SUPPORT;

		if (!caps)
			pr_warn("%s: 1.8/3V not supported for vqmmc\n",
				mmc_hostname(mmc));
	}

	if (caps) {
		/*
		 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
		 * bit can be used as required later on.
		 */
		u32 io_level = msm_host->curr_io_level;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_IO_PAD_PWR_SWITCH_EN;

		if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
			config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
			config |= CORE_IO_PAD_PWR_SWITCH;

		writel_relaxed(config,
				host->ioaddr + msm_offset->core_vendor_spec);
	}
	msm_host->caps_0 |= caps;
	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}

static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host)
{
	int ret;

	ret = mmc_regulator_get_supply(msm_host->mmc);
	if (ret)
		return ret;

	sdhci_msm_set_regulator_caps(msm_host);

	return 0;
}

static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc,
						 struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl, status;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;

		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;

		/* Enable 1.8V Signal Enable in the Host Control2 register */
		ctrl |= SDHCI_CTRL_VDD_180;
		break;

	default:
		return -EINVAL;
	}

	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/* Wait for 5 ms */
	usleep_range(5000, 5500);

	/* The regulator output should be stable within 5 ms */
	status = ctrl & SDHCI_CTRL_VDD_180;
	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if ((ctrl & SDHCI_CTRL_VDD_180) == status)
		return 0;

	dev_warn(mmc_dev(mmc), "%s: Regulator output did not become stable\n",
		 mmc_hostname(mmc));

	return -EAGAIN;
}

#define DRIVER_NAME "sdhci_msm"
#define SDHCI_MSM_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
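
/*
 * For example, SDHCI_MSM_DUMP("DLL sts: 0x%08x\n", val) expands to
 * pr_err("%s: sdhci_msm: DLL sts: 0x%08x\n", mmc_hostname(host->mmc), val),
 * so every dump line carries the host name and driver name prefix. Note the
 * macro requires a local 'host' variable in the calling scope.
 */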

static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");

	SDHCI_MSM_DUMP(
			"DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
	SDHCI_MSM_DUMP(
			"DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
		readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
	SDHCI_MSM_DUMP(
			"Vndr func: 0x%08x | Vndr func2: 0x%08x | Vndr func3: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
		readl_relaxed(host->ioaddr +
			msm_offset->core_vendor_spec_func2),
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
}

static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};

static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
	.mci_removed = true,
	.restore_dll_config = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct of_device_id sdhci_msm_dt_match[] = {
	/*
	 * Do not add new variants to the driver which are compatible with
	 * generic ones, unless they need customization.
	 */
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{.compatible = "qcom,sdm670-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
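
/*
 * Illustrative only (the node name and unit address below are hypothetical):
 * a board DT entry matching this table typically pairs a SoC-specific
 * compatible with the generic fallback, e.g.
 *
 *	mmc@8804000 {
 *		compatible = "qcom,sdm845-sdhci", "qcom,sdhci-msm-v5";
 *		...
 *	};
 *
 * SoCs without a dedicated entry here are caught by the generic
 * "qcom,sdhci-msm-v5" fallback in the node's compatible list.
 */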

static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_and_cqhci_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
	.irq = sdhci_msm_cqe_irq,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.set_power = sdhci_set_power_noreg,
	.set_timeout = sdhci_msm_set_timeout,
};

static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,

	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};

static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
					     struct sdhci_host *host)
{
	struct device_node *node = pdev->dev.of_node;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (of_property_read_u32(node, "qcom,ddr-config",
				 &msm_host->ddr_config))
		msm_host->ddr_config = DDR_CONFIG_POR_VAL;

	of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);

	if (of_device_is_compatible(node, "qcom,msm8916-sdhci"))
		host->quirks2 |= SDHCI_QUIRK2_BROKEN_64_BIT_DMA;
}

static int sdhci_msm_gcc_reset(struct device *dev, struct sdhci_host *host)
{
	struct reset_control *reset;
	int ret = 0;

	reset = reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(reset))
		return dev_err_probe(dev, PTR_ERR(reset),
				     "unable to acquire core_reset\n");

	if (!reset)
		return ret;

	ret = reset_control_assert(reset);
	if (ret) {
		reset_control_put(reset);
		return dev_err_probe(dev, ret, "core_reset assert failed\n");
	}

	/*
	 * The hardware requirement for the delay between assert/deassert
	 * is at least 3-4 sleep clock (32.7 kHz) cycles, which comes to
	 * ~122 us (4 / 32768). To be on the safe side, add a 200 us delay.
	 */
	usleep_range(200, 210);

	ret = reset_control_deassert(reset);
	if (ret) {
		reset_control_put(reset);
		return dev_err_probe(dev, ret, "core_reset deassert failed\n");
	}

	usleep_range(200, 210);
	reset_control_put(reset);

	return ret;
}

static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;
	struct device_node *node = pdev->dev.of_node;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		return ret;

	/*
	 * Based on the compatible string, load the required msm host info
	 * from the data associated with the version info.
	 */
	var_info = of_device_get_match_data(&pdev->dev);

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->restore_dll_config = var_info->restore_dll_config;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);
	sdhci_msm_get_of_property(pdev, host);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	ret = sdhci_msm_gcc_reset(&pdev->dev, host);
	if (ret)
		return ret;

	/* Set up the SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for the max. clock rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			return ret;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			return ret;
	}

	/* Set up the main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Set up the SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Check for optional interconnect paths */
	ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL);
	if (ret)
		goto bus_clk_disable;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
	if (ret)
		goto bus_clk_disable;

	/* The OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
		goto bus_clk_disable;
	}

	/* Vote for the maximum clock rate for maximum performance */
	ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;

	/*
	 * The xo clock is needed for the FLL feature of cm_dll.
	 * If the xo clock is not specified in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	if (!msm_host->mci_removed) {
		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Reset the vendor spec register to the power-on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
		       host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set the HC_MODE_EN bit in the HC_MODE register */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
				msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
			msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		     CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controllers with major version 1, minor version 0x34 and
	 * later with HS400 mode support use the CM DLL instead of the
	 * CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;

	if (core_major == 1 && core_minor >= 0x71)
		msm_host->uses_tassadar_dll = true;

	ret = sdhci_msm_register_vreg(msm_host);
	if (ret)
		goto clk_disable;

	/*
	 * The power-on reset state may trigger a power IRQ if the previous
	 * status of PWRCTL was either BUS_ON or IO_HIGH_V. So before
	 * enabling the pwr irq interrupt in the GIC, any pending power irq
	 * interrupt should be acknowledged. Otherwise the power irq
	 * interrupt handler would fire prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that the above writes are propagated before interrupt
	 * enablement in the GIC.
	 */
	mb();

	/* Set up the IRQ for handling power/voltage tasks with the PMIC */
	msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
	if (msm_host->pwr_irq < 0) {
		ret = msm_host->pwr_irq;
		goto clk_disable;
	}

	sdhci_msm_init_pwr_irq_wait(msm_host);
	/* Enable pwr irq interrupts */
	msm_host_writel(msm_host, INT_MASK, host,
			msm_offset->core_pwrctl_mask);

	ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
					sdhci_msm_pwr_irq, IRQF_ONESHOT,
					dev_name(&pdev->dev), host);
	if (ret) {
		dev_err(&pdev->dev, "Request IRQ failed (%d)\n", ret);
		goto clk_disable;
	}

	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;

	/* Set the timeout value to the maximum possible */
	host->max_timeout_count = 0xF;

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);

	host->mmc_host_ops.start_signal_voltage_switch =
		sdhci_msm_start_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	return ret;
}

static void sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
}

static int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = true;
	spin_unlock_irqrestore(&host->lock, flags);

	/* Drop the performance vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return sdhci_msm_ice_suspend(msm_host);
}

static int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	unsigned long flags;
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		return ret;
	/*
	 * Whenever the core clock is gated dynamically, the SDR DLL
	 * settings must be restored when the clock is ungated.
	 */
	if (msm_host->restore_dll_config && msm_host->clk_rate) {
		ret = sdhci_msm_restore_sdr_dll_config(host);
		if (ret)
			return ret;
	}

	dev_pm_opp_set_rate(dev, msm_host->clk_rate);

	ret = sdhci_msm_ice_resume(msm_host);
	if (ret)
		return ret;

	spin_lock_irqsave(&host->lock, flags);
	host->runtime_suspended = false;
	spin_unlock_irqrestore(&host->lock, flags);

	return ret;
}

static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
		       NULL)
};

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = pm_ptr(&sdhci_msm_pm_ops),
		   .probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");