xref: /linux/drivers/ufs/host/ufs-mediatek.c (revision 0074281bb6316108e0cff094bd4db78ab3eee236)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2019 MediaTek Inc.
4  * Authors:
5  *	Stanley Chu <stanley.chu@mediatek.com>
6  *	Peter Wang <peter.wang@mediatek.com>
7  */
8 
9 #include <linux/arm-smccc.h>
10 #include <linux/bitfield.h>
11 #include <linux/clk.h>
12 #include <linux/delay.h>
13 #include <linux/module.h>
14 #include <linux/of.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/of_platform.h>
18 #include <linux/phy/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/regulator/consumer.h>
21 #include <linux/reset.h>
22 
23 #include <ufs/ufshcd.h>
24 #include "ufshcd-pltfrm.h"
25 #include <ufs/ufs_quirks.h>
26 #include <ufs/unipro.h>
27 
28 #include "ufs-mediatek.h"
29 #include "ufs-mediatek-sip.h"
30 
31 static int  ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
32 
33 #define CREATE_TRACE_POINTS
34 #include "ufs-mediatek-trace.h"
35 #undef CREATE_TRACE_POINTS
36 
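/*
 * MAX_SUPP_MAC: maximum number of active commands advertised for MCQ.
 * MCQ_QUEUE_OFFSET(c): extracts bits [23:16] of @c as a queue index and
 * scales it by the 0x200-byte per-queue register stride,
 * e.g. index 3 -> offset 3 * 0x200 = 0x600.
 */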
37 #define MAX_SUPP_MAC 64
38 #define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
39 
40 static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
41 	{ .wmanufacturerid = UFS_ANY_VENDOR,
42 	  .model = UFS_ANY_MODEL,
43 	  .quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
44 		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
45 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
46 	  .model = "H9HQ21AFAMZDAR",
47 	  .quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
48 	{}
49 };
50 
51 static const struct of_device_id ufs_mtk_of_match[] = {
52 	{ .compatible = "mediatek,mt8183-ufshci" },
53 	{ .compatible = "mediatek,mt8195-ufshci" },
54 	{},
55 };
56 MODULE_DEVICE_TABLE(of, ufs_mtk_of_match);
57 
58 /*
59  * Details of UIC Errors
60  */
61 static const char *const ufs_uic_err_str[] = {
62 	"PHY Adapter Layer",
63 	"Data Link Layer",
64 	"Network Link Layer",
65 	"Transport Link Layer",
66 	"DME"
67 };
68 
69 static const char *const ufs_uic_pa_err_str[] = {
70 	"PHY error on Lane 0",
71 	"PHY error on Lane 1",
72 	"PHY error on Lane 2",
73 	"PHY error on Lane 3",
74 	"Generic PHY Adapter Error. This should be the LINERESET indication"
75 };
76 
77 static const char *const ufs_uic_dl_err_str[] = {
78 	"NAC_RECEIVED",
79 	"TCx_REPLAY_TIMER_EXPIRED",
80 	"AFCx_REQUEST_TIMER_EXPIRED",
81 	"FCx_PROTECTION_TIMER_EXPIRED",
82 	"CRC_ERROR",
83 	"RX_BUFFER_OVERFLOW",
84 	"MAX_FRAME_LENGTH_EXCEEDED",
85 	"WRONG_SEQUENCE_NUMBER",
86 	"AFC_FRAME_SYNTAX_ERROR",
87 	"NAC_FRAME_SYNTAX_ERROR",
88 	"EOF_SYNTAX_ERROR",
89 	"FRAME_SYNTAX_ERROR",
90 	"BAD_CTRL_SYMBOL_TYPE",
91 	"PA_INIT_ERROR",
92 	"PA_ERROR_IND_RECEIVED",
93 	"PA_INIT"
94 };
95 
96 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
97 {
98 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
99 
100 	return host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
101 }
102 
103 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
104 {
105 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
106 
107 	return host->caps & UFS_MTK_CAP_VA09_PWR_CTRL;
108 }
109 
110 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
111 {
112 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
113 
114 	return host->caps & UFS_MTK_CAP_BROKEN_VCC;
115 }
116 
117 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
118 {
119 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
120 
121 	return host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO;
122 }
123 
124 static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba)
125 {
126 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
127 
128 	return host->caps & UFS_MTK_CAP_TX_SKEW_FIX;
129 }
130 
131 static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba)
132 {
133 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
134 
135 	return host->caps & UFS_MTK_CAP_RTFF_MTCMOS;
136 }
137 
138 static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba)
139 {
140 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
141 
142 	return host->caps & UFS_MTK_CAP_ALLOW_VCCQX_LPM;
143 }
144 
145 static bool ufs_mtk_is_clk_scale_ready(struct ufs_hba *hba)
146 {
147 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
148 	struct ufs_mtk_clk *mclk = &host->mclk;
149 
150 	return mclk->ufs_sel_clki &&
151 		mclk->ufs_sel_max_clki &&
152 		mclk->ufs_sel_min_clki;
153 }
154 
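/*
 * Enable or disable UniPro clock gating through the MediaTek
 * vendor-specific VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE attributes.
 */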
155 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
156 {
157 	u32 tmp;
158 
159 	if (enable) {
160 		ufshcd_dme_get(hba,
161 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
162 		tmp = tmp |
163 		      (1 << RX_SYMBOL_CLK_GATE_EN) |
164 		      (1 << SYS_CLK_GATE_EN) |
165 		      (1 << TX_CLK_GATE_EN);
166 		ufshcd_dme_set(hba,
167 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
168 
169 		ufshcd_dme_get(hba,
170 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
171 		tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
172 		ufshcd_dme_set(hba,
173 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
174 	} else {
175 		ufshcd_dme_get(hba,
176 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
177 		tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
178 			      (1 << SYS_CLK_GATE_EN) |
179 			      (1 << TX_CLK_GATE_EN));
180 		ufshcd_dme_set(hba,
181 			       UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
182 
183 		ufshcd_dme_get(hba,
184 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
185 		tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
186 		ufshcd_dme_set(hba,
187 			       UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
188 	}
189 }
190 
191 static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
192 {
193 	struct arm_smccc_res res;
194 
195 	ufs_mtk_crypto_ctrl(res, 1);
196 	if (res.a0) {
197 		dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
198 			 __func__, res.a0);
199 		hba->caps &= ~UFSHCD_CAP_CRYPTO;
200 	}
201 }
202 
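/*
 * Assert and then de-assert the HCI, crypto, UniPro and M-PHY resets to
 * bring the whole host back to a clean state.
 */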
203 static void ufs_mtk_host_reset(struct ufs_hba *hba)
204 {
205 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
206 	struct arm_smccc_res res;
207 
208 	reset_control_assert(host->hci_reset);
209 	reset_control_assert(host->crypto_reset);
210 	reset_control_assert(host->unipro_reset);
211 	reset_control_assert(host->mphy_reset);
212 
213 	usleep_range(100, 110);
214 
215 	reset_control_deassert(host->unipro_reset);
216 	reset_control_deassert(host->crypto_reset);
217 	reset_control_deassert(host->hci_reset);
218 	reset_control_deassert(host->mphy_reset);
219 
220 	/* restore mphy setting after mphy reset */
221 	if (host->mphy_reset)
222 		ufs_mtk_mphy_ctrl(UFS_MPHY_RESTORE, res);
223 }
224 
225 static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
226 				       struct reset_control **rc,
227 				       char *str)
228 {
229 	*rc = devm_reset_control_get(hba->dev, str);
230 	if (IS_ERR(*rc)) {
231 		dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
232 			 str, PTR_ERR(*rc));
233 		*rc = NULL;
234 	}
235 }
236 
237 static void ufs_mtk_init_reset(struct ufs_hba *hba)
238 {
239 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
240 
241 	ufs_mtk_init_reset_control(hba, &host->hci_reset,
242 				   "hci_rst");
243 	ufs_mtk_init_reset_control(hba, &host->unipro_reset,
244 				   "unipro_rst");
245 	ufs_mtk_init_reset_control(hba, &host->crypto_reset,
246 				   "crypto_rst");
247 	ufs_mtk_init_reset_control(hba, &host->mphy_reset,
248 				   "mphy_rst");
249 }
250 
251 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
252 				     enum ufs_notify_change_status status)
253 {
254 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
255 
256 	if (status == PRE_CHANGE) {
257 		if (host->unipro_lpm) {
258 			hba->vps->hba_enable_delay_us = 0;
259 		} else {
260 			hba->vps->hba_enable_delay_us = 600;
261 			ufs_mtk_host_reset(hba);
262 		}
263 
264 		if (hba->caps & UFSHCD_CAP_CRYPTO)
265 			ufs_mtk_crypto_enable(hba);
266 
267 		if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
268 			ufshcd_writel(hba, 0,
269 				      REG_AUTO_HIBERNATE_IDLE_TIMER);
270 			hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
271 			hba->ahit = 0;
272 		}
273 
274 		/*
275 		 * Turn on CLK_CG early to bypass abnormal ERR_CHK signal
276 		 * to prevent host hang issue
277 		 */
278 		ufshcd_writel(hba,
279 			      ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
280 			      REG_UFS_XOUFS_CTRL);
281 
282 		/* DDR_EN setting */
283 		if (host->ip_ver >= IP_VER_MT6989) {
284 			ufshcd_rmwl(hba, UFS_MASK(0x7FFF, 8),
285 				0x453000, REG_UFS_MMIO_OPT_CTRL_0);
286 		}
287 
288 	}
289 
290 	return 0;
291 }
292 
293 static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
294 {
295 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
296 	struct device *dev = hba->dev;
297 	struct device_node *np = dev->of_node;
298 	int err = 0;
299 
300 	host->mphy = devm_of_phy_get_by_index(dev, np, 0);
301 
302 	if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
303 		/*
304 		 * The UFS driver might be probed before the phy driver is.
305 		 * In that case, return -EPROBE_DEFER so probing is retried later.
306 		 */
307 		err = -EPROBE_DEFER;
308 		dev_info(dev,
309 			 "%s: required phy hasn't probed yet. err = %d\n",
310 			__func__, err);
311 	} else if (IS_ERR(host->mphy)) {
312 		err = PTR_ERR(host->mphy);
313 		if (err != -ENODEV) {
314 			dev_info(dev, "%s: PHY get failed %d\n", __func__,
315 				 err);
316 		}
317 	}
318 
319 	if (err)
320 		host->mphy = NULL;
321 	/*
322 	 * Allow unbound mphy because not every platform needs specific
323 	 * mphy control.
324 	 */
325 	if (err == -ENODEV)
326 		err = 0;
327 
328 	return err;
329 }
330 
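/*
 * Request or release the device reference clock via REG_UFS_REFCLK_CTRL and
 * poll for the controller's acknowledgment, honoring the per-device
 * gating/ungating wait times.
 */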
331 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
332 {
333 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
334 	struct arm_smccc_res res;
335 	ktime_t timeout, time_checked;
336 	u32 value;
337 
338 	if (host->ref_clk_enabled == on)
339 		return 0;
340 
341 	ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
342 
343 	if (on) {
344 		ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
345 	} else {
346 		ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
347 		ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
348 	}
349 
350 	/* Wait for ack */
351 	timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
352 	do {
353 		time_checked = ktime_get();
354 		value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
355 
356 		/* Wait until the ack bit equals the req bit */
357 		if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
358 			goto out;
359 
360 		usleep_range(100, 200);
361 	} while (ktime_before(time_checked, timeout));
362 
363 	dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
364 
365 	/*
366 	 * If the clock-on request times out, assume the clock is off and notify
367 	 * TF-A to apply the clock-off setting (keep DIFN disabled, release the resource).
368 	 * If the clock-off request times out, assume the clock will eventually turn
369 	 * off and set ref_clk_enabled directly (keep DIFN disabled, keep the resource).
370 	 */
371 	if (on)
372 		ufs_mtk_ref_clk_notify(false, POST_CHANGE, res);
373 	else
374 		host->ref_clk_enabled = false;
375 
376 	return -ETIMEDOUT;
377 
378 out:
379 	host->ref_clk_enabled = on;
380 	if (on)
381 		ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
382 
383 	ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
384 
385 	return 0;
386 }
387 
388 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
389 					  u16 gating_us)
390 {
391 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
392 
393 	if (hba->dev_info.clk_gating_wait_us) {
394 		host->ref_clk_gating_wait_us =
395 			hba->dev_info.clk_gating_wait_us;
396 	} else {
397 		host->ref_clk_gating_wait_us = gating_us;
398 	}
399 
400 	host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
401 }
402 
403 static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
404 {
405 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
406 
407 	if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
408 		ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
409 		ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
410 		ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
411 		ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
412 		ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
413 	} else {
414 		ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
415 	}
416 }
417 
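/*
 * Poll the debug probe until the host state machine leaves the hibern8
 * enter/exit range and settles back to the idle (HCE base) state.
 */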
418 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
419 			    unsigned long retry_ms)
420 {
421 	u64 timeout, time_checked;
422 	u32 val, sm;
423 	bool wait_idle;
424 
425 	/* cannot use plain ktime_get() in suspend */
426 	timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
427 
428 	/* wait a specific time after check base */
429 	udelay(10);
430 	wait_idle = false;
431 
432 	do {
433 		time_checked = ktime_get_mono_fast_ns();
434 		ufs_mtk_dbg_sel(hba);
435 		val = ufshcd_readl(hba, REG_UFS_PROBE);
436 
437 		sm = val & 0x1f;
438 
439 		/*
440 		 * If the state is between hibern8 enter and hibern8 exit,
441 		 * wait until it returns to the idle state.
442 		 */
443 		if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
444 			wait_idle = true;
445 			udelay(50);
446 			continue;
447 		} else if (!wait_idle)
448 			break;
449 
450 		if (wait_idle && (sm == VS_HCE_BASE))
451 			break;
452 	} while (time_checked < timeout);
453 
454 	if (wait_idle && sm != VS_HCE_BASE)
455 		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
456 }
457 
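/*
 * Poll the debug probe until the link state equals @state or
 * @max_wait_ms elapses.
 */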
458 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
459 				   unsigned long max_wait_ms)
460 {
461 	ktime_t timeout, time_checked;
462 	u32 val;
463 
464 	timeout = ktime_add_ms(ktime_get(), max_wait_ms);
465 	do {
466 		time_checked = ktime_get();
467 		ufs_mtk_dbg_sel(hba);
468 		val = ufshcd_readl(hba, REG_UFS_PROBE);
469 		val = val >> 28;
470 
471 		if (val == state)
472 			return 0;
473 
474 		/* Sleep for max. 200us */
475 		usleep_range(100, 200);
476 	} while (ktime_before(time_checked, timeout));
477 
478 	return -ETIMEDOUT;
479 }
480 
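/*
 * Power the M-PHY on or off. On platforms with VA09 power control, the VA09
 * regulator is enabled and allowed to stabilize before powering on, and
 * disabled after powering off.
 */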
481 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
482 {
483 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
484 	struct phy *mphy = host->mphy;
485 	struct arm_smccc_res res;
486 	int ret = 0;
487 
488 	if (!mphy || !(on ^ host->mphy_powered_on))
489 		return 0;
490 
491 	if (on) {
492 		if (ufs_mtk_is_va09_supported(hba)) {
493 			ret = regulator_enable(host->reg_va09);
494 			if (ret < 0)
495 				goto out;
496 			/* wait 200 us to stabilize VA09 */
497 			usleep_range(200, 210);
498 			ufs_mtk_va09_pwr_ctrl(res, 1);
499 		}
500 		phy_power_on(mphy);
501 	} else {
502 		phy_power_off(mphy);
503 		if (ufs_mtk_is_va09_supported(hba)) {
504 			ufs_mtk_va09_pwr_ctrl(res, 0);
505 			ret = regulator_disable(host->reg_va09);
506 		}
507 	}
508 out:
509 	if (ret) {
510 		dev_info(hba->dev,
511 			 "failed to %s va09: %d\n",
512 			 on ? "enable" : "disable",
513 			 ret);
514 	} else {
515 		host->mphy_powered_on = on;
516 	}
517 
518 	return ret;
519 }
520 
521 static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
522 				struct clk **clk_out)
523 {
524 	struct clk *clk;
525 	int err = 0;
526 
527 	clk = devm_clk_get(dev, name);
528 	if (IS_ERR(clk))
529 		err = PTR_ERR(clk);
530 	else
531 		*clk_out = clk;
532 
533 	return err;
534 }
535 
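/*
 * Boost or relax the inline-crypto engine: when boosting, raise vcore and
 * switch the crypt clock mux to its performance parent; otherwise switch to
 * the low-power parent and drop the vcore request.
 */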
536 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
537 {
538 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
539 	struct ufs_mtk_crypt_cfg *cfg;
540 	struct regulator *reg;
541 	int volt, ret;
542 
543 	if (!ufs_mtk_is_boost_crypt_enabled(hba))
544 		return;
545 
546 	cfg = host->crypt;
547 	volt = cfg->vcore_volt;
548 	reg = cfg->reg_vcore;
549 
550 	ret = clk_prepare_enable(cfg->clk_crypt_mux);
551 	if (ret) {
552 		dev_info(hba->dev, "clk_prepare_enable(): %d\n",
553 			 ret);
554 		return;
555 	}
556 
557 	if (boost) {
558 		ret = regulator_set_voltage(reg, volt, INT_MAX);
559 		if (ret) {
560 			dev_info(hba->dev,
561 				 "failed to set vcore to %d\n", volt);
562 			goto out;
563 		}
564 
565 		ret = clk_set_parent(cfg->clk_crypt_mux,
566 				     cfg->clk_crypt_perf);
567 		if (ret) {
568 			dev_info(hba->dev,
569 				 "failed to set clk_crypt_perf\n");
570 			regulator_set_voltage(reg, 0, INT_MAX);
571 			goto out;
572 		}
573 	} else {
574 		ret = clk_set_parent(cfg->clk_crypt_mux,
575 				     cfg->clk_crypt_lp);
576 		if (ret) {
577 			dev_info(hba->dev,
578 				 "failed to set clk_crypt_lp\n");
579 			goto out;
580 		}
581 
582 		ret = regulator_set_voltage(reg, 0, INT_MAX);
583 		if (ret) {
584 			dev_info(hba->dev,
585 				 "failed to set vcore to MIN\n");
586 		}
587 	}
588 out:
589 	clk_disable_unprepare(cfg->clk_crypt_mux);
590 }
591 
592 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
593 				 struct clk **clk)
594 {
595 	int ret;
596 
597 	ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
598 	if (ret) {
599 		dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
600 			 name, ret);
601 	}
602 
603 	return ret;
604 }
605 
606 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
607 {
608 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
609 	struct ufs_mtk_crypt_cfg *cfg;
610 	struct device *dev = hba->dev;
611 	struct regulator *reg;
612 	u32 volt;
613 
614 	host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
615 				   GFP_KERNEL);
616 	if (!host->crypt)
617 		goto disable_caps;
618 
619 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
620 	if (IS_ERR(reg)) {
621 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
622 			 PTR_ERR(reg));
623 		goto disable_caps;
624 	}
625 
626 	if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
627 				 &volt)) {
628 		dev_info(dev, "failed to get boost-crypt-vcore-min");
629 		goto disable_caps;
630 	}
631 
632 	cfg = host->crypt;
633 	if (ufs_mtk_init_host_clk(hba, "crypt_mux",
634 				  &cfg->clk_crypt_mux))
635 		goto disable_caps;
636 
637 	if (ufs_mtk_init_host_clk(hba, "crypt_lp",
638 				  &cfg->clk_crypt_lp))
639 		goto disable_caps;
640 
641 	if (ufs_mtk_init_host_clk(hba, "crypt_perf",
642 				  &cfg->clk_crypt_perf))
643 		goto disable_caps;
644 
645 	cfg->reg_vcore = reg;
646 	cfg->vcore_volt = volt;
647 	host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
648 
649 disable_caps:
650 	return;
651 }
652 
653 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
654 {
655 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
656 
657 	host->reg_va09 = regulator_get(hba->dev, "va09");
658 	if (IS_ERR(host->reg_va09))
659 		dev_info(hba->dev, "failed to get va09");
660 	else
661 		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
662 }
663 
664 static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
665 {
666 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
667 	struct device_node *np = hba->dev->of_node;
668 
669 	if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
670 		ufs_mtk_init_boost_crypt(hba);
671 
672 	if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
673 		ufs_mtk_init_va09_pwr_ctrl(hba);
674 
675 	if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
676 		host->caps |= UFS_MTK_CAP_DISABLE_AH8;
677 
678 	if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
679 		host->caps |= UFS_MTK_CAP_BROKEN_VCC;
680 
681 	if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
682 		host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
683 
684 	if (of_property_read_bool(np, "mediatek,ufs-tx-skew-fix"))
685 		host->caps |= UFS_MTK_CAP_TX_SKEW_FIX;
686 
687 	if (of_property_read_bool(np, "mediatek,ufs-disable-mcq"))
688 		host->caps |= UFS_MTK_CAP_DISABLE_MCQ;
689 
690 	if (of_property_read_bool(np, "mediatek,ufs-rtff-mtcmos"))
691 		host->caps |= UFS_MTK_CAP_RTFF_MTCMOS;
692 
693 	if (of_property_read_bool(np, "mediatek,ufs-broken-rtc"))
694 		host->caps |= UFS_MTK_CAP_MCQ_BROKEN_RTC;
695 
696 	dev_info(hba->dev, "caps: 0x%x", host->caps);
697 }
698 
699 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
700 {
701 	ufs_mtk_boost_crypt(hba, scale_up);
702 }
703 
704 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
705 {
706 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
707 
708 	if (on) {
709 		phy_power_on(host->mphy);
710 		ufs_mtk_setup_ref_clk(hba, on);
711 		if (!ufshcd_is_clkscaling_supported(hba))
712 			ufs_mtk_scale_perf(hba, on);
713 	} else {
714 		if (!ufshcd_is_clkscaling_supported(hba))
715 			ufs_mtk_scale_perf(hba, on);
716 		ufs_mtk_setup_ref_clk(hba, on);
717 		phy_power_off(host->mphy);
718 	}
719 }
720 
721 static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba)
722 {
723 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
724 	u32 irq, i;
725 
726 	if (!hba->mcq_enabled)
727 		return;
728 
729 	if (host->mcq_nr_intr == 0)
730 		return;
731 
732 	for (i = 0; i < host->mcq_nr_intr; i++) {
733 		irq = host->mcq_intr_info[i].irq;
734 		disable_irq(irq);
735 	}
736 	host->is_mcq_intr_enabled = false;
737 }
738 
739 static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba)
740 {
741 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
742 	u32 irq, i;
743 
744 	if (!hba->mcq_enabled)
745 		return;
746 
747 	if (host->mcq_nr_intr == 0)
748 		return;
749 
750 	if (host->is_mcq_intr_enabled == true)
751 		return;
752 
753 	for (i = 0; i < host->mcq_nr_intr; i++) {
754 		irq = host->mcq_intr_info[i].irq;
755 		enable_irq(irq);
756 	}
757 	host->is_mcq_intr_enabled = true;
758 }
759 
760 /**
761  * ufs_mtk_setup_clocks - enables/disables clocks
762  * @hba: host controller instance
763  * @on: If true, enable clocks else disable them.
764  * @status: PRE_CHANGE or POST_CHANGE notify
765  *
766  * Return: 0 on success, non-zero on failure.
767  */
768 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
769 				enum ufs_notify_change_status status)
770 {
771 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
772 	bool clk_pwr_off = false;
773 	int ret = 0;
774 
775 	/*
776 	 * In case ufs_mtk_init() is not yet done, simply ignore.
777 	 * This ufs_mtk_setup_clocks() shall be called from
778 	 * ufs_mtk_init() after init is done.
779 	 */
780 	if (!host)
781 		return 0;
782 
783 	if (!on && status == PRE_CHANGE) {
784 		if (ufshcd_is_link_off(hba)) {
785 			clk_pwr_off = true;
786 		} else if (ufshcd_is_link_hibern8(hba) ||
787 			 (!ufshcd_can_hibern8_during_gating(hba) &&
788 			 ufshcd_is_auto_hibern8_enabled(hba))) {
789 			/*
790 			 * Gate ref-clk and poweroff mphy if link state is in
791 			 * OFF or Hibern8 by either Auto-Hibern8 or
792 			 * ufshcd_link_state_transition().
793 			 */
794 			ret = ufs_mtk_wait_link_state(hba,
795 						      VS_LINK_HIBERN8,
796 						      15);
797 			if (!ret)
798 				clk_pwr_off = true;
799 		}
800 
801 		if (clk_pwr_off)
802 			ufs_mtk_pwr_ctrl(hba, false);
803 		ufs_mtk_mcq_disable_irq(hba);
804 	} else if (on && status == POST_CHANGE) {
805 		ufs_mtk_pwr_ctrl(hba, true);
806 		ufs_mtk_mcq_enable_irq(hba);
807 	}
808 
809 	return ret;
810 }
811 
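/*
 * Map @cpu to its hardware queue and return the MCQ interrupt assigned to
 * that queue.
 */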
812 static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
813 {
814 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
815 	struct blk_mq_tag_set *tag_set = &hba->host->tag_set;
816 	struct blk_mq_queue_map	*map = &tag_set->map[HCTX_TYPE_DEFAULT];
817 	unsigned int nr = map->nr_queues;
818 	unsigned int q_index;
819 
820 	q_index = map->mq_map[cpu];
821 	if (q_index > nr) {
822 		dev_err(hba->dev, "hwq index %d exceed %d\n",
823 			q_index, nr);
824 		return MTK_MCQ_INVALID_IRQ;
825 	}
826 
827 	return host->mcq_intr_info[q_index].irq;
828 }
829 
830 static void ufs_mtk_mcq_set_irq_affinity(struct ufs_hba *hba, unsigned int cpu)
831 {
832 	unsigned int irq, _cpu;
833 	int ret;
834 
835 	irq = ufs_mtk_mcq_get_irq(hba, cpu);
836 	if (irq == MTK_MCQ_INVALID_IRQ) {
837 		dev_err(hba->dev, "invalid irq. unable to bind irq to cpu%d", cpu);
838 		return;
839 	}
840 
841 	/* force migrate irq of cpu0 to cpu3 */
842 	_cpu = (cpu == 0) ? 3 : cpu;
843 	ret = irq_set_affinity(irq, cpumask_of(_cpu));
844 	if (ret) {
845 		dev_err(hba->dev, "set irq %d affinity to CPU %d failed\n",
846 			irq, _cpu);
847 		return;
848 	}
849 	dev_info(hba->dev, "set irq %d affinity to CPU: %d\n", irq, _cpu);
850 }
851 
852 static bool ufs_mtk_is_legacy_chipset(struct ufs_hba *hba, u32 hw_ip_ver)
853 {
854 	bool is_legacy = false;
855 
856 	switch (hw_ip_ver) {
857 	case IP_LEGACY_VER_MT6893:
858 	case IP_LEGACY_VER_MT6781:
859 		/* other legacy chipset IDs can be added here */
860 		is_legacy = true;
861 		break;
862 	default:
863 		break;
864 	}
865 	dev_info(hba->dev, "legacy IP version - 0x%x, is legacy : %d", hw_ip_ver, is_legacy);
866 
867 	return is_legacy;
868 }
869 
870 /*
871  * The HW version format changed from 01MMmmmm to 1MMMmmmm starting with
872  * project MT6878. To perform correct version comparisons, the version
873  * number is remapped by SW for the following projects:
874  * IP_VER_MT6983	0x00360000 to 0x10360000
875  * IP_VER_MT6897	0x01440000 to 0x10440000
876  * IP_VER_MT6989	0x01450000 to 0x10450000
877  * IP_VER_MT6991	0x01460000 to 0x10460000
878  */
879 static void ufs_mtk_get_hw_ip_version(struct ufs_hba *hba)
880 {
881 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
882 	u32 hw_ip_ver;
883 
884 	hw_ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
885 
886 	if (((hw_ip_ver & (0xFF << 24)) == (0x1 << 24)) ||
887 	    ((hw_ip_ver & (0xFF << 24)) == 0)) {
888 		hw_ip_ver &= ~(0xFF << 24);
889 		hw_ip_ver |= (0x1 << 28);
890 	}
891 
892 	host->ip_ver = hw_ip_ver;
893 
894 	host->legacy_ip_ver = ufs_mtk_is_legacy_chipset(hba, hw_ip_ver);
895 }
896 
897 static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
898 {
899 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
900 	int ret, ver = 0;
901 
902 	if (host->hw_ver.major)
903 		return;
904 
905 	/* Set default (minimum) version anyway */
906 	host->hw_ver.major = 2;
907 
908 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
909 	if (!ret) {
910 		if (ver >= UFS_UNIPRO_VER_1_8) {
911 			host->hw_ver.major = 3;
912 			/*
913 			 * Fix HCI version for some platforms with
914 			 * incorrect version
915 			 */
916 			if (hba->ufs_version < ufshci_version(3, 0))
917 				hba->ufs_version = ufshci_version(3, 0);
918 		}
919 	}
920 }
921 
922 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
923 {
924 	return hba->ufs_version;
925 }
926 
927 /**
928  * ufs_mtk_init_clocks - Init mtk driver private clocks
929  *
930  * @hba: per adapter instance
931  */
932 static void ufs_mtk_init_clocks(struct ufs_hba *hba)
933 {
934 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
935 	struct list_head *head = &hba->clk_list_head;
936 	struct ufs_clk_info *clki, *clki_tmp;
937 	struct device *dev = hba->dev;
938 	struct regulator *reg;
939 	u32 volt;
940 
941 	/*
942 	 * Find private clocks and store them in struct ufs_mtk_clk.
943 	 * Remove "ufs_sel_max_src" and "ufs_sel_min_src" from the list to avoid
944 	 * being switched on/off in clock gating.
945 	 */
946 	list_for_each_entry_safe(clki, clki_tmp, head, list) {
947 		if (!strcmp(clki->name, "ufs_sel")) {
948 			host->mclk.ufs_sel_clki = clki;
949 		} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
950 			host->mclk.ufs_sel_max_clki = clki;
951 			clk_disable_unprepare(clki->clk);
952 			list_del(&clki->list);
953 		} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
954 			host->mclk.ufs_sel_min_clki = clki;
955 			clk_disable_unprepare(clki->clk);
956 			list_del(&clki->list);
957 		} else if (!strcmp(clki->name, "ufs_fde")) {
958 			host->mclk.ufs_fde_clki = clki;
959 		} else if (!strcmp(clki->name, "ufs_fde_max_src")) {
960 			host->mclk.ufs_fde_max_clki = clki;
961 			clk_disable_unprepare(clki->clk);
962 			list_del(&clki->list);
963 		} else if (!strcmp(clki->name, "ufs_fde_min_src")) {
964 			host->mclk.ufs_fde_min_clki = clki;
965 			clk_disable_unprepare(clki->clk);
966 			list_del(&clki->list);
967 		}
968 	}
969 
970 	list_for_each_entry(clki, head, list) {
971 		dev_info(hba->dev, "clk \"%s\" present", clki->name);
972 	}
973 
974 	if (!ufs_mtk_is_clk_scale_ready(hba)) {
975 		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
976 		dev_info(hba->dev,
977 			 "%s: Clk-scaling not ready. Feature disabled.",
978 			 __func__);
979 		return;
980 	}
981 
982 	/*
983 	 * Get vcore by default if the dts provides these settings,
984 	 * regardless of clock scaling support (it may be disabled by the customer).
985 	 */
986 	reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
987 	if (IS_ERR(reg)) {
988 		dev_info(dev, "failed to get dvfsrc-vcore: %ld",
989 			 PTR_ERR(reg));
990 		return;
991 	}
992 
993 	if (of_property_read_u32(dev->of_node, "clk-scale-up-vcore-min",
994 				 &volt)) {
995 		dev_info(dev, "failed to get clk-scale-up-vcore-min");
996 		return;
997 	}
998 
999 	host->mclk.reg_vcore = reg;
1000 	host->mclk.vcore_volt = volt;
1001 
1002 	/* If default boot is max gear, request vcore */
1003 	if (reg && volt && host->clk_scale_up) {
1004 		if (regulator_set_voltage(reg, volt, INT_MAX)) {
1005 			dev_info(hba->dev,
1006 				"Failed to set vcore to %d\n", volt);
1007 		}
1008 	}
1009 }
1010 
1011 #define MAX_VCC_NAME 30
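/*
 * If no "vcc" regulator was bound, look it up by the SMC-provided option
 * number ("vcc-optN") or by the device's UFS spec version ("vcc-ufsN"),
 * depending on the DT properties, then get and enable it.
 */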
1012 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
1013 {
1014 	struct ufs_vreg_info *info = &hba->vreg_info;
1015 	struct device_node *np = hba->dev->of_node;
1016 	struct device *dev = hba->dev;
1017 	char vcc_name[MAX_VCC_NAME];
1018 	struct arm_smccc_res res;
1019 	int err, ver;
1020 
1021 	if (hba->vreg_info.vcc)
1022 		return 0;
1023 
1024 	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
1025 		ufs_mtk_get_vcc_num(res);
1026 		if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
1027 			snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
1028 		else
1029 			return -ENODEV;
1030 	} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
1031 		ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
1032 		snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
1033 	} else {
1034 		return 0;
1035 	}
1036 
1037 	err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc, false);
1038 	if (err)
1039 		return err;
1040 
1041 	err = ufshcd_get_vreg(dev, info->vcc);
1042 	if (err)
1043 		return err;
1044 
1045 	err = regulator_enable(info->vcc->reg);
1046 	if (!err) {
1047 		info->vcc->enabled = true;
1048 		dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
1049 	}
1050 
1051 	return err;
1052 }
1053 
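/*
 * UFS 3.x devices use vccq while older devices use vccq2: keep the rail in
 * use always-on and release the unused one.
 */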
1054 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
1055 {
1056 	struct ufs_vreg_info *info = &hba->vreg_info;
1057 	struct ufs_vreg **vreg_on, **vreg_off;
1058 
1059 	if (hba->dev_info.wspecversion >= 0x0300) {
1060 		vreg_on = &info->vccq;
1061 		vreg_off = &info->vccq2;
1062 	} else {
1063 		vreg_on = &info->vccq2;
1064 		vreg_off = &info->vccq;
1065 	}
1066 
1067 	if (*vreg_on)
1068 		(*vreg_on)->always_on = true;
1069 
1070 	if (*vreg_off) {
1071 		regulator_disable((*vreg_off)->reg);
1072 		devm_kfree(hba->dev, (*vreg_off)->name);
1073 		devm_kfree(hba->dev, *vreg_off);
1074 		*vreg_off = NULL;
1075 	}
1076 }
1077 
1078 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
1079 {
1080 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1081 	struct platform_device *pdev;
1082 	int i;
1083 	int irq;
1084 
1085 	host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
1086 	pdev = container_of(hba->dev, struct platform_device, dev);
1087 
1088 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1089 		goto failed;
1090 
1091 	for (i = 0; i < host->mcq_nr_intr; i++) {
1092 		/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
1093 		irq = platform_get_irq(pdev, i + 1);
1094 		if (irq < 0) {
1095 			host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
1096 			goto failed;
1097 		}
1098 		host->mcq_intr_info[i].hba = hba;
1099 		host->mcq_intr_info[i].irq = irq;
1100 		dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
1101 	}
1102 
1103 	return;
1104 failed:
1105 	/* invalidate irq info */
1106 	for (i = 0; i < host->mcq_nr_intr; i++)
1107 		host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
1108 
1109 	host->mcq_nr_intr = 0;
1110 }
1111 
1112 /**
1113  * ufs_mtk_init - find other essential mmio bases
1114  * @hba: host controller instance
1115  *
1116  * Binds PHY with controller and powers up PHY enabling clocks
1117  * and regulators.
1118  *
1119  * Return: -EPROBE_DEFER if binding fails, returns negative error
1120  * on phy power up failure and returns zero on success.
1121  */
1122 static int ufs_mtk_init(struct ufs_hba *hba)
1123 {
1124 	const struct of_device_id *id;
1125 	struct device *dev = hba->dev;
1126 	struct ufs_mtk_host *host;
1127 	struct Scsi_Host *shost = hba->host;
1128 	int err = 0;
1129 	struct arm_smccc_res res;
1130 
1131 	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
1132 	if (!host) {
1133 		err = -ENOMEM;
1134 		dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
1135 		goto out;
1136 	}
1137 
1138 	host->hba = hba;
1139 	ufshcd_set_variant(hba, host);
1140 
1141 	id = of_match_device(ufs_mtk_of_match, dev);
1142 	if (!id) {
1143 		err = -EINVAL;
1144 		goto out;
1145 	}
1146 
1147 	/* Initialize host capability */
1148 	ufs_mtk_init_host_caps(hba);
1149 
1150 	ufs_mtk_init_mcq_irq(hba);
1151 
1152 	err = ufs_mtk_bind_mphy(hba);
1153 	if (err)
1154 		goto out_variant_clear;
1155 
1156 	ufs_mtk_init_reset(hba);
1157 
1158 	/* back up the mphy setting if the mphy can be reset */
1159 	if (host->mphy_reset)
1160 		ufs_mtk_mphy_ctrl(UFS_MPHY_BACKUP, res);
1161 
1162 	/* Enable runtime autosuspend */
1163 	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
1164 
1165 	/* Enable clock-gating */
1166 	hba->caps |= UFSHCD_CAP_CLK_GATING;
1167 
1168 	/* Enable inline encryption */
1169 	hba->caps |= UFSHCD_CAP_CRYPTO;
1170 
1171 	/* Enable WriteBooster */
1172 	hba->caps |= UFSHCD_CAP_WB_EN;
1173 
1174 	/* Enable clk scaling */
1175 	hba->caps |= UFSHCD_CAP_CLK_SCALING;
1176 	host->clk_scale_up = true; /* default is max freq */
1177 
1178 	/* Set runtime pm delay to replace default */
1179 	shost->rpm_autosuspend_delay = MTK_RPM_AUTOSUSPEND_DELAY_MS;
1180 
1181 	hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
1182 
1183 	hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
1184 	if (host->caps & UFS_MTK_CAP_MCQ_BROKEN_RTC)
1185 		hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
1186 
1187 	hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
1188 
1189 	if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
1190 		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
1191 
1192 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1193 		hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP;
1194 
1195 	ufs_mtk_init_clocks(hba);
1196 
1197 	/*
1198 	 * ufshcd_vops_init() is invoked after
1199 	 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
1200 	 * phy clock setup is skipped.
1201 	 *
1202 	 * Enable phy clocks specifically here.
1203 	 */
1204 	ufs_mtk_mphy_power_on(hba, true);
1205 
1206 	if (ufs_mtk_is_rtff_mtcmos(hba)) {
1207 		/* Restore first, to avoid backing up an unexpected value */
1208 		ufs_mtk_mtcmos_ctrl(false, res);
1209 
1210 		/* Power on to init */
1211 		ufs_mtk_mtcmos_ctrl(true, res);
1212 	}
1213 
1214 	ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
1215 
1216 	ufs_mtk_get_hw_ip_version(hba);
1217 
1218 	goto out;
1219 
1220 out_variant_clear:
1221 	ufshcd_set_variant(hba, NULL);
1222 out:
1223 	return err;
1224 }
1225 
1226 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
1227 				     struct ufs_pa_layer_attr *dev_req_params)
1228 {
1229 	if (!ufs_mtk_is_pmc_via_fastauto(hba))
1230 		return false;
1231 
1232 	if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
1233 		return false;
1234 
1235 	if (dev_req_params->pwr_tx != FAST_MODE &&
1236 	    dev_req_params->gear_tx < UFS_HS_G4)
1237 		return false;
1238 
1239 	if (dev_req_params->pwr_rx != FAST_MODE &&
1240 	    dev_req_params->gear_rx < UFS_HS_G4)
1241 		return false;
1242 
1243 	return true;
1244 }
1245 
1246 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
1247 				const struct ufs_pa_layer_attr *dev_max_params,
1248 				struct ufs_pa_layer_attr *dev_req_params)
1249 {
1250 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1251 	struct ufs_host_params host_params;
1252 	int ret;
1253 
1254 	ufshcd_init_host_params(&host_params);
1255 	host_params.hs_rx_gear = UFS_HS_G5;
1256 	host_params.hs_tx_gear = UFS_HS_G5;
1257 
1258 	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
1259 	if (ret) {
1260 		pr_info("%s: failed to determine capabilities\n",
1261 			__func__);
1262 	}
1263 
1264 	if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
1265 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
1266 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
1267 
1268 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
1269 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
1270 
1271 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
1272 			       dev_req_params->lane_tx);
1273 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
1274 			       dev_req_params->lane_rx);
1275 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
1276 			       dev_req_params->hs_rate);
1277 
1278 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
1279 			       PA_NO_ADAPT);
1280 
1281 		ret = ufshcd_uic_change_pwr_mode(hba,
1282 					FASTAUTO_MODE << 4 | FASTAUTO_MODE);
1283 
1284 		if (ret) {
1285 			dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
1286 				__func__, ret);
1287 		}
1288 	}
1289 
1290 	if (host->hw_ver.major >= 3) {
1291 		ret = ufshcd_dme_configure_adapt(hba,
1292 					   dev_req_params->gear_tx,
1293 					   PA_INITIAL_ADAPT);
1294 	}
1295 
1296 	return ret;
1297 }
1298 
1299 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
1300 				enum ufs_notify_change_status stage,
1301 				const struct ufs_pa_layer_attr *dev_max_params,
1302 				struct ufs_pa_layer_attr *dev_req_params)
1303 {
1304 	int ret = 0;
1305 
1306 	switch (stage) {
1307 	case PRE_CHANGE:
1308 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
1309 					     dev_req_params);
1310 		break;
1311 	case POST_CHANGE:
1312 		break;
1313 	default:
1314 		ret = -EINVAL;
1315 		break;
1316 	}
1317 
1318 	return ret;
1319 }
1320 
1321 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
1322 {
1323 	int ret;
1324 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1325 
1326 	ret = ufshcd_dme_set(hba,
1327 			     UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
1328 			     lpm ? 1 : 0);
1329 	if (!ret || !lpm) {
1330 		/*
1331 		 * Forcibly stay in non-LPM mode if the UIC command fails, so
1332 		 * that the default hba_enable_delay_us value is used when
1333 		 * re-enabling the host.
1334 		 */
1335 		host->unipro_lpm = lpm;
1336 	}
1337 
1338 	return ret;
1339 }
1340 
1341 static int ufs_mtk_pre_link(struct ufs_hba *hba)
1342 {
1343 	int ret;
1344 	u32 tmp;
1345 
1346 	ufs_mtk_get_controller_version(hba);
1347 
1348 	ret = ufs_mtk_unipro_set_lpm(hba, false);
1349 	if (ret)
1350 		return ret;
1351 
1352 	/*
1353 	 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
1354 	 * to make sure that both host and device TX LCC are disabled
1355 	 * once link startup is completed.
1356 	 */
1357 	ret = ufshcd_disable_host_tx_lcc(hba);
1358 	if (ret)
1359 		return ret;
1360 
1361 	/* disable deep stall */
1362 	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
1363 	if (ret)
1364 		return ret;
1365 
1366 	tmp &= ~(1 << 6);
1367 
1368 	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
1369 
1370 	return ret;
1371 }
1372 
1373 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
1374 {
1375 	u32 ah_ms;
1376 
1377 	if (ufshcd_is_clkgating_allowed(hba)) {
1378 		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
1379 			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
1380 					  hba->ahit);
1381 		else
1382 			ah_ms = 10;
1383 		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
1384 	}
1385 }
1386 
1387 static void ufs_mtk_post_link(struct ufs_hba *hba)
1388 {
1389 	/* enable unipro clock gating feature */
1390 	ufs_mtk_cfg_unipro_cg(hba, true);
1391 
1392 	/* will be configured during probe hba */
1393 	if (ufshcd_is_auto_hibern8_supported(hba))
1394 		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
1395 			FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
1396 
1397 	ufs_mtk_setup_clk_gating(hba);
1398 }
1399 
1400 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
1401 				       enum ufs_notify_change_status stage)
1402 {
1403 	int ret = 0;
1404 
1405 	switch (stage) {
1406 	case PRE_CHANGE:
1407 		ret = ufs_mtk_pre_link(hba);
1408 		break;
1409 	case POST_CHANGE:
1410 		ufs_mtk_post_link(hba);
1411 		break;
1412 	default:
1413 		ret = -EINVAL;
1414 		break;
1415 	}
1416 
1417 	return ret;
1418 }
1419 
1420 static int ufs_mtk_device_reset(struct ufs_hba *hba)
1421 {
1422 	struct arm_smccc_res res;
1423 
1424 	/* disable hba before device reset */
1425 	ufshcd_hba_stop(hba);
1426 
1427 	ufs_mtk_device_reset_ctrl(0, res);
1428 
1429 	/*
1430 	 * The reset signal is active low. UFS devices shall detect
1431 	 * more than or equal to 1us of positive or negative RST_n
1432 	 * pulse width.
1433 	 *
1434 	 * To be on the safe side, keep the reset low for at least 10us.
1435 	 */
1436 	usleep_range(10, 15);
1437 
1438 	ufs_mtk_device_reset_ctrl(1, res);
1439 
1440 	/* Some devices may need time to respond to rst_n */
1441 	usleep_range(10000, 15000);
1442 
1443 	dev_info(hba->dev, "device reset done\n");
1444 
1445 	return 0;
1446 }
1447 
1448 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
1449 {
1450 	int err;
1451 
1452 	err = ufshcd_hba_enable(hba);
1453 	if (err)
1454 		return err;
1455 
1456 	err = ufs_mtk_unipro_set_lpm(hba, false);
1457 	if (err)
1458 		return err;
1459 
1460 	err = ufshcd_uic_hibern8_exit(hba);
1461 	if (err)
1462 		return err;
1463 
1464 	/* Check link state to make sure h8 exit succeeded */
1465 	ufs_mtk_wait_idle_state(hba, 5);
1466 	err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1467 	if (err) {
1468 		dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
1469 		return err;
1470 	}
1471 	ufshcd_set_link_active(hba);
1472 
1473 	err = ufshcd_make_hba_operational(hba);
1474 	if (err)
1475 		return err;
1476 
1477 	if (hba->mcq_enabled) {
1478 		ufs_mtk_config_mcq(hba, false);
1479 		ufshcd_mcq_make_queues_operational(hba);
1480 		ufshcd_mcq_config_mac(hba, hba->nutrs);
1481 		ufshcd_mcq_enable(hba);
1482 	}
1483 
1484 	return 0;
1485 }
1486 
1487 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
1488 {
1489 	int err;
1490 
1491 	/* Disable reset confirm feature by UniPro */
1492 	ufshcd_writel(hba,
1493 		      (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
1494 		      REG_UFS_XOUFS_CTRL);
1495 
1496 	err = ufs_mtk_unipro_set_lpm(hba, true);
1497 	if (err) {
1498 		/* Resume UniPro state for following error recovery */
1499 		ufs_mtk_unipro_set_lpm(hba, false);
1500 		return err;
1501 	}
1502 
1503 	return 0;
1504 }
1505 
1506 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
1507 {
1508 	struct ufs_vreg *vccqx = NULL;
1509 
1510 	if (hba->vreg_info.vccq)
1511 		vccqx = hba->vreg_info.vccq;
1512 	else
1513 		vccqx = hba->vreg_info.vccq2;
1514 
1515 	regulator_set_mode(vccqx->reg,
1516 			   lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
1517 }
1518 
1519 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
1520 {
1521 	struct arm_smccc_res res;
1522 
1523 	ufs_mtk_device_pwr_ctrl(!lpm,
1524 				(unsigned long)hba->dev_info.wspecversion,
1525 				res);
1526 }
1527 
1528 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
1529 {
1530 	bool skip_vccqx = false;
1531 
1532 	/* Prevent entering LPM when device is still active */
1533 	if (lpm && ufshcd_is_ufs_dev_active(hba))
1534 		return;
1535 
1536 	/* Skip vccqx lpm control and control vsx only */
1537 	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
1538 		skip_vccqx = true;
1539 
1540 	/* VCC is always-on, control vsx only */
1541 	if (!hba->vreg_info.vcc)
1542 		skip_vccqx = true;
1543 
1544 	/* Broken vcc keeps vcc always on; in most cases control vsx only */
1545 	if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) {
1546 		/* Some devices allow vccqx/vsx to enter lpm */
1547 		if (ufs_mtk_is_allow_vccqx_lpm(hba))
1548 			skip_vccqx = false;
1549 		else /* control vsx only */
1550 			skip_vccqx = true;
1551 	}
1552 
1553 	if (lpm) {
1554 		if (!skip_vccqx)
1555 			ufs_mtk_vccqx_set_lpm(hba, lpm);
1556 		ufs_mtk_vsx_set_lpm(hba, lpm);
1557 	} else {
1558 		ufs_mtk_vsx_set_lpm(hba, lpm);
1559 		if (!skip_vccqx)
1560 			ufs_mtk_vccqx_set_lpm(hba, lpm);
1561 	}
1562 }
1563 
1564 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
1565 {
1566 	int ret;
1567 
1568 	/* disable auto-hibern8 */
1569 	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
1570 
1571 	/* wait for the host to return to idle state after auto-hibern8 is off */
1572 	ufs_mtk_wait_idle_state(hba, 5);
1573 
1574 	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
1575 	if (ret)
1576 		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
1577 }
1578 
1579 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
1580 	enum ufs_notify_change_status status)
1581 {
1582 	int err;
1583 	struct arm_smccc_res res;
1584 
1585 	if (status == PRE_CHANGE) {
1586 		if (ufshcd_is_auto_hibern8_supported(hba))
1587 			ufs_mtk_auto_hibern8_disable(hba);
1588 		return 0;
1589 	}
1590 
1591 	if (ufshcd_is_link_hibern8(hba)) {
1592 		err = ufs_mtk_link_set_lpm(hba);
1593 		if (err)
1594 			goto fail;
1595 	}
1596 
1597 	if (!ufshcd_is_link_active(hba)) {
1598 		/*
1599 		 * Make sure no error will be returned to prevent
1600 		 * ufshcd_suspend() re-enabling regulators while vreg is still
1601 		 * in low-power mode.
1602 		 */
1603 		err = ufs_mtk_mphy_power_on(hba, false);
1604 		if (err)
1605 			goto fail;
1606 	}
1607 
1608 	if (ufshcd_is_link_off(hba))
1609 		ufs_mtk_device_reset_ctrl(0, res);
1610 
1611 	ufs_mtk_sram_pwr_ctrl(false, res);
1612 
1613 	return 0;
1614 fail:
1615 	/*
1616 	 * Forcibly set the link to the off state to trigger
1617 	 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
1618 	 * for a complete host reset.
1619 	 */
1620 	ufshcd_set_link_off(hba);
1621 	return -EAGAIN;
1622 }
1623 
1624 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
1625 {
1626 	int err;
1627 	struct arm_smccc_res res;
1628 
1629 	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
1630 		ufs_mtk_dev_vreg_set_lpm(hba, false);
1631 
1632 	ufs_mtk_sram_pwr_ctrl(true, res);
1633 
1634 	err = ufs_mtk_mphy_power_on(hba, true);
1635 	if (err)
1636 		goto fail;
1637 
1638 	if (ufshcd_is_link_hibern8(hba)) {
1639 		err = ufs_mtk_link_set_hpm(hba);
1640 		if (err)
1641 			goto fail;
1642 	}
1643 
1644 	return 0;
1645 fail:
1646 	return ufshcd_link_recovery(hba);
1647 }
1648 
1649 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
1650 {
1651 	/* Dump ufshci register 0x140 ~ 0x14C */
1652 	ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
1653 			 "XOUFS Ctrl (0x140): ");
1654 
1655 	ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
1656 
1657 	/* Dump ufshci register 0x2200 ~ 0x22AC */
1658 	ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
1659 			 REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
1660 			 "MPHY Ctrl (0x2200): ");
1661 
1662 	/* Direct debugging information to REG_UFS_PROBE */
1663 	ufs_mtk_dbg_sel(hba);
1664 	ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
1665 }
1666 
1667 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
1668 {
1669 	struct ufs_dev_info *dev_info = &hba->dev_info;
1670 	u16 mid = dev_info->wmanufacturerid;
1671 	unsigned int cpu;
1672 
1673 	if (hba->mcq_enabled) {
1674 		/* Iterate all cpus to set affinity for mcq irqs */
1675 		for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1676 			ufs_mtk_mcq_set_irq_affinity(hba, cpu);
1677 	}
1678 
1679 	if (mid == UFS_VENDOR_SAMSUNG) {
1680 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
1681 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
1682 	} else if (mid == UFS_VENDOR_MICRON) {
1683 		/* Only for hosts which have the TX skew issue */
1684 		if (ufs_mtk_is_tx_skew_fix(hba) &&
1685 			(STR_PRFX_EQUAL("MT128GBCAV2U31", dev_info->model) ||
1686 			STR_PRFX_EQUAL("MT256GBCAV4U31", dev_info->model) ||
1687 			STR_PRFX_EQUAL("MT512GBCAV8U31", dev_info->model) ||
1688 			STR_PRFX_EQUAL("MT256GBEAX4U40", dev_info->model) ||
1689 			STR_PRFX_EQUAL("MT512GAYAX4U40", dev_info->model) ||
1690 			STR_PRFX_EQUAL("MT001TAYAX8U40", dev_info->model))) {
1691 			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8);
1692 		}
1693 	}
1694 
1695 	/*
1696 	 * Decide waiting time before gating reference clock and
1697 	 * after ungating reference clock according to vendors'
1698 	 * requirements.
1699 	 */
1700 	if (mid == UFS_VENDOR_SAMSUNG)
1701 		ufs_mtk_setup_ref_clk_wait_us(hba, 1);
1702 	else if (mid == UFS_VENDOR_SKHYNIX)
1703 		ufs_mtk_setup_ref_clk_wait_us(hba, 30);
1704 	else if (mid == UFS_VENDOR_TOSHIBA)
1705 		ufs_mtk_setup_ref_clk_wait_us(hba, 100);
1706 	else
1707 		ufs_mtk_setup_ref_clk_wait_us(hba,
1708 					      REFCLK_DEFAULT_WAIT_US);
1709 	return 0;
1710 }
1711 
1712 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
1713 {
1714 	ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
1715 
1716 	if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
1717 	    (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
1718 		hba->vreg_info.vcc->always_on = true;
1719 		/*
1720 		 * VCC will be kept always-on thus we don't
1721 		 * need any delay during regulator operations
1722 		 */
1723 		hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
1724 			UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
1725 	}
1726 
1727 	ufs_mtk_vreg_fix_vcc(hba);
1728 	ufs_mtk_vreg_fix_vccqx(hba);
1729 }
1730 
1731 static void ufs_mtk_event_notify(struct ufs_hba *hba,
1732 				 enum ufs_event_type evt, void *data)
1733 {
1734 	unsigned int val = *(u32 *)data;
1735 	unsigned long reg;
1736 	u8 bit;
1737 
1738 	trace_ufs_mtk_event(evt, val);
1739 
1740 	/* Print details of UIC Errors */
1741 	if (evt <= UFS_EVT_DME_ERR) {
1742 		dev_info(hba->dev,
1743 			 "Host UIC Error Code (%s): %08x\n",
1744 			 ufs_uic_err_str[evt], val);
1745 		reg = val;
1746 	}
1747 
1748 	if (evt == UFS_EVT_PA_ERR) {
1749 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_pa_err_str))
1750 			dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
1751 	}
1752 
1753 	if (evt == UFS_EVT_DL_ERR) {
1754 		for_each_set_bit(bit, &reg, ARRAY_SIZE(ufs_uic_dl_err_str))
1755 			dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
1756 	}
1757 }
1758 
1759 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
1760 				struct devfreq_dev_profile *profile,
1761 				struct devfreq_simple_ondemand_data *data)
1762 {
1763 	/* Customize min gear in clk scaling */
1764 	hba->clk_scaling.min_gear = UFS_HS_G4;
1765 
1766 	hba->vps->devfreq_profile.polling_ms = 200;
1767 	hba->vps->ondemand_data.upthreshold = 50;
1768 	hba->vps->ondemand_data.downdifferential = 20;
1769 }
1770 
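/*
 * Reparent ufs_sel (and ufs_fde when both fde sources exist) to its max or
 * min source. When scaling up, raise vcore before switching parents; when
 * scaling down, switch parents first and then drop the vcore request.
 */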
1771 static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
1772 {
1773 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1774 	struct ufs_mtk_clk *mclk = &host->mclk;
1775 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
1776 	struct ufs_clk_info *fde_clki = mclk->ufs_fde_clki;
1777 	struct regulator *reg;
1778 	int volt, ret = 0;
1779 	bool clk_bind_vcore = false;
1780 	bool clk_fde_scale = false;
1781 
1782 	if (!hba->clk_scaling.is_initialized)
1783 		return;
1784 
1785 	if (!clki || !fde_clki)
1786 		return;
1787 
1788 	reg = host->mclk.reg_vcore;
1789 	volt = host->mclk.vcore_volt;
1790 	if (reg && volt != 0)
1791 		clk_bind_vcore = true;
1792 
1793 	if (mclk->ufs_fde_max_clki && mclk->ufs_fde_min_clki)
1794 		clk_fde_scale = true;
1795 
1796 	ret = clk_prepare_enable(clki->clk);
1797 	if (ret) {
1798 		dev_info(hba->dev,
1799 			 "clk_prepare_enable() fail, ret: %d\n", ret);
1800 		return;
1801 	}
1802 
1803 	if (clk_fde_scale) {
1804 		ret = clk_prepare_enable(fde_clki->clk);
1805 		if (ret) {
1806 			dev_info(hba->dev,
1807 				 "fde clk_prepare_enable() fail, ret: %d\n", ret);
1808 			return;
1809 		}
1810 	}
1811 
1812 	if (scale_up) {
1813 		if (clk_bind_vcore) {
1814 			ret = regulator_set_voltage(reg, volt, INT_MAX);
1815 			if (ret) {
1816 				dev_info(hba->dev,
1817 					"Failed to set vcore to %d\n", volt);
1818 				goto out;
1819 			}
1820 		}
1821 
1822 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
1823 		if (ret) {
1824 			dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
1825 				ret);
1826 		}
1827 
1828 		if (clk_fde_scale) {
1829 			ret = clk_set_parent(fde_clki->clk,
1830 				mclk->ufs_fde_max_clki->clk);
1831 			if (ret) {
1832 				dev_info(hba->dev,
1833 					"Failed to set fde clk mux, ret = %d\n",
1834 					ret);
1835 			}
1836 		}
1837 	} else {
1838 		if (clk_fde_scale) {
1839 			ret = clk_set_parent(fde_clki->clk,
1840 				mclk->ufs_fde_min_clki->clk);
1841 			if (ret) {
1842 				dev_info(hba->dev,
1843 					"Failed to set fde clk mux, ret = %d\n",
1844 					ret);
1845 				goto out;
1846 			}
1847 		}
1848 
1849 		ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
1850 		if (ret) {
1851 			dev_info(hba->dev, "Failed to set clk mux, ret = %d\n",
1852 				ret);
1853 			goto out;
1854 		}
1855 
1856 		if (clk_bind_vcore) {
1857 			ret = regulator_set_voltage(reg, 0, INT_MAX);
1858 			if (ret) {
1859 				dev_info(hba->dev,
1860 					"failed to set vcore to MIN\n");
1861 			}
1862 		}
1863 	}
1864 
1865 out:
1866 	clk_disable_unprepare(clki->clk);
1867 
1868 	if (clk_fde_scale)
1869 		clk_disable_unprepare(fde_clki->clk);
1870 }
1871 
1872 /**
1873  * ufs_mtk_clk_scale - Internal clk scaling operation
1874  *
1875  * The MediaTek platform supports clk scaling by switching the parent of the
1876  * ufs_sel mux. ufs_sel feeds ufs_ck, which clocks the UFS hardware directly.
1877  * The max and min clock rates of ufs_sel defined in the dts should match the
1878  * rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
1879  * This prevents changing the rate of a PLL clock shared between modules.
1880  *
1881  * @hba: per adapter instance
1882  * @scale_up: True for scaling up and false for scaling down
1883  */
1884 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
1885 {
1886 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1887 	struct ufs_mtk_clk *mclk = &host->mclk;
1888 	struct ufs_clk_info *clki = mclk->ufs_sel_clki;
1889 
1890 	if (host->clk_scale_up == scale_up)
1891 		goto out;
1892 
1893 	_ufs_mtk_clk_scale(hba, scale_up);
1897 
1898 	host->clk_scale_up = scale_up;
1899 
1900 	/* curr_freq must always be updated before the core calls clk_set_rate() */
1901 	if (scale_up)
1902 		clki->curr_freq = clki->max_freq;
1903 	else
1904 		clki->curr_freq = clki->min_freq;
1905 out:
1906 	trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
1907 }
1908 
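/*
 * Clock scaling notification: switch the clock mux parent on PRE_CHANGE,
 * before the core calls clk_set_rate(), and adjust performance/QoS
 * settings on POST_CHANGE.
 */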
1909 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
1910 				    unsigned long target_freq,
1911 				    enum ufs_notify_change_status status)
1912 {
1913 	if (!ufshcd_is_clkscaling_supported(hba))
1914 		return 0;
1915 
1916 	if (status == PRE_CHANGE) {
1917 		/* Switch parent before clk_set_rate() */
1918 		ufs_mtk_clk_scale(hba, scale_up);
1919 	} else {
1920 		/* Request interrupt latency QoS accordingly */
1921 		ufs_mtk_scale_perf(hba, scale_up);
1922 	}
1923 
1924 	return 0;
1925 }
1926 
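/*
 * Report the maximum number of active commands supported by the host in
 * MCQ mode, or -EPERM when MCQ is disabled via UFS_MTK_CAP_DISABLE_MCQ.
 */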
1927 static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
1928 {
1929 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1930 
1931 	/* MCQ operation not permitted */
1932 	if (host->caps & UFS_MTK_CAP_DISABLE_MCQ)
1933 		return -EPERM;
1934 
1935 	return MAX_SUPP_MAC;
1936 }
1937 
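/*
 * Set up the MCQ operation and runtime registers: per-group offsets for
 * the SQ and CQ operation registers (SQD, SQIS, CQD, CQIS), plus their
 * common stride and mapped base addresses.
 */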
1938 static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
1939 {
1940 	struct ufshcd_mcq_opr_info_t *opr;
1941 	int i;
1942 
1943 	hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
1944 	hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
1945 	hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
1946 	hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
1947 
1948 	for (i = 0; i < OPR_MAX; i++) {
1949 		opr = &hba->mcq_opr[i];
1950 		opr->stride = REG_UFS_MCQ_STRIDE;
1951 		opr->base = hba->mmio_base + opr->offset;
1952 	}
1953 
1954 	return 0;
1955 }
1956 
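/*
 * Map the MCQ queue registers: the queue region offset is decoded from
 * the MCQ capabilities and added to the main MMIO base.
 */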
1957 static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
1958 {
1959 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1960 
1961 	/* Fail MCQ initialization if the MCQ interrupts were not properly provided */
1962 	if (!host->mcq_nr_intr) {
1963 		dev_info(hba->dev, "IRQs not ready. MCQ disabled.\n");
1964 		return -EINVAL;
1965 	}
1966 
1967 	hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
1968 	return 0;
1969 }
1970 
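/*
 * Per-queue MCQ interrupt handler: acknowledge the CQ interrupt status
 * and, on a tail-entry-push event, poll completed CQ entries for the
 * corresponding hardware queue.
 */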
1971 static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
1972 {
1973 	struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
1974 	struct ufs_hba *hba = mcq_intr_info->hba;
1975 	struct ufs_hw_queue *hwq;
1976 	u32 events;
1977 	int qid = mcq_intr_info->qid;
1978 
1979 	hwq = &hba->uhq[qid];
1980 
1981 	events = ufshcd_mcq_read_cqis(hba, qid);
1982 	if (events)
1983 		ufshcd_mcq_write_cqis(hba, events, qid);
1984 
1985 	if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
1986 		ufshcd_mcq_poll_cqe_lock(hba, hwq);
1987 
1988 	return IRQ_HANDLED;
1989 }
1990 
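/* Request one dedicated interrupt per MCQ hardware queue */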
1991 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
1992 {
1993 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
1994 	u32 irq, i;
1995 	int ret;
1996 
1997 	for (i = 0; i < host->mcq_nr_intr; i++) {
1998 		irq = host->mcq_intr_info[i].irq;
1999 		if (irq == MTK_MCQ_INVALID_IRQ) {
2000 			dev_err(hba->dev, "invalid irq for MCQ queue %u\n", i);
2001 			return -ENOPARAM;
2002 		}
2003 
2004 		host->mcq_intr_info[i].qid = i;
2005 		ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
2006 				       &host->mcq_intr_info[i]);
2007 
2008 		dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
2009 
2010 		if (ret) {
2011 			dev_err(hba->dev, "Failed to request irq %u, ret = %d\n", irq, ret);
2012 			return ret;
2013 		}
2014 	}
2015 
2016 	return 0;
2017 }
2018 
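/*
 * Configure MCQ operation: request the per-queue interrupts once (when
 * @irq is true), then set MCQ_AH8 and enable multi-interrupt mode in
 * REG_UFS_MMIO_OPT_CTRL_0.
 */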
2019 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
2020 {
2021 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
2022 	int ret = 0;
2023 
2024 	if (!host->mcq_set_intr) {
2025 		/* Disable irq option register */
2026 		ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
2027 
2028 		if (irq) {
2029 			ret = ufs_mtk_config_mcq_irq(hba);
2030 			if (ret)
2031 				return ret;
2032 		}
2033 
2034 		host->mcq_set_intr = true;
2035 	}
2036 
2037 	ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
2038 	ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
2039 
2040 	return 0;
2041 }
2042 
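/* ESI setup simply re-runs the MCQ configuration with per-queue IRQs enabled */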
2043 static int ufs_mtk_config_esi(struct ufs_hba *hba)
2044 {
2045 	return ufs_mtk_config_mcq(hba, true);
2046 }
2047 
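/*
 * Per-LU SCSI device setup: set QUEUE_FLAG_SAME_FORCE on the request
 * queue of LUN 2 so completions run on the submitting CPU.
 */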
2048 static void ufs_mtk_config_scsi_dev(struct scsi_device *sdev)
2049 {
2050 	struct ufs_hba *hba = shost_priv(sdev->host);
2051 
2052 	dev_dbg(hba->dev, "lu %llu scsi device configured\n", sdev->lun);
2053 	if (sdev->lun == 2)
2054 		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, sdev->request_queue);
2055 }
2056 
2057 /*
2058  * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
2059  *
2060  * The variant operations configure the necessary controller and PHY
2061  * handshake during initialization.
2062  */
2063 static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
2064 	.name                = "mediatek.ufshci",
2065 	.max_num_rtt         = MTK_MAX_NUM_RTT,
2066 	.init                = ufs_mtk_init,
2067 	.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
2068 	.setup_clocks        = ufs_mtk_setup_clocks,
2069 	.hce_enable_notify   = ufs_mtk_hce_enable_notify,
2070 	.link_startup_notify = ufs_mtk_link_startup_notify,
2071 	.pwr_change_notify   = ufs_mtk_pwr_change_notify,
2072 	.apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
2073 	.fixup_dev_quirks    = ufs_mtk_fixup_dev_quirks,
2074 	.suspend             = ufs_mtk_suspend,
2075 	.resume              = ufs_mtk_resume,
2076 	.dbg_register_dump   = ufs_mtk_dbg_register_dump,
2077 	.device_reset        = ufs_mtk_device_reset,
2078 	.event_notify        = ufs_mtk_event_notify,
2079 	.config_scaling_param = ufs_mtk_config_scaling_param,
2080 	.clk_scale_notify    = ufs_mtk_clk_scale_notify,
2081 	/* mcq vops */
2082 	.get_hba_mac         = ufs_mtk_get_hba_mac,
2083 	.op_runtime_config   = ufs_mtk_op_runtime_config,
2084 	.mcq_config_resource = ufs_mtk_mcq_config_resource,
2085 	.config_esi          = ufs_mtk_config_esi,
2086 	.config_scsi_dev     = ufs_mtk_config_scsi_dev,
2087 };
2088 
2089 /**
2090  * ufs_mtk_probe - probe routine of the driver
2091  * @pdev: pointer to Platform device handle
2092  *
2093  * Return: zero for success and non-zero for failure.
2094  */
2095 static int ufs_mtk_probe(struct platform_device *pdev)
2096 {
2097 	int err;
2098 	struct device *dev = &pdev->dev;
2099 	struct device_node *reset_node;
2100 	struct platform_device *reset_pdev;
2101 	struct device_link *link;
2102 
2103 	reset_node = of_find_compatible_node(NULL, NULL,
2104 					     "ti,syscon-reset");
2105 	if (!reset_node) {
2106 		dev_notice(dev, "ti,syscon-reset node not found\n");
2107 		goto skip_reset;
2108 	}
2109 	reset_pdev = of_find_device_by_node(reset_node);
2110 	if (!reset_pdev) {
2111 		dev_notice(dev, "reset platform device not found\n");
2112 		goto skip_reset;
2113 	}
2114 	link = device_link_add(dev, &reset_pdev->dev,
2115 		DL_FLAG_AUTOPROBE_CONSUMER);
2116 	put_device(&reset_pdev->dev);
2117 	if (!link) {
2118 		dev_notice(dev, "failed to add reset device_link\n");
2119 		goto skip_reset;
2120 	}
2121 	/* supplier is not probed */
2122 	if (link->status == DL_STATE_DORMANT) {
2123 		err = -EPROBE_DEFER;
2124 		goto out;
2125 	}
2126 
2127 skip_reset:
2128 	/* perform generic probe */
2129 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
2130 
2131 out:
2132 	if (err)
2133 		dev_err(dev, "probe failed %d\n", err);
2134 
2135 	of_node_put(reset_node);
2136 	return err;
2137 }
2138 
2139 /**
2140  * ufs_mtk_remove - remove routine of the driver
2141  * @pdev: pointer to platform device handle
2142  */
2145 static void ufs_mtk_remove(struct platform_device *pdev)
2146 {
2147 	ufshcd_pltfrm_remove(pdev);
2148 }
2149 
2150 #ifdef CONFIG_PM_SLEEP
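/*
 * System PM: after the generic UFS suspend, put the device regulators
 * into low-power mode and, when RTFF MTCMOS control is used, power the
 * MTCMOS domain off; resume does the reverse before the generic resume.
 */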
2151 static int ufs_mtk_system_suspend(struct device *dev)
2152 {
2153 	struct ufs_hba *hba = dev_get_drvdata(dev);
2154 	struct arm_smccc_res res;
2155 	int ret;
2156 
2157 	ret = ufshcd_system_suspend(dev);
2158 	if (ret)
2159 		return ret;
2160 
2161 	ufs_mtk_dev_vreg_set_lpm(hba, true);
2162 
2163 	if (ufs_mtk_is_rtff_mtcmos(hba))
2164 		ufs_mtk_mtcmos_ctrl(false, res);
2165 
2166 	return 0;
2167 }
2168 
2169 static int ufs_mtk_system_resume(struct device *dev)
2170 {
2171 	struct ufs_hba *hba = dev_get_drvdata(dev);
2172 	struct arm_smccc_res res;
2173 
2174 	ufs_mtk_dev_vreg_set_lpm(hba, false);
2175 
2176 	if (ufs_mtk_is_rtff_mtcmos(hba))
2177 		ufs_mtk_mtcmos_ctrl(true, res);
2178 
2179 	return ufshcd_system_resume(dev);
2180 }
2181 #endif
2182 
2183 #ifdef CONFIG_PM
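/*
 * Runtime PM mirrors the system PM flow: regulators and MTCMOS are
 * dropped after a generic runtime suspend and restored before the
 * generic runtime resume.
 */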
2184 static int ufs_mtk_runtime_suspend(struct device *dev)
2185 {
2186 	struct ufs_hba *hba = dev_get_drvdata(dev);
2187 	struct arm_smccc_res res;
2188 	int ret = 0;
2189 
2190 	ret = ufshcd_runtime_suspend(dev);
2191 	if (ret)
2192 		return ret;
2193 
2194 	ufs_mtk_dev_vreg_set_lpm(hba, true);
2195 
2196 	if (ufs_mtk_is_rtff_mtcmos(hba))
2197 		ufs_mtk_mtcmos_ctrl(false, res);
2198 
2199 	return 0;
2200 }
2201 
2202 static int ufs_mtk_runtime_resume(struct device *dev)
2203 {
2204 	struct ufs_hba *hba = dev_get_drvdata(dev);
2205 	struct arm_smccc_res res;
2206 
2207 	if (ufs_mtk_is_rtff_mtcmos(hba))
2208 		ufs_mtk_mtcmos_ctrl(true, res);
2209 
2210 	ufs_mtk_dev_vreg_set_lpm(hba, false);
2211 
2212 	return ufshcd_runtime_resume(dev);
2213 }
2214 #endif
2215 
2216 static const struct dev_pm_ops ufs_mtk_pm_ops = {
2217 	SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
2218 				ufs_mtk_system_resume)
2219 	SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
2220 			   ufs_mtk_runtime_resume, NULL)
2221 	.prepare	 = ufshcd_suspend_prepare,
2222 	.complete	 = ufshcd_resume_complete,
2223 };
2224 
2225 static struct platform_driver ufs_mtk_pltform = {
2226 	.probe      = ufs_mtk_probe,
2227 	.remove     = ufs_mtk_remove,
2228 	.driver = {
2229 		.name   = "ufshcd-mtk",
2230 		.pm     = &ufs_mtk_pm_ops,
2231 		.of_match_table = ufs_mtk_of_match,
2232 	},
2233 };
2234 
2235 MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
2236 MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
2237 MODULE_DESCRIPTION("MediaTek UFS Host Driver");
2238 MODULE_LICENSE("GPL v2");
2239 
2240 module_platform_driver(ufs_mtk_pltform);
2241