Lines Matching defs:hba

23 static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
29 err = ufshcd_dme_reset(hba);
33 err = ufshcd_dme_enable(hba);
37 return ufshcd_vops_phy_initialization(hba);
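Lines 23-37 above are fragments of the host-controller-enable notify callback in the Rockchip UFS glue driver; the parameter list is cut off at line 23, but this vops hook conventionally takes an enum ufs_notify_change_status as its second argument. A minimal sketch of how the listed calls plausibly chain, assuming the work is only done on POST_CHANGE:

static int ufs_rockchip_hce_enable_notify(struct ufs_hba *hba,
                                          enum ufs_notify_change_status status)
{
        int err;

        /* Nothing to do before the controller-enable sequence has run. */
        if (status != POST_CHANGE)
                return 0;

        /* Reset and re-enable the local UniPro stack (lines 29 and 33)... */
        err = ufshcd_dme_reset(hba);
        if (err)
                return err;

        err = ufshcd_dme_enable(hba);
        if (err)
                return err;

        /* ...then run the variant PHY setup (ufs_rockchip_rk3576_phy_init). */
        return ufshcd_vops_phy_initialization(hba);
}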
43 static void ufs_rockchip_set_pm_lvl(struct ufs_hba *hba)
45 hba->rpm_lvl = UFS_PM_LVL_5;
46 hba->spm_lvl = UFS_PM_LVL_5;
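Lines 43-46 are essentially the whole helper. Reassembled below; the device/link mapping quoted in the comment follows the UFS core's pm-level table (level 5 = device power-down, link off):

static void ufs_rockchip_set_pm_lvl(struct ufs_hba *hba)
{
        /*
         * UFS_PM_LVL_5: device in power-down mode, link off. Used as the
         * default for both runtime PM (rpm_lvl) and system PM (spm_lvl).
         */
        hba->rpm_lvl = UFS_PM_LVL_5;
        hba->spm_lvl = UFS_PM_LVL_5;
}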
49 static int ufs_rockchip_rk3576_phy_init(struct ufs_hba *hba)
51 struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
53 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(PA_LOCAL_TX_LCC_ENABLE, 0x0), 0x0);
55 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_ENABLE);
58 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, SEL_TX_LANE0 + i), 0x06);
59 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, SEL_TX_LANE0 + i), 0x02);
60 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_VALUE, SEL_TX_LANE0 + i), 0x44);
61 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, SEL_TX_LANE0 + i), 0xe6);
62 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, SEL_TX_LANE0 + i), 0x07);
63 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_TASE_VALUE, SEL_TX_LANE0 + i), 0x93);
64 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_BASE_NVALUE, SEL_TX_LANE0 + i), 0xc9);
65 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_POWER_SAVING_CTRL, SEL_TX_LANE0 + i), 0x00);
67 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, SEL_RX_LANE0 + i), 0x06);
68 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, SEL_RX_LANE0 + i), 0x00);
69 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE, SEL_RX_LANE0 + i), 0x58);
70 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE1, SEL_RX_LANE0 + i), 0x8c);
71 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_PVALUE2, SEL_RX_LANE0 + i), 0x02);
72 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_OPTION, SEL_RX_LANE0 + i), 0xf6);
73 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_POWER_SAVING_CTRL, SEL_RX_LANE0 + i), 0x69);
77 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_DISABLE);
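The selectors SEL_TX_LANE0 + i / SEL_RX_LANE0 + i in lines 58-73 imply a loop over the active lanes, bracketed by the MPHY_CFG enable/disable writes at lines 55 and 77. A sketch of that shape; the loop bound over hba->lanes_per_direction is an assumption, and the elided writes are the ones already listed above:

        /* Open the vendor M-PHY configuration window (line 55). */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_ENABLE);

        for (i = 0; i < hba->lanes_per_direction; i++) {
                /* TX lane i: clock period, line-reset timing, power saving
                 * (the VND_TX_* writes listed at lines 58-65). */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, SEL_TX_LANE0 + i), 0x06);
                /* ... */

                /* RX lane i: clock period, line-reset timing, power saving
                 * (the VND_RX_* writes listed at lines 67-73). */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, SEL_RX_LANE0 + i), 0x06);
                /* ... */
        }

        /* Close the configuration window again (line 77). */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MPHY_CFG, 0x0), MPHY_CFG_DISABLE);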
118 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_TX_ENDIAN, 0), 0x0);
119 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_RX_ENDIAN, 0), 0x0);
120 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID, 0), 0x0);
121 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID_VALID, 0), 0x1);
122 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_PEERDEVICEID, 0), 0x1);
123 ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_CONNECTIONSTATE, 0), 0x1);
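Lines 118-123 end the PHY setup by configuring the UniPro management connection on CPort 0. Annotated below; the trailing return is assumed:

        /* CPort debug endianness (lines 118-119), then a boot-style
         * connection setup: local DeviceID 0 (marked valid), peer DeviceID 1,
         * and finally T_CONNECTIONSTATE = 1 so the management CPort on the
         * link is usable. */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_TX_ENDIAN, 0), 0x0);
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(MIB_T_DBG_CPORT_RX_ENDIAN, 0), 0x0);
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID, 0), 0x0);
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(N_DEVICEID_VALID, 0), 0x1);
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_PEERDEVICEID, 0), 0x1);
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(T_CONNECTIONSTATE, 0), 0x1);

        return 0;
}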
128 static int ufs_rockchip_common_init(struct ufs_hba *hba)
130 struct device *dev = hba->dev;
177 host->hba = hba;
179 ufshcd_set_variant(hba, host);
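Lines 128-179 only show the entry and exit of the common init helper; everything between lines 131 and 176 (resources, clocks, resets) is not visible here and is not reconstructed. A minimal sketch of the shape the fragments imply, assuming the private host data is devm-allocated:

static int ufs_rockchip_common_init(struct ufs_hba *hba)
{
        struct device *dev = hba->dev;
        struct ufs_rockchip_host *host;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        /* ... platform resources, clocks, resets (lines 131-176, not shown) ... */

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        return 0;
}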
184 static int ufs_rockchip_rk3576_init(struct ufs_hba *hba)
186 struct device *dev = hba->dev;
189 hba->quirks = UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;
192 hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
194 hba->caps |= UFSHCD_CAP_DEEPSLEEP;
196 hba->caps |= UFSHCD_CAP_CLK_SCALING;
198 hba->caps |= UFSHCD_CAP_WB_EN;
201 ufs_rockchip_set_pm_lvl(hba);
203 ret = ufs_rockchip_common_init(hba);
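Lines 184-203 read as a straightforward variant init: set the quirk, advertise optional capabilities, pick the PM levels, then run the common init. A sketch of that assembly; the error-reporting message is illustrative only:

static int ufs_rockchip_rk3576_init(struct ufs_hba *hba)
{
        struct device *dev = hba->dev;
        int ret;

        hba->quirks = UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING;

        /* Optional host capabilities listed at lines 192-198. */
        hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
        hba->caps |= UFSHCD_CAP_CLK_SCALING;
        hba->caps |= UFSHCD_CAP_WB_EN;

        ufs_rockchip_set_pm_lvl(hba);

        ret = ufs_rockchip_common_init(hba);
        if (ret)
                return dev_err_probe(dev, ret, "ufs common init failed\n");

        return 0;
}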
210 static int ufs_rockchip_device_reset(struct ufs_hba *hba)
212 struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
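Only the entry of the device-reset handler is listed (lines 210-212). For context, a sketch of how the callbacks listed so far are typically registered with the UFS core through a variant-ops table; the member selection and the name string are assumptions, while the member names themselves are real fields of struct ufs_hba_variant_ops:

static const struct ufs_hba_variant_ops ufs_rockchip_rk3576_vops = {
        .name                   = "ufs-rockchip-rk3576",       /* illustrative */
        .init                   = ufs_rockchip_rk3576_init,
        .phy_initialization     = ufs_rockchip_rk3576_phy_init,
        .hce_enable_notify      = ufs_rockchip_hce_enable_notify,
        .device_reset           = ufs_rockchip_device_reset,
};

ufshcd_vops_phy_initialization() at line 37 dispatches through the .phy_initialization member of such a table.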
262 struct ufs_hba *hba = dev_get_drvdata(dev);
263 struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
268 dev_pm_genpd_rpm_always_on(dev, hba->rpm_lvl < UFS_PM_LVL_5);
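Lines 262-268 belong to the runtime-suspend wrapper. A sketch of the likely body; the clock field name ref_out_clk is inferred from the "ref_out clock" message at line 281, and the gating order is an assumption:

static int ufs_rockchip_runtime_suspend(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_rockchip_host *host = ufshcd_get_variant(hba);

        /* Gate the reference clock supplied to the UFS device (assumed). */
        clk_disable_unprepare(host->ref_out_clk);

        /* Keep the power domain on unless rpm_lvl permits a full power-down. */
        dev_pm_genpd_rpm_always_on(dev, hba->rpm_lvl < UFS_PM_LVL_5);

        return ufshcd_runtime_suspend(dev);
}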
275 struct ufs_hba *hba = dev_get_drvdata(dev);
276 struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
281 dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
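Lines 275-281 are the runtime-resume counterpart: re-enable the reference clock and hand over to the core. A sketch, with the same assumed ref_out_clk field:

static int ufs_rockchip_runtime_resume(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
        int err;

        err = clk_prepare_enable(host->ref_out_clk);
        if (err) {
                dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
                return err;
        }

        return ufshcd_runtime_resume(dev);
}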
296 struct ufs_hba *hba = dev_get_drvdata(dev);
297 struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
305 if (hba->spm_lvl < UFS_PM_LVL_5)
310 dev_err(hba->dev, "UFSHCD system suspend failed %d\n", err);
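Lines 296-310 come from the system-suspend wrapper. What the spm_lvl check at line 305 guards is not visible in this listing; the sketch below is a placeholder that keeps the intent (do not gate the reference clock unless a full power-down is allowed) rather than a reconstruction:

static int ufs_rockchip_system_suspend(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
        int err;

        err = ufshcd_system_suspend(dev);
        if (err) {
                dev_err(hba->dev, "UFSHCD system suspend failed %d\n", err);
                return err;
        }

        /* Assumption: only gate the device reference clock when spm_lvl
         * allows a full power-down; below level 5 the link stays up and
         * still needs it (the listed check at line 305 sits earlier in the
         * real function, guarding details not shown here). */
        if (hba->spm_lvl < UFS_PM_LVL_5)
                return 0;

        clk_disable_unprepare(host->ref_out_clk);

        return 0;
}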
321 struct ufs_hba *hba = dev_get_drvdata(dev);
322 struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
327 dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
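Lines 321-327 mirror the runtime-resume path on the system-resume side. A sketch; if the suspend side left ref_out_clk running for spm_lvl below 5, a symmetric check (not visible in the listing) would be needed before this enable:

static int ufs_rockchip_system_resume(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct ufs_rockchip_host *host = ufshcd_get_variant(hba);
        int err;

        err = clk_prepare_enable(host->ref_out_clk);
        if (err) {
                dev_err(hba->dev, "failed to enable ref_out clock %d\n", err);
                return err;
        }

        return ufshcd_system_resume(dev);
}

These four wrappers are typically exposed through the driver's dev_pm_ops with RUNTIME_PM_OPS()/SYSTEM_SLEEP_PM_OPS(), alongside the UFS core's ufshcd_suspend_prepare() and ufshcd_resume_complete() hooks.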