Lines matching "+full:clk +full:-phase +full:-…" (source cross-reference results; the trailing query term is truncated). All hits below come from the STM32 SDMMC variant of the MMCI host driver, grouped by the function they appear in.
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
 * ...
 */
...
#include <linux/dma-mapping.h>
...

In struct sdmmc_tuning_ops (attribution of this fragment inferred from usage):
        ...
                       int phase, bool sampler __maybe_unused);
In sdmmc_idma_validate_data():
        struct sdmmc_idma *idma = host->dma_priv;
        struct device *dev = mmc_dev(host->mmc);
        ...
        idma->use_bounce_buffer = false;
        for_each_sg(data->sg, sg, data->sg_len - 1, i) {
                if (!IS_ALIGNED(sg->offset, sizeof(u32)) ||
                    !IS_ALIGNED(sg->length,
                                host->variant->stm32_idmabsize_align)) {
                        dev_dbg(mmc_dev(host->mmc),
                                ...
                                data->sg->offset, data->sg->length);
                        ...
        if (!IS_ALIGNED(sg->offset, sizeof(u32))) {
                dev_dbg(mmc_dev(host->mmc),
                        ...
                        data->sg->offset, data->sg->length);
                ...
        if (!idma->bounce_buf) {
                idma->bounce_buf = dmam_alloc_coherent(dev,
                                                       host->mmc->max_req_size,
                                                       &idma->bounce_dma_addr,
                                                       GFP_KERNEL);
                if (!idma->bounce_buf) {
                        ...
                        return -ENOMEM;
                }
        }
        ...
        idma->use_bounce_buffer = true;
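
Note: the checks above encode the IDMA constraint that every scatterlist element except the last must start on a u32 boundary and have a length aligned to the variant's stm32_idmabsize_align; the last element only needs the aligned offset. If any element violates this, the whole request is routed through one coherent bounce buffer instead. A standalone sketch of that decision (struct seg and the alignment value are illustrative stand-ins, not driver API):

        #include <stdbool.h>
        #include <stddef.h>
        #include <stdio.h>

        struct seg { size_t offset, length; };

        /* Same contract as the kernel's IS_ALIGNED(x, a) for power-of-two a. */
        static bool is_aligned(size_t x, size_t a)
        {
                return (x & (a - 1)) == 0;
        }

        static bool needs_bounce(const struct seg *sg, int n, size_t len_align)
        {
                /* All but the last element: aligned offset and length. */
                for (int i = 0; i < n - 1; i++)
                        if (!is_aligned(sg[i].offset, sizeof(unsigned int)) ||
                            !is_aligned(sg[i].length, len_align))
                                return true;
                /* Last element: only the offset must be u32-aligned. */
                return !is_aligned(sg[n - 1].offset, sizeof(unsigned int));
        }

        int main(void)
        {
                struct seg sg[] = { { 0, 512 }, { 4, 512 }, { 2, 100 } };

                /* Last element's offset (2) is not u32-aligned -> bounce. */
                printf("bounce: %d\n", needs_bounce(sg, 3, 4));
                return 0;
        }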
In _sdmmc_idma_prep_data():
        struct sdmmc_idma *idma = host->dma_priv;
        ...
        if (idma->use_bounce_buffer) {
                if (data->flags & MMC_DATA_WRITE) {
                        unsigned int xfer_bytes = data->blksz * data->blocks;

                        sg_copy_to_buffer(data->sg, data->sg_len,
                                          idma->bounce_buf, xfer_bytes);
                }
        } else {
                n_elem = dma_map_sg(mmc_dev(host->mmc),
                                    data->sg,
                                    data->sg_len,
                                    mmc_get_dma_dir(data));
                if (!n_elem) {
                        dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n");
                        return -EINVAL;
                }
        }

In sdmmc_idma_prep_data():
        ...
        if (!next && data->host_cookie == host->next_cookie)
                ...
In sdmmc_idma_unprep_data():
        struct sdmmc_idma *idma = host->dma_priv;

        if (idma->use_bounce_buffer) {
                if (data->flags & MMC_DATA_READ) {
                        unsigned int xfer_bytes = data->blksz * data->blocks;

                        sg_copy_from_buffer(data->sg, data->sg_len,
                                            idma->bounce_buf, xfer_bytes);
                }
        } else {
                dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                             mmc_get_dma_dir(data));
        }
In sdmmc_idma_setup():
        struct device *dev = mmc_dev(host->mmc);
        ...
        if (!idma)
                return -ENOMEM;

        host->dma_priv = idma;

        if (host->variant->dma_lli) {
                idma->sg_cpu = dmam_alloc_coherent(dev, SDMMC_LLI_BUF_LEN,
                                                   &idma->sg_dma, GFP_KERNEL);
                if (!idma->sg_cpu) {
                        ...
                        return -ENOMEM;
                }
                host->mmc->max_segs = SDMMC_LLI_BUF_LEN /
                        sizeof(struct sdmmc_lli_desc);
                host->mmc->max_seg_size = host->variant->stm32_idmabsize_mask;
                ...
                host->mmc->max_req_size = SZ_1M;
        } else {
                host->mmc->max_segs = 1;
                host->mmc->max_seg_size = host->mmc->max_req_size;
        }

        return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
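
Note: with dma_lli set, max_segs is simply how many linked-list descriptors fit in the coherent buffer; each descriptor is three u32s (link, base, size; see sdmmc_idma_start() below). Assuming SDMMC_LLI_BUF_LEN is one 4 KiB page (treat that value as an assumption here), that gives 4096 / 12 = 341 segments. A sketch of the arithmetic:

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Field names mirror the descriptor used by sdmmc_idma_start(). */
        struct sdmmc_lli_desc {
                uint32_t idmalar;       /* link to the next descriptor */
                uint32_t idmabase;      /* buffer address */
                uint32_t idmasize;      /* transfer size */
        };

        int main(void)
        {
                const size_t lli_buf_len = 4096; /* assumed SDMMC_LLI_BUF_LEN */

                printf("desc: %zu bytes, max_segs: %zu\n",
                       sizeof(struct sdmmc_lli_desc),
                       lli_buf_len / sizeof(struct sdmmc_lli_desc));
                return 0;
        }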
In sdmmc_idma_start():
        struct sdmmc_idma *idma = host->dma_priv;
        struct sdmmc_lli_desc *desc = (struct sdmmc_lli_desc *)idma->sg_cpu;
        struct mmc_data *data = host->data;
        ...
        host->dma_in_progress = true;

        if (!host->variant->dma_lli || data->sg_len == 1 ||
            idma->use_bounce_buffer) {
                ...
                if (idma->use_bounce_buffer)
                        dma_addr = idma->bounce_dma_addr;
                else
                        dma_addr = sg_dma_address(data->sg);

                writel_relaxed(...,
                               host->base + MMCI_STM32_IDMABASE0R);
                writel_relaxed(...,
                               host->base + MMCI_STM32_IDMACTRLR);
                ...
        }

        for_each_sg(data->sg, sg, data->sg_len, i) {
                ...
        }
        desc[data->sg_len - 1].idmalar &= ~MMCI_STM32_ULA;
        ...
        writel_relaxed(idma->sg_dma, host->base + MMCI_STM32_IDMABAR);
        writel_relaxed(desc[0].idmalar, host->base + MMCI_STM32_IDMALAR);
        writel_relaxed(desc[0].idmabase, host->base + MMCI_STM32_IDMABASE0R);
        writel_relaxed(desc[0].idmasize, host->base + MMCI_STM32_IDMABSIZER);
        writel_relaxed(...,
                       host->base + MMCI_STM32_IDMACTRLR);
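
Note: in the LLI path each descriptor's idmalar points at the next descriptor in the coherent buffer, and clearing MMCI_STM32_ULA on the last entry terminates the chain, exactly as the line above does. A reduced sketch of that linking (the ULA bit value and the fill loop are assumptions; the per-segment fill is elided in the listing):

        #include <stdint.h>
        #include <stdio.h>

        struct sdmmc_lli_desc { uint32_t idmalar, idmabase, idmasize; };

        #define ULA 0x1u        /* hypothetical stand-in for MMCI_STM32_ULA */

        static void link_chain(struct sdmmc_lli_desc *desc, int n,
                               uint32_t sg_dma)
        {
                for (int i = 0; i < n; i++)
                        desc[i].idmalar =
                                (sg_dma + (i + 1) * sizeof(*desc)) | ULA;
                desc[n - 1].idmalar &= ~ULA;    /* terminate the list */
        }

        int main(void)
        {
                struct sdmmc_lli_desc d[3] = { 0 };

                link_chain(d, 3, 0x1000);
                for (int i = 0; i < 3; i++)
                        printf("desc[%d].idmalar = 0x%x\n", i,
                               (unsigned int)d[i].idmalar);
                return 0;
        }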
In sdmmc_idma_error():
        struct mmc_data *data = host->data;
        struct sdmmc_idma *idma = host->dma_priv;
        ...
        writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
        host->dma_in_progress = false;
        data->host_cookie = 0;

        if (!idma->use_bounce_buffer)
                dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
                             mmc_get_dma_dir(data));
In sdmmc_idma_finalize():
        writel_relaxed(0, host->base + MMCI_STM32_IDMACTRLR);
        host->dma_in_progress = false;

        if (!data->host_cookie)
                ...
In mmci_sdmmc_set_clkreg():
        unsigned int clk = 0, ddr = 0;

        if (host->mmc->ios.timing == MMC_TIMING_MMC_DDR52 ||
            host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
                ...
        ...
        if (desired >= host->mclk && !ddr) {
                host->cclk = host->mclk;
        } else {
                clk = DIV_ROUND_UP(host->mclk, 2 * desired);
                if (clk > MCI_STM32_CLK_CLKDIV_MSK)
                        clk = MCI_STM32_CLK_CLKDIV_MSK;
                host->cclk = host->mclk / (2 * clk);
        }
        ...
        /*
         * During the power-on phase the clock can't be set to 0;
         * only power-off and power-cycle deactivate the clock.
         */
        clk = MCI_STM32_CLK_CLKDIV_MSK;
        host->cclk = host->mclk / (2 * clk);
        ...
        if (host->mmc->ios.power_mode == MMC_POWER_ON)
                host->mmc->actual_clock = host->cclk;
        else
                host->mmc->actual_clock = 0;

        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
                clk |= MCI_STM32_CLK_WIDEBUS_4;
        if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
                clk |= MCI_STM32_CLK_WIDEBUS_8;

        clk |= MCI_STM32_CLK_HWFCEN;
        clk |= host->clk_reg_add;
        clk |= ddr;

        if (host->mmc->ios.timing >= MMC_TIMING_UHS_SDR50)
                clk |= MCI_STM32_CLK_BUSSPEED;

        mmci_write_clkreg(host, clk);
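
Note: the divider model here is cclk = mclk / (2 * clkdiv), with clkdiv = DIV_ROUND_UP(mclk, 2 * desired) so the result never exceeds the requested rate. Worked example with assumed rates (100 MHz module clock, 400 kHz target):

        #include <stdio.h>

        /* Same rounding as the kernel's DIV_ROUND_UP(). */
        #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        int main(void)
        {
                unsigned long mclk = 100000000;  /* assumed module clock */
                unsigned long desired = 400000;  /* assumed target rate */
                unsigned long clkdiv, cclk;

                clkdiv = DIV_ROUND_UP(mclk, 2 * desired);       /* 125 */
                cclk = mclk / (2 * clkdiv);                     /* 400000 */
                printf("clkdiv=%lu -> cclk=%lu Hz\n", clkdiv, cclk);
                return 0;
        }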
In sdmmc_dlyb_mp15_input_ck():
        if (!dlyb || !dlyb->base)
                return;
        ...
        writel_relaxed(0, dlyb->base + DLYB_CR);
In mmci_sdmmc_set_pwrreg():
        struct mmc_ios ios = host->mmc->ios;
        struct sdmmc_dlyb *dlyb = host->variant_priv;
        ...
        pwr = host->pwr_reg_add;

        if (dlyb && dlyb->ops->set_input_ck)
                dlyb->ops->set_input_ck(dlyb);
        ...
                /* Only a reset can power off the SDMMC */
                reset_control_assert(host->rst);
                ...
                reset_control_deassert(host->rst);

                /*
                 * Set the SDMMC in Power-cycle state.
                 * ...
                 */
                ...
                /*
                 * After power-off (reset): the irq mask defined in probe
                 * ...
                 */
                writel(MCI_IRQENABLE | host->variant->start_err,
                       host->base + MMCIMASK0);
                ...
                pwr |= host->pwr_reg & (MCI_STM32_VSWITCHEN |
                                        MCI_STM32_VSWITCH);
                ...
                /*
                 * After a power-cycle state, we must set the SDMMC in
                 * Power-off; SDMMC_D[7:0], SDMMC_CMD and SDMMC_CK are then
                 * driven high. Only then can we set the SDMMC to the
                 * Power-on state.
                 */
In sdmmc_get_dctrl_cfg():
        ...
        if (host->hw_revision >= 3) {
                ...
                if (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104 ||
                    host->mmc->ios.timing == MMC_TIMING_MMC_HS200) {
                        thr = ffs(min_t(unsigned int, host->data->blksz,
                                        host->variant->fifosize));
                        ...
                }

                writel_relaxed(thr, host->base + MMCI_STM32_FIFOTHRR);
        }

        if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
            host->data->blocks == 1)
                ...
        else if (host->data->stop && !host->mrq->sbc)
                ...
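
Note: on hardware revision >= 3 the FIFO threshold is ffs(min(blksz, fifosize)), i.e. the 1-based index of the lowest set bit, which for a power of two is log2 + 1. With an assumed 512-byte block and 64-byte FIFO, thr = ffs(64) = 7:

        #include <stdio.h>
        #include <strings.h>    /* POSIX ffs(), analogous to the kernel helper */

        int main(void)
        {
                unsigned int blksz = 512, fifosize = 64; /* assumed values */
                unsigned int m = blksz < fifosize ? blksz : fifosize;

                printf("thr = %d\n", ffs((int)m)); /* 7 for m == 64 */
                return 0;
        }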
In sdmmc_busy_complete():
        void __iomem *base = host->base;
        ...
        /*
         * ...
         * if busy_d0 is in progress, we must activate the busyd0end
         * interrupt
         * ...
         */
        ...
        if (!host->busy_status) {
                writel_relaxed(mask | host->variant->busy_detect_mask,
                               base + MMCIMASK0);
                host->busy_status = status &
                        ...
        }
        ...
        if (host->busy_status) {
                writel_relaxed(mask & ~host->variant->busy_detect_mask,
                               base + MMCIMASK0);
                host->busy_status = 0;
        }

        writel_relaxed(host->variant->busy_detect_mask, base + MMCICLEAR);
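
Note: sdmmc_busy_complete() runs once per interrupt. On the first pass (busy_status still 0) it unmasks the variant's busy_detect_mask and latches the command status; on a later pass it re-masks the bit, clears busy_status and acknowledges the flag via MMCICLEAR. A toy two-pass version of that arm/disarm flow (the register variables and mask value are stand-ins):

        #include <stdio.h>

        #define BUSYD0END 0x200u        /* hypothetical busy_detect_mask */

        static unsigned int imask;      /* stand-in for MMCIMASK0 */
        static unsigned int busy_status;

        /* Returns 1 while busy handling is still in flight. */
        static int busy_complete(unsigned int status)
        {
                if (!busy_status) {
                        imask |= BUSYD0END;     /* arm busy-end irq */
                        busy_status = status;
                        return 1;
                }
                imask &= ~BUSYD0END;            /* disarm and finish */
                busy_status = 0;
                return 0;
        }

        int main(void)
        {
                int r = busy_complete(0x40);

                printf("first:  %d imask=0x%x\n", r, imask);
                r = busy_complete(0x40);
                printf("second: %d imask=0x%x\n", r, imask);
                return 0;
        }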
In sdmmc_dlyb_mp15_enable():
        writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
In sdmmc_dlyb_mp15_set_cfg():
        (args: struct sdmmc_dlyb *dlyb, int unit, int phase, bool sampler)
        ...
        writel_relaxed(DLYB_CR_SEN | DLYB_CR_DEN, dlyb->base + DLYB_CR);
        ...
        cfgr = FIELD_PREP(DLYB_CFGR_UNIT_MASK, unit) |
               FIELD_PREP(DLYB_CFGR_SEL_MASK, phase);
        writel_relaxed(cfgr, dlyb->base + DLYB_CFGR);
        ...
        writel_relaxed(DLYB_CR_DEN, dlyb->base + DLYB_CR);
In sdmmc_dlyb_mp15_prepare():
        struct sdmmc_dlyb *dlyb = host->variant_priv;
        ...
                dlyb->ops->set_cfg(dlyb, i, DLYB_CFGR_SEL_MAX, true);
                ...
                ret = readl_relaxed_poll_timeout(dlyb->base + DLYB_CFGR, cfgr,
                                                 ...);
                ...
                        dev_warn(mmc_dev(host->mmc),
                                 ...);
        ...
                return -EINVAL;

        dlyb->unit = i;
        dlyb->max = __fls(lng);
In sdmmc_dlyb_mp25_enable():
        cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
        ...
        writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);

        return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
                                          ...);
In sdmmc_dlyb_mp25_set_cfg():
        (args: struct sdmmc_dlyb *dlyb, int unit __maybe_unused, int phase,
               bool sampler __maybe_unused)
        ...
        cr = readl_relaxed(dlyb->base + SYSCFG_DLYBSD_CR);
        ...
        cr |= FIELD_PREP(DLYBSD_CR_RXTAPSEL_MASK, phase);
        ...
        writel_relaxed(cr, dlyb->base + SYSCFG_DLYBSD_CR);

        return readl_relaxed_poll_timeout(dlyb->base + SYSCFG_DLYBSD_SR,
                                          ...);
In sdmmc_dlyb_mp25_prepare():
        struct sdmmc_dlyb *dlyb = host->variant_priv;

        dlyb->max = DLYBSD_TAPSEL_NB;
        ...
In sdmmc_dlyb_phase_tuning():
        struct sdmmc_dlyb *dlyb = host->variant_priv;
        ...
        int phase, ret;

        for (phase = 0; phase <= dlyb->max; phase++) {
                ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
                if (ret) {
                        dev_err(mmc_dev(host->mmc), "tuning config failed\n");
                        return ret;
                }
                if (mmc_send_tuning(host->mmc, opcode, NULL)) {
                        ...
                } else {
                        ...
                        end_of_len = phase;
                }
        }
        if (!max_len) {
                dev_err(mmc_dev(host->mmc), "no tuning point found\n");
                return -EINVAL;
        }
        if (dlyb->ops->set_input_ck)
                dlyb->ops->set_input_ck(dlyb);

        phase = end_of_len - max_len / 2;
        ret = dlyb->ops->set_cfg(dlyb, dlyb->unit, phase, false);
        if (ret)
                dev_err(mmc_dev(host->mmc), "tuning reconfig failed\n");
        ...
        dev_dbg(mmc_dev(host->mmc), "unit:%d max_dly:%d phase:%d\n",
                dlyb->unit, dlyb->max, phase);
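
Note: the loop records the longest run of phases for which mmc_send_tuning() succeeds (length max_len, ending at end_of_len) and then programs the middle of that run: phase = end_of_len - max_len / 2. Worked example with an assumed pass/fail pattern:

        #include <stdio.h>

        int main(void)
        {
                /* Assumed tuning result per phase: 1 = pass, 0 = fail. */
                int pass[12] = { 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0 };
                int cur_len = 0, max_len = 0, end_of_len = 0;

                for (int phase = 0; phase < 12; phase++) {
                        if (!pass[phase]) {
                                cur_len = 0;
                        } else if (++cur_len > max_len) {
                                max_len = cur_len;
                                end_of_len = phase;
                        }
                }
                /* Longest window is phases 2..7 -> centre 7 - 6/2 = 4. */
                printf("phase = %d\n", end_of_len - max_len / 2);
                return 0;
        }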
In sdmmc_execute_tuning():
        struct sdmmc_dlyb *dlyb = host->variant_priv;
        u32 clk;
        ...
        if ((host->mmc->ios.timing != MMC_TIMING_UHS_SDR104 &&
             host->mmc->ios.timing != MMC_TIMING_MMC_HS200) ||
            host->mmc->actual_clock <= 50000000)
                return 0;

        if (!dlyb || !dlyb->base)
                return -EINVAL;

        ret = dlyb->ops->dlyb_enable(dlyb);
        ...
        clk = host->clk_reg;
        clk &= ~MCI_STM32_CLK_SEL_MSK;
        clk |= MCI_STM32_CLK_SELFBCK;
        mmci_write_clkreg(host, clk);

        ret = dlyb->ops->tuning_prepare(host);
        ...
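
Note: before tuning, the receive clock is retargeted to the self feedback path with a read-modify-write of the cached clock register: clear the MCI_STM32_CLK_SEL_MSK field, then set MCI_STM32_CLK_SELFBCK. The same field-update idiom, with hypothetical bit positions (the real masks live in the mmci header):

        #include <stdio.h>

        #define CLK_SEL_MSK (0x3u << 10)  /* assumed 2-bit select field */
        #define CLK_SELFBCK (0x1u << 10)  /* assumed self-feedback value */

        int main(void)
        {
                unsigned int clk = 0xabcd;      /* arbitrary cached value */

                clk &= ~CLK_SEL_MSK;    /* clear the rx clock select field */
                clk |= CLK_SELFBCK;     /* select the feedback clock */
                printf("clk = 0x%x\n", clk);    /* 0xa7cd */
                return 0;
        }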
In sdmmc_pre_sig_volt_vswitch():
        ...
        writel_relaxed(MCI_STM32_VSWENDC, host->base + MMCICLEAR);

        mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCHEN);
In sdmmc_post_sig_volt_switch():
        spin_lock_irqsave(&host->lock, flags);
        if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180 &&
            host->pwr_reg & MCI_STM32_VSWITCHEN) {
                mmci_write_pwrreg(host, host->pwr_reg | MCI_STM32_VSWITCH);
                spin_unlock_irqrestore(&host->lock, flags);
                ...
                ret = readl_relaxed_poll_timeout(host->base + MMCISTATUS,
                                                 ...);
                ...
                writel_relaxed(...,
                               host->base + MMCICLEAR);
                spin_lock_irqsave(&host->lock, flags);
                mmci_write_pwrreg(host, host->pwr_reg &
                                  ~(MCI_STM32_VSWITCHEN | MCI_STM32_VSWITCH));
        }
        spin_unlock_irqrestore(&host->lock, flags);
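
Note: the 1.8 V switch completes by polling MMCISTATUS with readl_relaxed_poll_timeout() and only then dropping the VSWITCH/VSWITCHEN bits. A minimal user-space analogue of that poll-until-flag-or-timeout pattern (the fake status source is an assumption):

        #include <stdio.h>

        /* Fake "register": the flag raises on the 4th read. */
        static unsigned int read_status(int iter)
        {
                return iter >= 3 ? 0x2u : 0;
        }

        /* Rough analogue of readl_relaxed_poll_timeout(): 0 on success,
         * -1 when the bit never raised (the kernel returns -ETIMEDOUT). */
        static int poll_timeout(unsigned int mask, int max_iters)
        {
                for (int i = 0; i < max_iters; i++)
                        if (read_status(i) & mask)
                                return 0;
                return -1;
        }

        int main(void)
        {
                printf("vswend: %s\n",
                       poll_timeout(0x2u, 10) ? "timeout" : "done");
                return 0;
        }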
In sdmmc_variant_init():
        struct device_node *np = host->mmc->parent->of_node;
        ...
        host->ops = &sdmmc_variant_ops;
        host->pwr_reg = readl_relaxed(host->base + MMCIPOWER);

        base_dlyb = devm_of_iomap(mmc_dev(host->mmc), np, 1, NULL);
        ...
        dlyb = devm_kzalloc(mmc_dev(host->mmc), sizeof(*dlyb), GFP_KERNEL);
        ...
        dlyb->base = base_dlyb;
        if (of_device_is_compatible(np, "st,stm32mp25-sdmmc2"))
                dlyb->ops = &dlyb_tuning_mp25_ops;
        else
                dlyb->ops = &dlyb_tuning_mp15_ops;

        host->variant_priv = dlyb;
        host->mmc_ops->execute_tuning = sdmmc_execute_tuning;
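
Note: the init path selects the delay-block implementation by device-tree compatible: "st,stm32mp25-sdmmc2" gets the SYSCFG-based mp25 ops, everything else the DLYB_CR/DLYB_CFGR-based mp15 ops, and execute_tuning is wired up only once a delay block exists. A reduced sketch of the same ops-table dispatch (the stub bodies are hypothetical):

        #include <stdio.h>
        #include <string.h>

        struct dlyb;

        struct tuning_ops {                     /* cf. sdmmc_tuning_ops */
                int (*dlyb_enable)(struct dlyb *d);
        };

        static int mp15_enable(struct dlyb *d)
        {
                puts("enable via DLYB_CR");
                return 0;
        }

        static int mp25_enable(struct dlyb *d)
        {
                puts("enable via SYSCFG_DLYBSD_CR");
                return 0;
        }

        static const struct tuning_ops mp15_ops = { mp15_enable };
        static const struct tuning_ops mp25_ops = { mp25_enable };

        int main(void)
        {
                /* Stand-in for of_device_is_compatible(np, ...). */
                const char *compat = "st,stm32mp25-sdmmc2";
                const struct tuning_ops *ops =
                        strcmp(compat, "st,stm32mp25-sdmmc2") ?
                        &mp15_ops : &mp25_ops;

                return ops->dlyb_enable(NULL);
        }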