Lines Matching +full:s900 +full:- +full:dma
1 // SPDX-License-Identifier: GPL-2.0+
3 // Actions Semi Owl SoCs DMA driver
6 // Author: David Liu <liuwei@actions-semi.com>
15 #include <linux/dma-mapping.h>
26 #include "virt-dma.h"
30 /* Global DMA Controller Registers */
121 ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift))
127 * enum owl_dmadesc_offsets - Describes the DMA descriptor, the hardware link
128 * list entry used for a DMA transfer
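The OWL_DMADESC_* names index 32-bit words inside that hardware descriptor. A minimal sketch of the enum, reconstructed from the accesses shown in this listing; the exact word order is an assumption based on the register layout, and trailing reserved entries are omitted:

enum owl_dmadesc_offsets {
	OWL_DMADESC_NEXT_LLI = 0,	/* physical address of the next link list */
	OWL_DMADESC_SADDR,		/* source physical address */
	OWL_DMADESC_DADDR,		/* destination physical address */
	OWL_DMADESC_FLEN,		/* frame length (shares bits with frame count on S900) */
	OWL_DMADESC_SRC_STRIDE,		/* source stride */
	OWL_DMADESC_DST_STRIDE,		/* destination stride */
	OWL_DMADESC_CTRLA,		/* dma mode and link-list control */
	OWL_DMADESC_CTRLB,		/* interrupt control (shares bits with frame count on S700) */

	OWL_DMADESC_SIZE		/* number of 32-bit words per descriptor */
};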
159 * struct owl_dma_lli - Link list entry for a DMA transfer
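A sketch of that link-list item, inferred from the lli->hw[...], lli->phys and lli->node accesses further down; types are assumptions:

struct owl_dma_lli {
	u32			hw[OWL_DMADESC_SIZE];	/* hardware descriptor words */
	dma_addr_t		phys;			/* DMA address of this descriptor (from the lli_pool) */
	struct list_head	node;			/* node in the owning txd's lli_list */
};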
171 * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
172 * @vd: virtual DMA descriptor
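A sketch of the transaction wrapper, based on the txd->vd, txd->lli_list and txd->cyclic uses below:

struct owl_dma_txd {
	struct virt_dma_desc	vd;		/* virtual DMA descriptor */
	struct list_head	lli_list;	/* link-list items making up this transfer */
	bool			cyclic;		/* true for cyclic (circular buffer) transfers */
};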
183 * struct owl_dma_pchan - Holder for the physical channels
185 * @base: virtual memory base for the DMA channel
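A sketch of the physical channel holder, based on the pchan->id, pchan->base and pchan->vchan accesses below:

struct owl_dma_pchan {
	u32			id;	/* physical channel index */
	void __iomem		*base;	/* virtual memory base for the DMA channel */
	struct owl_dma_vchan	*vchan;	/* virtual channel being served, NULL when free */
};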
195 * struct owl_dma_vchan - Wrapper for DMA ENGINE channel
200 * @drq: physical DMA request ID for this channel
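A sketch of that dmaengine channel wrapper, based on the vchan->vc, ->pchan, ->txd, ->cfg and ->drq uses below:

struct owl_dma_vchan {
	struct virt_dma_chan	vc;	/* wrapped virtual channel */
	struct owl_dma_pchan	*pchan;	/* physical channel currently utilized, if any */
	struct owl_dma_txd	*txd;	/* transaction currently being executed */
	struct dma_slave_config	cfg;	/* slave configuration for this channel */
	u8			drq;	/* physical DMA request ID for this channel */
};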
211 * struct owl_dma - Holder for the Owl DMA controller
212 * @dma: dma engine for this instance
213 * @base: virtual memory base for the DMA controller
214 * @clk: clock for the DMA controller
215 * @lock: a lock to use when changing the DMA controller's global registers
217 * @irq: interrupt ID for the DMA controller
225 struct dma_device dma; member
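A sketch of the controller holder, reconstructed from the od-> accesses in the rest of the listing; members not referenced here are omitted and types are assumptions:

struct owl_dma {
	struct dma_device	dma;		/* dma engine for this instance */
	void __iomem		*base;		/* virtual memory base for the DMA controller */
	struct clk		*clk;		/* clock for the DMA controller */
	spinlock_t		lock;		/* protects the controller's global registers */
	struct dma_pool		*lli_pool;	/* pool the hardware link-list items come from */
	int			irq;		/* interrupt ID for the DMA controller */
	unsigned int		nr_pchans;	/* number of physical channels */
	struct owl_dma_pchan	*pchans;	/* array of physical channels */
	unsigned int		nr_vchans;	/* number of virtual (request) channels */
	struct owl_dma_vchan	*vchans;	/* array of virtual channels */
	enum owl_dma_id		devid;		/* S900_DMA or S700_DMA */
};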
245 regval = readl(pchan->base + reg); in pchan_update()
252 writel(val, pchan->base + reg); in pchan_update()
257 writel(data, pchan->base + reg); in pchan_writel()
262 return readl(pchan->base + reg); in pchan_readl()
269 regval = readl(od->base + reg); in dma_update()
276 writel(val, od->base + reg); in dma_update()
281 writel(data, od->base + reg); in dma_writel()
286 return readl(od->base + reg); in dma_readl()
291 return container_of(dd, struct owl_dma, dma); in to_owl_dma()
296 return &chan->dev->device; in chan2dev()
338 return lli->hw[OWL_DMADESC_FLEN] & GENMASK(19, 0); in llc_hw_flen()
344 list_del(&lli->node); in owl_dma_free_lli()
345 dma_pool_free(od->lli_pool, lli, lli->phys); in owl_dma_free_lli()
353 lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys); in owl_dma_alloc_lli()
357 INIT_LIST_HEAD(&lli->node); in owl_dma_alloc_lli()
358 lli->phys = phys; in owl_dma_alloc_lli()
369 list_add_tail(&next->node, &txd->lli_list); in owl_dma_add_lli()
372 prev->hw[OWL_DMADESC_NEXT_LLI] = next->phys; in owl_dma_add_lli()
373 prev->hw[OWL_DMADESC_CTRLA] |= in owl_dma_add_lli()
387 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); in owl_dma_cfg_lli()
400 mode |= OWL_DMA_MODE_TS(vchan->drq) in owl_dma_cfg_lli()
408 if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) in owl_dma_cfg_lli()
413 mode |= OWL_DMA_MODE_TS(vchan->drq) in owl_dma_cfg_lli()
421 if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE) in owl_dma_cfg_lli()
426 return -EINVAL; in owl_dma_cfg_lli()
429 lli->hw[OWL_DMADESC_CTRLA] = llc_hw_ctrla(mode, in owl_dma_cfg_lli()
438 lli->hw[OWL_DMADESC_NEXT_LLI] = 0; /* One link list by default */ in owl_dma_cfg_lli()
439 lli->hw[OWL_DMADESC_SADDR] = src; in owl_dma_cfg_lli()
440 lli->hw[OWL_DMADESC_DADDR] = dst; in owl_dma_cfg_lli()
441 lli->hw[OWL_DMADESC_SRC_STRIDE] = 0; in owl_dma_cfg_lli()
442 lli->hw[OWL_DMADESC_DST_STRIDE] = 0; in owl_dma_cfg_lli()
444 if (od->devid == S700_DMA) { in owl_dma_cfg_lli()
446 lli->hw[OWL_DMADESC_FLEN] = len; in owl_dma_cfg_lli()
452 lli->hw[OWL_DMADESC_CTRLB] = FCNT_VAL | ctrlb; in owl_dma_cfg_lli()
455 * On S900, the word starting at offset 0xC is shared between in owl_dma_cfg_lli()
460 lli->hw[OWL_DMADESC_FLEN] = len | FCNT_VAL << 20; in owl_dma_cfg_lli()
461 lli->hw[OWL_DMADESC_CTRLB] = ctrlb; in owl_dma_cfg_lli()
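On the S900 path just above, frame length and frame count share the FLEN word. A minimal sketch of that packing, assuming bits 0-19 carry the frame length (consistent with llc_hw_flen() masking with GENMASK(19, 0)) and the upper bits the frame count:

/* layout is an assumption inferred from the code above */
static inline u32 s900_pack_flen(u32 len, u32 fcnt)
{
	return (len & GENMASK(19, 0)) | (fcnt << 20);
}

static inline u32 s900_frame_len(u32 flen_word)
{
	return flen_word & GENMASK(19, 0);	/* what llc_hw_flen() returns */
}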
474 for (i = 0; i < od->nr_pchans; i++) { in owl_dma_get_pchan()
475 pchan = &od->pchans[i]; in owl_dma_get_pchan()
477 spin_lock_irqsave(&od->lock, flags); in owl_dma_get_pchan()
478 if (!pchan->vchan) { in owl_dma_get_pchan()
479 pchan->vchan = vchan; in owl_dma_get_pchan()
480 spin_unlock_irqrestore(&od->lock, flags); in owl_dma_get_pchan()
484 spin_unlock_irqrestore(&od->lock, flags); in owl_dma_get_pchan()
496 return !(val & (1 << pchan->id)); in owl_dma_pchan_busy()
508 spin_lock_irqsave(&od->lock, flags); in owl_dma_terminate_pchan()
509 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false); in owl_dma_terminate_pchan()
512 if (irq_pd & (1 << pchan->id)) { in owl_dma_terminate_pchan()
513 dev_warn(od->dma.dev, in owl_dma_terminate_pchan()
515 pchan->id); in owl_dma_terminate_pchan()
516 dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id)); in owl_dma_terminate_pchan()
519 pchan->vchan = NULL; in owl_dma_terminate_pchan()
521 spin_unlock_irqrestore(&od->lock, flags); in owl_dma_terminate_pchan()
536 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); in owl_dma_start_next_txd()
537 struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc); in owl_dma_start_next_txd()
538 struct owl_dma_pchan *pchan = vchan->pchan; in owl_dma_start_next_txd()
539 struct owl_dma_txd *txd = to_owl_txd(&vd->tx); in owl_dma_start_next_txd()
544 list_del(&vd->node); in owl_dma_start_next_txd()
546 vchan->txd = txd; in owl_dma_start_next_txd()
552 lli = list_first_entry(&txd->lli_list, in owl_dma_start_next_txd()
555 if (txd->cyclic) in owl_dma_start_next_txd()
563 pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys); in owl_dma_start_next_txd()
569 spin_lock_irqsave(&od->lock, flags); in owl_dma_start_next_txd()
571 dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true); in owl_dma_start_next_txd()
573 spin_unlock_irqrestore(&od->lock, flags); in owl_dma_start_next_txd()
575 dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id); in owl_dma_start_next_txd()
577 /* Start DMA transfer for this pchan */ in owl_dma_start_next_txd()
586 owl_dma_terminate_pchan(od, vchan->pchan); in owl_dma_phy_free()
588 vchan->pchan = NULL; in owl_dma_phy_free()
600 spin_lock(&od->lock); in owl_dma_interrupt()
605 for_each_set_bit(i, &pending, od->nr_pchans) { in owl_dma_interrupt()
606 pchan = &od->pchans[i]; in owl_dma_interrupt()
614 for (i = 0; i < od->nr_pchans; i++) { in owl_dma_interrupt()
615 pchan = &od->pchans[i]; in owl_dma_interrupt()
625 dev_dbg(od->dma.dev, in owl_dma_interrupt()
637 spin_unlock(&od->lock); in owl_dma_interrupt()
639 for_each_set_bit(i, &pending, od->nr_pchans) { in owl_dma_interrupt()
642 pchan = &od->pchans[i]; in owl_dma_interrupt()
644 vchan = pchan->vchan; in owl_dma_interrupt()
646 dev_warn(od->dma.dev, "no vchan attached on pchan %d\n", in owl_dma_interrupt()
647 pchan->id); in owl_dma_interrupt()
651 spin_lock(&vchan->vc.lock); in owl_dma_interrupt()
653 txd = vchan->txd; in owl_dma_interrupt()
655 vchan->txd = NULL; in owl_dma_interrupt()
657 vchan_cookie_complete(&txd->vd); in owl_dma_interrupt()
663 if (vchan_next_desc(&vchan->vc)) in owl_dma_interrupt()
669 spin_unlock(&vchan->vc.lock); in owl_dma_interrupt()
682 list_for_each_entry_safe(lli, _lli, &txd->lli_list, node) in owl_dma_free_txd()
690 struct owl_dma *od = to_owl_dma(vd->tx.chan->device); in owl_dma_desc_free()
691 struct owl_dma_txd *txd = to_owl_txd(&vd->tx); in owl_dma_desc_free()
698 struct owl_dma *od = to_owl_dma(chan->device); in owl_dma_terminate_all()
703 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_terminate_all()
705 if (vchan->pchan) in owl_dma_terminate_all()
708 if (vchan->txd) { in owl_dma_terminate_all()
709 owl_dma_desc_free(&vchan->txd->vd); in owl_dma_terminate_all()
710 vchan->txd = NULL; in owl_dma_terminate_all()
713 vchan_get_all_descriptors(&vchan->vc, &head); in owl_dma_terminate_all()
715 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_terminate_all()
717 vchan_dma_desc_free_list(&vchan->vc, &head); in owl_dma_terminate_all()
728 if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || in owl_dma_config()
729 config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) in owl_dma_config()
730 return -EINVAL; in owl_dma_config()
732 memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config)); in owl_dma_config()
742 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_pause()
744 owl_dma_pause_pchan(vchan->pchan); in owl_dma_pause()
746 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_pause()
756 if (!vchan->pchan && !vchan->txd) in owl_dma_resume()
759 dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); in owl_dma_resume()
761 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_resume()
763 owl_dma_resume_pchan(vchan->pchan); in owl_dma_resume()
765 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_resume()
778 pchan = vchan->pchan; in owl_dma_getbytes_chan()
779 txd = vchan->txd; in owl_dma_getbytes_chan()
790 list_for_each_entry(lli, &txd->lli_list, node) { in owl_dma_getbytes_chan()
792 if (lli->phys == next_lli_phy) { in owl_dma_getbytes_chan()
793 list_for_each_entry(lli, &txd->lli_list, node) in owl_dma_getbytes_chan()
819 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_tx_status()
821 vd = vchan_find_desc(&vchan->vc, cookie); in owl_dma_tx_status()
823 txd = to_owl_txd(&vd->tx); in owl_dma_tx_status()
824 list_for_each_entry(lli, &txd->lli_list, node) in owl_dma_tx_status()
830 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_tx_status()
839 struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); in owl_dma_phy_alloc_and_start()
846 dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id); in owl_dma_phy_alloc_and_start()
848 vchan->pchan = pchan; in owl_dma_phy_alloc_and_start()
857 spin_lock_irqsave(&vchan->vc.lock, flags); in owl_dma_issue_pending()
858 if (vchan_issue_pending(&vchan->vc)) { in owl_dma_issue_pending()
859 if (!vchan->pchan) in owl_dma_issue_pending()
862 spin_unlock_irqrestore(&vchan->vc.lock, flags); in owl_dma_issue_pending()
870 struct owl_dma *od = to_owl_dma(chan->device); in owl_dma_prep_memcpy()
884 INIT_LIST_HEAD(&txd->lli_list); in owl_dma_prep_memcpy()
894 bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH); in owl_dma_prep_memcpy()
898 &vchan->cfg, txd->cyclic); in owl_dma_prep_memcpy()
907 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); in owl_dma_prep_memcpy()
921 struct owl_dma *od = to_owl_dma(chan->device); in owl_dma_prep_slave_sg()
923 struct dma_slave_config *sconfig = &vchan->cfg; in owl_dma_prep_slave_sg()
935 INIT_LIST_HEAD(&txd->lli_list); in owl_dma_prep_slave_sg()
942 dev_err(od->dma.dev, in owl_dma_prep_slave_sg()
955 dst = sconfig->dst_addr; in owl_dma_prep_slave_sg()
957 src = sconfig->src_addr; in owl_dma_prep_slave_sg()
962 txd->cyclic); in owl_dma_prep_slave_sg()
971 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); in owl_dma_prep_slave_sg()
986 struct owl_dma *od = to_owl_dma(chan->device); in owl_prep_dma_cyclic()
988 struct dma_slave_config *sconfig = &vchan->cfg; in owl_prep_dma_cyclic()
999 INIT_LIST_HEAD(&txd->lli_list); in owl_prep_dma_cyclic()
1000 txd->cyclic = true; in owl_prep_dma_cyclic()
1011 dst = sconfig->dst_addr; in owl_prep_dma_cyclic()
1013 src = sconfig->src_addr; in owl_prep_dma_cyclic()
1018 dir, sconfig, txd->cyclic); in owl_prep_dma_cyclic()
1033 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); in owl_prep_dma_cyclic()
1046 vchan_free_chan_resources(&vchan->vc); in owl_dma_free_chan_resources()
1055 next, &od->dma.channels, vc.chan.device_node) { in owl_dma_free()
1056 list_del(&vchan->vc.chan.device_node); in owl_dma_free()
1057 tasklet_kill(&vchan->vc.task); in owl_dma_free()
1064 struct owl_dma *od = ofdma->of_dma_data; in owl_dma_of_xlate()
1067 u8 drq = dma_spec->args[0]; in owl_dma_of_xlate()
1069 if (drq > od->nr_vchans) in owl_dma_of_xlate()
1072 chan = dma_get_any_slave_channel(&od->dma); in owl_dma_of_xlate()
1077 vchan->drq = drq; in owl_dma_of_xlate()
1083 { .compatible = "actions,s900-dma", .data = (void *)S900_DMA,},
1084 { .compatible = "actions,s700-dma", .data = (void *)S700_DMA,},
1091 struct device_node *np = pdev->dev.of_node; in owl_dma_probe()
1095 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); in owl_dma_probe()
1097 return -ENOMEM; in owl_dma_probe()
1099 od->base = devm_platform_ioremap_resource(pdev, 0); in owl_dma_probe()
1100 if (IS_ERR(od->base)) in owl_dma_probe()
1101 return PTR_ERR(od->base); in owl_dma_probe()
1103 ret = of_property_read_u32(np, "dma-channels", &nr_channels); in owl_dma_probe()
1105 dev_err(&pdev->dev, "can't get dma-channels\n"); in owl_dma_probe()
1109 ret = of_property_read_u32(np, "dma-requests", &nr_requests); in owl_dma_probe()
1111 dev_err(&pdev->dev, "can't get dma-requests\n"); in owl_dma_probe()
1115 dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n", in owl_dma_probe()
1118 od->devid = (enum owl_dma_id)of_device_get_match_data(&pdev->dev); in owl_dma_probe()
1120 od->nr_pchans = nr_channels; in owl_dma_probe()
1121 od->nr_vchans = nr_requests; in owl_dma_probe()
1123 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); in owl_dma_probe()
1126 spin_lock_init(&od->lock); in owl_dma_probe()
1128 dma_cap_set(DMA_MEMCPY, od->dma.cap_mask); in owl_dma_probe()
1129 dma_cap_set(DMA_SLAVE, od->dma.cap_mask); in owl_dma_probe()
1130 dma_cap_set(DMA_CYCLIC, od->dma.cap_mask); in owl_dma_probe()
1132 od->dma.dev = &pdev->dev; in owl_dma_probe()
1133 od->dma.device_free_chan_resources = owl_dma_free_chan_resources; in owl_dma_probe()
1134 od->dma.device_tx_status = owl_dma_tx_status; in owl_dma_probe()
1135 od->dma.device_issue_pending = owl_dma_issue_pending; in owl_dma_probe()
1136 od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy; in owl_dma_probe()
1137 od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg; in owl_dma_probe()
1138 od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic; in owl_dma_probe()
1139 od->dma.device_config = owl_dma_config; in owl_dma_probe()
1140 od->dma.device_pause = owl_dma_pause; in owl_dma_probe()
1141 od->dma.device_resume = owl_dma_resume; in owl_dma_probe()
1142 od->dma.device_terminate_all = owl_dma_terminate_all; in owl_dma_probe()
1143 od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); in owl_dma_probe()
1144 od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); in owl_dma_probe()
1145 od->dma.directions = BIT(DMA_MEM_TO_MEM); in owl_dma_probe()
1146 od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in owl_dma_probe()
1148 INIT_LIST_HEAD(&od->dma.channels); in owl_dma_probe()
1150 od->clk = devm_clk_get(&pdev->dev, NULL); in owl_dma_probe()
1151 if (IS_ERR(od->clk)) { in owl_dma_probe()
1152 dev_err(&pdev->dev, "unable to get clock\n"); in owl_dma_probe()
1153 return PTR_ERR(od->clk); in owl_dma_probe()
1157 * Even though the DMA controller is capable of generating 4 in owl_dma_probe()
1158 * IRQs for the DMA priority feature, we only use 1 IRQ for in owl_dma_probe()
1161 od->irq = platform_get_irq(pdev, 0); in owl_dma_probe()
1162 ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0, in owl_dma_probe()
1163 dev_name(&pdev->dev), od); in owl_dma_probe()
1165 dev_err(&pdev->dev, "unable to request IRQ\n"); in owl_dma_probe()
1170 od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans, in owl_dma_probe()
1172 if (!od->pchans) in owl_dma_probe()
1173 return -ENOMEM; in owl_dma_probe()
1175 for (i = 0; i < od->nr_pchans; i++) { in owl_dma_probe()
1176 struct owl_dma_pchan *pchan = &od->pchans[i]; in owl_dma_probe()
1178 pchan->id = i; in owl_dma_probe()
1179 pchan->base = od->base + OWL_DMA_CHAN_BASE(i); in owl_dma_probe()
1183 od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans, in owl_dma_probe()
1185 if (!od->vchans) in owl_dma_probe()
1186 return -ENOMEM; in owl_dma_probe()
1188 for (i = 0; i < od->nr_vchans; i++) { in owl_dma_probe()
1189 struct owl_dma_vchan *vchan = &od->vchans[i]; in owl_dma_probe()
1191 vchan->vc.desc_free = owl_dma_desc_free; in owl_dma_probe()
1192 vchan_init(&vchan->vc, &od->dma); in owl_dma_probe()
1196 od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev, in owl_dma_probe()
1200 if (!od->lli_pool) { in owl_dma_probe()
1201 dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n"); in owl_dma_probe()
1202 return -ENOMEM; in owl_dma_probe()
1205 clk_prepare_enable(od->clk); in owl_dma_probe()
1207 ret = dma_async_device_register(&od->dma); in owl_dma_probe()
1209 dev_err(&pdev->dev, "failed to register DMA engine device\n"); in owl_dma_probe()
1213 /* Device-tree DMA controller registration */ in owl_dma_probe()
1214 ret = of_dma_controller_register(pdev->dev.of_node, in owl_dma_probe()
1217 dev_err(&pdev->dev, "of_dma_controller_register failed\n"); in owl_dma_probe()
1224 dma_async_device_unregister(&od->dma); in owl_dma_probe()
1226 clk_disable_unprepare(od->clk); in owl_dma_probe()
1227 dma_pool_destroy(od->lli_pool); in owl_dma_probe()
1236 of_dma_controller_free(pdev->dev.of_node); in owl_dma_remove()
1237 dma_async_device_unregister(&od->dma); in owl_dma_remove()
1243 devm_free_irq(od->dma.dev, od->irq, od); in owl_dma_remove()
1247 clk_disable_unprepare(od->clk); in owl_dma_remove()
1256 .name = "dma-owl",
1273 MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>");
1275 MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver");