Lines Matching full:pc

92  *                         channel (PC)
93 * @queue: Queue for the PDs issued to this PC
94 * @base: The mapped register I/O base of this PC
95 * @irq: The IRQ that this PC is using
96 * @refcnt: Track how many VCs are using this PC
97 * @tasklet: Tasklet for this PC
98 * @lock: Lock to protect against multiple VCs accessing the PC
109 /* lock to protect PC */
117 * @pc: The pointer to the underlying PC
123 struct mtk_cqdma_pchan *pc; member
136 * @pc: The pointer to all the underlying PCs
145 struct mtk_cqdma_pchan **pc; member
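
Read together, the fragments above describe three levels of state: a physical channel (PC) carrying the register base, IRQ, issue queue, completion tasklet, refcount and lock; a virtual channel (VC) holding a pointer to the PC it is bound to; and the controller holding the array of PCs. The following is a rough reconstruction of that layout, not the driver's full definitions: only the members visible in the matches are shown, and the mtk_cqdma_vchan/mtk_cqdma_device struct names, the dma_device embedding and the virt-dma header are assumptions based on the cvc/cqdma naming used elsewhere in the file.

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include "../virt-dma.h"

/* One physical CQDMA channel (PC); several VCs may share it. */
struct mtk_cqdma_pchan {
        struct list_head queue;         /* PDs issued to this PC */
        void __iomem *base;             /* mapped register I/O base */
        u32 irq;                        /* IRQ line used by this PC */
        refcount_t refcnt;              /* how many VCs are using this PC */
        struct tasklet_struct tasklet;  /* completion bottom half */
        spinlock_t lock;                /* lock to protect the PC */
};

/* A virtual channel (VC) bound to one underlying PC. */
struct mtk_cqdma_vchan {
        struct virt_dma_chan vc;
        struct mtk_cqdma_pchan *pc;     /* the underlying PC */
};

/* The controller owns the array of all underlying PCs. */
struct mtk_cqdma_device {
        struct dma_device ddev;
        u32 dma_channels;               /* number of PCs */
        struct mtk_cqdma_pchan **pc;    /* all the underlying PCs */
};
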
168 static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg) in mtk_dma_read() argument
170 return readl(pc->base + reg); in mtk_dma_read()
173 static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) in mtk_dma_write() argument
175 writel_relaxed(val, pc->base + reg); in mtk_dma_write()
178 static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg, in mtk_dma_rmw() argument
183 val = mtk_dma_read(pc, reg); in mtk_dma_rmw()
186 mtk_dma_write(pc, reg, val); in mtk_dma_rmw()
189 static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) in mtk_dma_set() argument
191 mtk_dma_rmw(pc, reg, 0, val); in mtk_dma_set()
194 static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val) in mtk_dma_clr() argument
196 mtk_dma_rmw(pc, reg, val, 0); in mtk_dma_clr()
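
The accessor fragments above are the usual MMIO read-modify-write helpers built on readl()/writel_relaxed(). The reconstruction below assumes only <linux/io.h>; the truncated second parameter pair of mtk_dma_rmw() is taken to be (mask, set), which is consistent with how mtk_dma_set() and mtk_dma_clr() call it.

#include <linux/io.h>

static u32 mtk_dma_read(struct mtk_cqdma_pchan *pc, u32 reg)
{
        return readl(pc->base + reg);
}

static void mtk_dma_write(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
        writel_relaxed(val, pc->base + reg);
}

static void mtk_dma_rmw(struct mtk_cqdma_pchan *pc, u32 reg,
                        u32 mask, u32 set)
{
        u32 val;

        val = mtk_dma_read(pc, reg);
        val &= ~mask;           /* clear the requested bits ... */
        val |= set;             /* ... then set the new ones */
        mtk_dma_write(pc, reg, val);
}

/* Convenience wrappers: set or clear bits in a single register. */
static void mtk_dma_set(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
        mtk_dma_rmw(pc, reg, 0, val);
}

static void mtk_dma_clr(struct mtk_cqdma_pchan *pc, u32 reg, u32 val)
{
        mtk_dma_rmw(pc, reg, val, 0);
}
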
204 static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic) in mtk_cqdma_poll_engine_done() argument
209 return readl_poll_timeout(pc->base + MTK_CQDMA_EN, in mtk_cqdma_poll_engine_done()
215 return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN, in mtk_cqdma_poll_engine_done()
222 static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc) in mtk_cqdma_hard_reset() argument
224 mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); in mtk_cqdma_hard_reset()
225 mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT); in mtk_cqdma_hard_reset()
227 return mtk_cqdma_poll_engine_done(pc, true); in mtk_cqdma_hard_reset()
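
mtk_cqdma_poll_engine_done() spins on the EN register until the engine reports idle, choosing the atomic poll variant when the caller cannot sleep, and mtk_cqdma_hard_reset() pulses the hard-reset bit and then waits for that same idle condition. In the sketch below the idle test !(status & MTK_CQDMA_EN_BIT) and the MTK_CQDMA_USEC_POLL/MTK_CQDMA_TIMEOUT_POLL interval constants are assumptions; only the register and bit names come from the matches.

#include <linux/iopoll.h>

static int mtk_cqdma_poll_engine_done(struct mtk_cqdma_pchan *pc, bool atomic)
{
        u32 status = 0;

        if (!atomic)
                return readl_poll_timeout(pc->base + MTK_CQDMA_EN, status,
                                !(status & MTK_CQDMA_EN_BIT),
                                MTK_CQDMA_USEC_POLL, MTK_CQDMA_TIMEOUT_POLL);

        /* Same condition, but busy-waiting: usable under spinlocks. */
        return readl_poll_timeout_atomic(pc->base + MTK_CQDMA_EN, status,
                        !(status & MTK_CQDMA_EN_BIT),
                        MTK_CQDMA_USEC_POLL, MTK_CQDMA_TIMEOUT_POLL);
}

static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
{
        /* Pulse the hard-reset bit, then wait for the engine to go idle. */
        mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
        mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);

        return mtk_cqdma_poll_engine_done(pc, true);
}
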
230 static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc, in mtk_cqdma_start() argument
234 if (mtk_cqdma_poll_engine_done(pc, true) < 0) in mtk_cqdma_start()
238 mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT); in mtk_cqdma_start()
239 if (mtk_cqdma_poll_engine_done(pc, true) < 0) in mtk_cqdma_start()
243 mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT); in mtk_cqdma_start()
245 mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT); in mtk_cqdma_start()
247 mtk_dma_set(pc, MTK_CQDMA_SRC2, 0); in mtk_cqdma_start()
251 mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT); in mtk_cqdma_start()
253 mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT); in mtk_cqdma_start()
255 mtk_dma_set(pc, MTK_CQDMA_DST2, 0); in mtk_cqdma_start()
259 mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len); in mtk_cqdma_start()
262 mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT); in mtk_cqdma_start()
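
mtk_cqdma_start() programs a transfer in a fixed order: wait for the engine to be idle, warm-reset it, write the low 32 bits of the source and destination (and either the upper bits or zero into the *2 registers), set the length, and finally set the EN bit to kick the engine. The condensed sketch below drops the error reporting; the descriptor type name mtk_cqdma_vdesc is inferred from the cvd variable, and folding the two SRC2/DST2 variants into a CONFIG_ARCH_DMA_ADDR_T_64BIT ifdef is an assumption.

static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
                            struct mtk_cqdma_vdesc *cvd)
{
        /* Wait until any previous transfer has finished. */
        if (mtk_cqdma_poll_engine_done(pc, true) < 0)
                return;

        /* Warm reset before programming the new transfer. */
        mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_WARM_RST_BIT);
        if (mtk_cqdma_poll_engine_done(pc, true) < 0)
                return;

        /* Source address: low 32 bits, then the upper bits (or zero). */
        mtk_dma_set(pc, MTK_CQDMA_SRC, cvd->src & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        mtk_dma_set(pc, MTK_CQDMA_SRC2, cvd->src >> MTK_CQDMA_ADDR2_SHFIT);
#else
        mtk_dma_set(pc, MTK_CQDMA_SRC2, 0);
#endif

        /* Destination address, programmed the same way. */
        mtk_dma_set(pc, MTK_CQDMA_DST, cvd->dest & MTK_CQDMA_ADDR_LIMIT);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
        mtk_dma_set(pc, MTK_CQDMA_DST2, cvd->dest >> MTK_CQDMA_ADDR2_SHFIT);
#else
        mtk_dma_set(pc, MTK_CQDMA_DST2, 0);
#endif

        /* Transfer length, then kick the engine. */
        mtk_dma_set(pc, MTK_CQDMA_LEN1, cvd->len);
        mtk_dma_set(pc, MTK_CQDMA_EN, MTK_CQDMA_EN_BIT);
}
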
268 struct mtk_cqdma_pchan *pc = cvc->pc; in mtk_cqdma_issue_vchan_pending() local
273 lockdep_assert_held(&pc->lock); in mtk_cqdma_issue_vchan_pending()
276 /* need to trigger dma engine if PC's queue is empty */ in mtk_cqdma_issue_vchan_pending()
277 if (list_empty(&pc->queue)) in mtk_cqdma_issue_vchan_pending()
282 /* add VD into PC's queue */ in mtk_cqdma_issue_vchan_pending()
283 list_add_tail(&cvd->node, &pc->queue); in mtk_cqdma_issue_vchan_pending()
287 mtk_cqdma_start(pc, cvd); in mtk_cqdma_issue_vchan_pending()
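
The matches from mtk_cqdma_issue_vchan_pending() show the submission rule: a descriptor is always appended to the PC's queue, but the hardware is kicked only if that queue was empty beforehand; otherwise the completion tasklet starts it later. A minimal sketch of that rule, with the iteration over the VC's pending descriptors elided and a hypothetical trigger flag capturing the "queue was empty" test:

        /* Kick the engine only if the PC had nothing in flight. */
        bool trigger = list_empty(&pc->queue);

        /* Always add the descriptor to the PC's queue ... */
        list_add_tail(&cvd->node, &pc->queue);

        /* ... but start it right away only when the queue was empty. */
        if (trigger)
                mtk_cqdma_start(pc, cvd);
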
296 * meaning that there are VDs being processed by the PC
302 list_for_each_entry(cvd, &cvc->pc->queue, node) in mtk_cqdma_is_vchan_active()
310 * return the pointer of the CVD that is just consumed by the PC
313 *mtk_cqdma_consume_work_queue(struct mtk_cqdma_pchan *pc) in mtk_cqdma_consume_work_queue() argument
318 /* consume a CVD from PC's queue */ in mtk_cqdma_consume_work_queue()
319 cvd = list_first_entry_or_null(&pc->queue, in mtk_cqdma_consume_work_queue()
330 /* delete CVD from PC's queue */ in mtk_cqdma_consume_work_queue()
350 cvd = list_first_entry_or_null(&pc->queue, in mtk_cqdma_consume_work_queue()
353 mtk_cqdma_start(pc, cvd); in mtk_cqdma_consume_work_queue()
360 struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet); in mtk_cqdma_tasklet_cb() local
364 spin_lock_irqsave(&pc->lock, flags); in mtk_cqdma_tasklet_cb()
366 cvd = mtk_cqdma_consume_work_queue(pc); in mtk_cqdma_tasklet_cb()
367 spin_unlock_irqrestore(&pc->lock, flags); in mtk_cqdma_tasklet_cb()
382 enable_irq(pc->irq); in mtk_cqdma_tasklet_cb()
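
The completion bottom half pairs with the work-queue consumer above: under pc->lock it pops the finished descriptor from the PC's queue (mtk_cqdma_consume_work_queue() also starts the next queued descriptor, as the later matches in that function show), then, outside the lock, completes it and re-enables the PC's IRQ line that the hard IRQ handler masked. A skeleton of that flow; the descriptor type name and the completion step are placeholders:

static void mtk_cqdma_tasklet_cb(struct tasklet_struct *t)
{
        struct mtk_cqdma_pchan *pc = from_tasklet(pc, t, tasklet);
        struct mtk_cqdma_vdesc *cvd;
        unsigned long flags;

        /* Pop the finished descriptor (and start the next) under pc->lock. */
        spin_lock_irqsave(&pc->lock, flags);
        cvd = mtk_cqdma_consume_work_queue(pc);
        spin_unlock_irqrestore(&pc->lock, flags);

        if (cvd) {
                /* ... complete the descriptor / run its dependencies ... */
        }

        /* The hard IRQ handler masked this line; unmask it again. */
        enable_irq(pc->irq);
}
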
392 /* clear interrupt flags for each PC */ in mtk_cqdma_irq()
394 spin_lock(&cqdma->pc[i]->lock); in mtk_cqdma_irq()
395 if (mtk_dma_read(cqdma->pc[i], in mtk_cqdma_irq()
398 mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG, in mtk_cqdma_irq()
404 spin_unlock(&cqdma->pc[i]->lock); in mtk_cqdma_irq()
408 disable_irq_nosync(cqdma->pc[i]->irq); in mtk_cqdma_irq()
411 tasklet_schedule(&cqdma->pc[i]->tasklet); in mtk_cqdma_irq()
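
The hard IRQ handler is shared by all PCs: it walks each PC, checks and clears that PC's interrupt flag under its lock, and when a flag was set it masks the line with disable_irq_nosync() and defers the rest to the per-PC tasklet, which re-enables the line when finished. A sketch of that loop; the MTK_CQDMA_INT_FLAG_BIT name and the irqreturn bookkeeping are assumptions, the register names and calls come from the matches.

static irqreturn_t mtk_cqdma_irq(int irq, void *devid)
{
        struct mtk_cqdma_device *cqdma = devid;
        irqreturn_t ret = IRQ_NONE;
        bool schedule_tasklet;
        u32 i;

        /* Clear interrupt flags for each PC. */
        for (i = 0; i < cqdma->dma_channels; ++i) {
                schedule_tasklet = false;

                spin_lock(&cqdma->pc[i]->lock);
                if (mtk_dma_read(cqdma->pc[i], MTK_CQDMA_INT_FLAG) &
                    MTK_CQDMA_INT_FLAG_BIT) {
                        mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_FLAG,
                                    MTK_CQDMA_INT_FLAG_BIT);
                        schedule_tasklet = true;
                        ret = IRQ_HANDLED;
                }
                spin_unlock(&cqdma->pc[i]->lock);

                if (schedule_tasklet) {
                        /* Mask the line until the tasklet has finished. */
                        disable_irq_nosync(cqdma->pc[i]->irq);
                        tasklet_schedule(&cqdma->pc[i]->tasklet);
                }
        }

        return ret;
}
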
424 list_for_each_entry(vd, &cvc->pc->queue, node) in mtk_cqdma_find_active_desc()
451 spin_lock_irqsave(&cvc->pc->lock, flags); in mtk_cqdma_tx_status()
455 spin_unlock_irqrestore(&cvc->pc->lock, flags); in mtk_cqdma_tx_status()
473 /* acquire PC's lock before VC's lock for lock dependency in tasklet */ in mtk_cqdma_issue_pending()
474 spin_lock_irqsave(&cvc->pc->lock, pc_flags); in mtk_cqdma_issue_pending()
481 spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); in mtk_cqdma_issue_pending()
575 /* acquire PC's lock first due to lock dependency in dma ISR */ in mtk_cqdma_free_active_desc()
576 spin_lock_irqsave(&cvc->pc->lock, pc_flags); in mtk_cqdma_free_active_desc()
586 spin_unlock_irqrestore(&cvc->pc->lock, pc_flags); in mtk_cqdma_free_active_desc()
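
Both the issue-pending path and the descriptor-free path take the PC's lock before the VC's lock, the same order the tasklet uses, so the "lock dependency" comments really describe a fixed hierarchy: pc->lock outer, vc.lock inner. A minimal sketch of that ordering; the vc.lock name is the virt-dma channel lock and is assumed here:

        unsigned long pc_flags, vc_flags;

        /* Fixed hierarchy: always take the PC lock before the VC lock. */
        spin_lock_irqsave(&cvc->pc->lock, pc_flags);
        spin_lock_irqsave(&cvc->vc.lock, vc_flags);

        /* ... issue or free descriptors here ... */

        spin_unlock_irqrestore(&cvc->vc.lock, vc_flags);
        spin_unlock_irqrestore(&cvc->pc->lock, pc_flags);
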
614 struct mtk_cqdma_pchan *pc = NULL; in mtk_cqdma_alloc_chan_resources() local
618 /* allocate PC with the minimum refcount */ in mtk_cqdma_alloc_chan_resources()
620 refcnt = refcount_read(&cqdma->pc[i]->refcnt); in mtk_cqdma_alloc_chan_resources()
622 pc = cqdma->pc[i]; in mtk_cqdma_alloc_chan_resources()
627 if (!pc) in mtk_cqdma_alloc_chan_resources()
630 spin_lock_irqsave(&pc->lock, flags); in mtk_cqdma_alloc_chan_resources()
632 if (!refcount_read(&pc->refcnt)) { in mtk_cqdma_alloc_chan_resources()
633 /* allocate PC when the refcount is zero */ in mtk_cqdma_alloc_chan_resources()
634 mtk_cqdma_hard_reset(pc); in mtk_cqdma_alloc_chan_resources()
636 /* enable interrupt for this PC */ in mtk_cqdma_alloc_chan_resources()
637 mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); in mtk_cqdma_alloc_chan_resources()
643 refcount_set(&pc->refcnt, 1); in mtk_cqdma_alloc_chan_resources()
645 refcount_inc(&pc->refcnt); in mtk_cqdma_alloc_chan_resources()
648 spin_unlock_irqrestore(&pc->lock, flags); in mtk_cqdma_alloc_chan_resources()
650 vc->pc = pc; in mtk_cqdma_alloc_chan_resources()
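
Channel allocation load-balances VCs over PCs: scan all PCs, pick the one with the smallest refcount, and only when that PC goes from zero users to one is it hard-reset and its interrupt enabled; later users just bump the refcount. Note the first user must use refcount_set(), since refcount_inc() on a zero refcount would warn. A condensed sketch; the loop bounds, the -ENOSPC return and any local names not in the matches are assumptions:

        struct mtk_cqdma_pchan *pc = NULL;
        u32 i, min_refcnt = U32_MAX, refcnt;
        unsigned long flags;

        /* Allocate the PC with the minimum refcount. */
        for (i = 0; i < cqdma->dma_channels; ++i) {
                refcnt = refcount_read(&cqdma->pc[i]->refcnt);
                if (refcnt < min_refcnt) {
                        pc = cqdma->pc[i];
                        min_refcnt = refcnt;
                }
        }
        if (!pc)
                return -ENOSPC;

        spin_lock_irqsave(&pc->lock, flags);
        if (!refcount_read(&pc->refcnt)) {
                /* First user: reset the engine and enable its interrupt. */
                mtk_cqdma_hard_reset(pc);
                mtk_dma_set(pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
                refcount_set(&pc->refcnt, 1);
        } else {
                refcount_inc(&pc->refcnt);
        }
        spin_unlock_irqrestore(&pc->lock, flags);

        vc->pc = pc;
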
663 spin_lock_irqsave(&cvc->pc->lock, flags); in mtk_cqdma_free_chan_resources()
665 /* PC is not freed until there is no VC mapped to it */ in mtk_cqdma_free_chan_resources()
666 if (refcount_dec_and_test(&cvc->pc->refcnt)) { in mtk_cqdma_free_chan_resources()
668 mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); in mtk_cqdma_free_chan_resources()
671 if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0) in mtk_cqdma_free_chan_resources()
675 mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT); in mtk_cqdma_free_chan_resources()
676 mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG, in mtk_cqdma_free_chan_resources()
679 /* disable interrupt for this PC */ in mtk_cqdma_free_chan_resources()
680 mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT); in mtk_cqdma_free_chan_resources()
683 spin_unlock_irqrestore(&cvc->pc->lock, flags); in mtk_cqdma_free_chan_resources()
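
Freeing a channel is the mirror image: the PC's refcount is dropped under its lock, and only the last user flushes the engine (set the FLUSH bit, poll for idle, clear FLUSH and any latched interrupt flag) and disables the PC's interrupt. A short sketch of the last-user branch; MTK_CQDMA_INT_FLAG_BIT is assumed as before and the flush-timeout error reporting is elided:

        spin_lock_irqsave(&cvc->pc->lock, flags);

        /* The PC is not torn down until no VC is mapped to it any more. */
        if (refcount_dec_and_test(&cvc->pc->refcnt)) {
                /* Flush outstanding transfers; ignore a poll timeout here. */
                mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
                mtk_cqdma_poll_engine_done(cvc->pc, true);

                /* Clear the flush request and any stale interrupt flag. */
                mtk_dma_clr(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
                mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_FLAG,
                            MTK_CQDMA_INT_FLAG_BIT);

                /* Finally mask this PC's interrupt source. */
                mtk_dma_clr(cvc->pc, MTK_CQDMA_INT_EN, MTK_CQDMA_INT_EN_BIT);
        }

        spin_unlock_irqrestore(&cvc->pc->lock, flags);
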
705 spin_lock_irqsave(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_init()
706 if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) { in mtk_cqdma_hw_init()
708 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_init()
715 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_init()
728 spin_lock_irqsave(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_deinit()
729 if (mtk_cqdma_hard_reset(cqdma->pc[i]) < 0) in mtk_cqdma_hw_deinit()
731 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); in mtk_cqdma_hw_deinit()
803 cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels, in mtk_cqdma_probe()
804 sizeof(*cqdma->pc), GFP_KERNEL); in mtk_cqdma_probe()
805 if (!cqdma->pc) in mtk_cqdma_probe()
810 cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1, in mtk_cqdma_probe()
811 sizeof(**cqdma->pc), GFP_KERNEL); in mtk_cqdma_probe()
812 if (!cqdma->pc[i]) in mtk_cqdma_probe()
815 INIT_LIST_HEAD(&cqdma->pc[i]->queue); in mtk_cqdma_probe()
816 spin_lock_init(&cqdma->pc[i]->lock); in mtk_cqdma_probe()
817 refcount_set(&cqdma->pc[i]->refcnt, 0); in mtk_cqdma_probe()
818 cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i); in mtk_cqdma_probe()
819 if (IS_ERR(cqdma->pc[i]->base)) in mtk_cqdma_probe()
820 return PTR_ERR(cqdma->pc[i]->base); in mtk_cqdma_probe()
826 cqdma->pc[i]->irq = err; in mtk_cqdma_probe()
828 err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq, in mtk_cqdma_probe()
872 /* initialize tasklet for each PC */ in mtk_cqdma_probe()
874 tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb); in mtk_cqdma_probe()
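
Probe first allocates the array of PC pointers, then for each channel allocates a PC, initialises its queue, lock and refcount, maps its register window, looks up and requests its IRQ, and sets up its completion tasklet. The sketch below folds the tasklet setup (a separate loop in the matches) into the same per-channel loop for brevity; platform_get_irq(), the request_irq flags/name arguments and the error paths are assumptions.

        cqdma->pc = devm_kcalloc(&pdev->dev, cqdma->dma_channels,
                                 sizeof(*cqdma->pc), GFP_KERNEL);
        if (!cqdma->pc)
                return -ENOMEM;

        for (i = 0; i < cqdma->dma_channels; ++i) {
                cqdma->pc[i] = devm_kcalloc(&pdev->dev, 1,
                                            sizeof(**cqdma->pc), GFP_KERNEL);
                if (!cqdma->pc[i])
                        return -ENOMEM;

                INIT_LIST_HEAD(&cqdma->pc[i]->queue);
                spin_lock_init(&cqdma->pc[i]->lock);
                refcount_set(&cqdma->pc[i]->refcnt, 0);

                /* Map this PC's register window. */
                cqdma->pc[i]->base = devm_platform_ioremap_resource(pdev, i);
                if (IS_ERR(cqdma->pc[i]->base))
                        return PTR_ERR(cqdma->pc[i]->base);

                /* Hook this PC's interrupt line up to the shared handler. */
                err = platform_get_irq(pdev, i);
                if (err < 0)
                        return err;
                cqdma->pc[i]->irq = err;

                err = devm_request_irq(&pdev->dev, cqdma->pc[i]->irq,
                                       mtk_cqdma_irq, 0, dev_name(&pdev->dev),
                                       cqdma);
                if (err)
                        return err;

                /* One completion tasklet per PC. */
                tasklet_setup(&cqdma->pc[i]->tasklet, mtk_cqdma_tasklet_cb);
        }
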
903 spin_lock_irqsave(&cqdma->pc[i]->lock, flags); in mtk_cqdma_remove()
904 mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN, in mtk_cqdma_remove()
906 spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags); in mtk_cqdma_remove()
909 synchronize_irq(cqdma->pc[i]->irq); in mtk_cqdma_remove()
911 tasklet_kill(&cqdma->pc[i]->tasklet); in mtk_cqdma_remove()
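
Removal tears things down in the reverse order: for each PC, mask its interrupt source under the PC lock, wait for a possibly running handler with synchronize_irq(), then kill the tasklet so no completion work can run afterwards. A compact sketch of that per-PC loop, built only from the calls in the matches:

        for (i = 0; i < cqdma->dma_channels; ++i) {
                /* Stop the hardware from raising further interrupts. */
                spin_lock_irqsave(&cqdma->pc[i]->lock, flags);
                mtk_dma_clr(cqdma->pc[i], MTK_CQDMA_INT_EN,
                            MTK_CQDMA_INT_EN_BIT);
                spin_unlock_irqrestore(&cqdma->pc[i]->lock, flags);

                /* Wait for an in-flight handler, then stop the tasklet. */
                synchronize_irq(cqdma->pc[i]->irq);
                tasklet_kill(&cqdma->pc[i]->tasklet);
        }
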