Lines Matching refs:chan
(Symbol cross-reference: each entry below is a source line, prefixed with its line number in the file, that references `chan`. The function and field names indicate the file is the SiFive PDMA dmaengine driver, drivers/dma/sf-pdma/sf-pdma.c in the Linux kernel.)
47 return container_of(dchan, struct sf_pdma_chan, vchan.chan);
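The line-47 match is the body of the to_sf_pdma_chan() conversion helper. A minimal sketch of the full helper, reconstructed around the matched line (only the signature is inferred, from the standard dmaengine container_of idiom):

static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
        /* vchan.chan is the generic dma_chan embedded inside the
         * driver-private channel; walk back to the container */
        return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}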
55 static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
63 desc->chan = chan;
71 desc->xfer_type = desc->chan->pdma->transfer_type;
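Lines 55 and 63 are from the descriptor allocator; line 71 appears to belong to a separate fill helper that stamps the platform's transfer type into the descriptor. A hedged reconstruction of the allocator (the kzalloc/GFP_NOWAIT body between the matched lines is an assumption based on common dmaengine practice):

static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
        struct sf_pdma_desc *desc;

        /* GFP_NOWAIT: prep_* callbacks may be called in atomic context */
        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
        if (!desc)
                return NULL;

        desc->chan = chan;      /* line 63: back-pointer to the owning channel */

        return desc;
}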
77 static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
79 struct pdma_regs *regs = &chan->regs;
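sf_pdma_disclaim_chan() (lines 77-79) releases the hardware claim on a channel. A sketch, assuming the PDMA_CLEAR_CTRL write seen at line 489 is what follows the matched lines:

static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;

        /* clearing CTRL drops the claim taken in alloc_chan_resources() */
        writel(PDMA_CLEAR_CTRL, regs->ctrl);
}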
88 struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
92 if (chan && (!len || !dest || !src)) {
93 dev_err(chan->pdma->dma_dev.dev,
98 desc = sf_pdma_alloc_desc(chan);
103 desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
105 spin_lock_irqsave(&chan->vchan.lock, iflags);
107 spin_unlock_irqrestore(&chan->vchan.lock, iflags);
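Lines 88-107 belong to the memcpy prep callback. A reconstruction under stated assumptions: the sf_pdma_fill_desc() helper name is assumed (the line-71 match looks like its body), and the error message is paraphrased:

static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest,
                        dma_addr_t src, size_t len, unsigned long flags)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        struct sf_pdma_desc *desc;
        unsigned long iflags;

        /* line 92: reject zero length or NULL addresses up front */
        if (chan && (!len || !dest || !src)) {
                dev_err(chan->pdma->dma_dev.dev,
                        "invalid dma len, dest or src\n");
                return NULL;
        }

        desc = sf_pdma_alloc_desc(chan);
        if (!desc)
                return NULL;

        /* wrap the virt-dma descriptor in an async_tx handle */
        desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

        spin_lock_irqsave(&chan->vchan.lock, iflags);
        sf_pdma_fill_desc(desc, dest, src, len);        /* helper name assumed */
        spin_unlock_irqrestore(&chan->vchan.lock, iflags);

        return desc->async_tx;
}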
115 struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
117 memcpy(&chan->cfg, cfg, sizeof(*cfg));
124 struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
125 struct pdma_regs *regs = &chan->regs;
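Lines 124-125 open the alloc_chan_resources callback. A short sketch, assuming the usual cookie init plus a claim write (PDMA_CLAIM_MASK is an assumed register-bit name):

static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        struct pdma_regs *regs = &chan->regs;

        dma_cookie_init(dchan);
        writel(PDMA_CLAIM_MASK, regs->ctrl);    /* claim the channel */

        return 0;
}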
133 static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
135 struct pdma_regs *regs = &chan->regs;
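sf_pdma_disable_request() (lines 133-135) stops the engine without dropping the claim. A sketch (PDMA_RUN_MASK is an assumed bit name):

static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;

        /* clear only the RUN bit; claim and interrupt enables remain */
        writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}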
142 struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
146 spin_lock_irqsave(&chan->vchan.lock, flags);
147 sf_pdma_disable_request(chan);
148 kfree(chan->desc);
149 chan->desc = NULL;
150 vchan_get_all_descriptors(&chan->vchan, &head);
151 sf_pdma_disclaim_chan(chan);
152 spin_unlock_irqrestore(&chan->vchan.lock, flags);
153 vchan_dma_desc_free_list(&chan->vchan, &head);
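Lines 142-153 show the channel teardown path nearly in full; the matched lines stitch together as below (only the locals and the LIST_HEAD are inferred). Note the ordering: stop the engine and collect descriptors under the vchan lock, then free them outside it.

static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        sf_pdma_disable_request(chan);          /* stop the hardware */
        kfree(chan->desc);                      /* drop the in-flight desc */
        chan->desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
        sf_pdma_disclaim_chan(chan);            /* release the HW claim */
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&chan->vchan, &head);
}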
156 static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
160 struct pdma_regs *regs = &chan->regs;
166 spin_lock_irqsave(&chan->vchan.lock, flags);
168 list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
175 if (cookie == tx->chan->completed_cookie)
181 vd = vchan_find_desc(&chan->vchan, cookie);
190 spin_unlock_irqrestore(&chan->vchan.lock, flags);
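Lines 156-190 compute the residue reported to dma_tx_status. A hedged reconstruction: the three-way cookie comparison is implied by the matched lines (168, 175, 181), while the readq() of the hardware residue register and the to_sf_pdma_desc() helper are assumptions:

static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
                                   dma_cookie_t cookie)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct pdma_regs *regs = &chan->regs;
        struct virt_dma_desc *vd = NULL;
        unsigned long flags;
        u64 residue = 0;

        spin_lock_irqsave(&chan->vchan.lock, flags);

        /* line 168: locate the submitted tx carrying this cookie */
        list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
                if (vd->tx.cookie == cookie)
                        tx = &vd->tx;

        if (!tx)
                goto out;

        /* line 175: already completed, nothing left to transfer */
        if (cookie == tx->chan->completed_cookie)
                goto out;

        if (cookie == tx->cookie) {
                /* active transfer: hardware reports the remaining bytes */
                residue = readq(regs->residue);
        } else {
                /* line 181: not started yet, the full size remains */
                vd = vchan_find_desc(&chan->vchan, cookie);
                if (vd)
                        residue = to_sf_pdma_desc(vd)->xfer_size;
        }

out:
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        return residue;
}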
199 struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
205 dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));
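Lines 199-205 are the tx_status callback. A sketch, assuming the residue is only filled in when the generic cookie check does not report an error:

static enum dma_status sf_pdma_tx_status(struct dma_chan *dchan,
                                         dma_cookie_t cookie,
                                         struct dma_tx_state *txstate)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        enum dma_status status = dma_cookie_status(dchan, cookie, txstate);

        if (txstate && status != DMA_ERROR)
                dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

        return status;
}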
212 struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
216 spin_lock_irqsave(&chan->vchan.lock, flags);
217 sf_pdma_disable_request(chan);
218 kfree(chan->desc);
219 chan->desc = NULL;
220 chan->xfer_err = false;
221 vchan_get_all_descriptors(&chan->vchan, &head);
222 spin_unlock_irqrestore(&chan->vchan.lock, flags);
223 vchan_dma_desc_free_list(&chan->vchan, &head);
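terminate_all (lines 212-223) mirrors the free path above: stop the engine, drop the in-flight descriptor, clear xfer_err, and drain the vchan lists. Unlike free_chan_resources it performs no disclaim, so the hardware claim survives and the channel can be reused immediately.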
228 static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
230 struct pdma_regs *regs = &chan->regs;
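sf_pdma_enable_request() (lines 228-230) is the single point that starts the engine. A sketch of the control write (all four mask names are assumptions; the listing only shows the regs pointer):

static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;

        /* claim the channel, unmask done/err interrupts, set RUN */
        writel(PDMA_CLAIM_MASK | PDMA_ENABLE_DONE_INT_MASK |
               PDMA_ENABLE_ERR_INT_MASK | PDMA_RUN_MASK,
               regs->ctrl);
}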
241 static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
243 struct virt_dma_chan *vchan = &chan->vchan;
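Lines 241-243 open the helper that dequeues the next issued descriptor. A sketch built on the standard virt-dma desc_issued list:

static struct sf_pdma_desc *
sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
{
        struct virt_dma_chan *vchan = &chan->vchan;
        struct virt_dma_desc *vdesc;

        if (list_empty(&vchan->desc_issued))
                return NULL;

        vdesc = list_first_entry(&vchan->desc_issued,
                                 struct virt_dma_desc, node);

        return container_of(vdesc, struct sf_pdma_desc, vdesc);
}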
254 static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
256 struct sf_pdma_desc *desc = chan->desc;
257 struct pdma_regs *regs = &chan->regs;
260 dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
269 chan->desc = desc;
270 chan->status = DMA_IN_PROGRESS;
271 sf_pdma_enable_request(chan);
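Lines 254-271 program one descriptor into the hardware and start it. The register writes between the matched lines are inferred from the regs fields initialized at lines 454-492:

static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
        struct sf_pdma_desc *desc = chan->desc;
        struct pdma_regs *regs = &chan->regs;

        if (!desc) {
                dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
                return;
        }

        /* program type, size and both addresses, then kick the engine */
        writel(desc->xfer_type, regs->xfer_type);
        writeq(desc->xfer_size, regs->xfer_size);
        writeq(desc->dst_addr, regs->dst_addr);
        writeq(desc->src_addr, regs->src_addr);

        chan->desc = desc;
        chan->status = DMA_IN_PROGRESS;
        sf_pdma_enable_request(chan);
}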
276 struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
279 spin_lock_irqsave(&chan->vchan.lock, flags);
281 if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
283 chan->desc = sf_pdma_get_first_pending_desc(chan);
284 sf_pdma_xfer_desc(chan);
287 spin_unlock_irqrestore(&chan->vchan.lock, flags);
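Lines 276-287 are issue_pending. The matched lines make the idle check explicit; a sketch:

static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vchan.lock, flags);

        /* kick the hardware only when idle; otherwise the done path
         * (lines 315-317) chains the next descriptor */
        if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
                chan->desc = sf_pdma_get_first_pending_desc(chan);
                sf_pdma_xfer_desc(chan);
        }

        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}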
300 struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
303 spin_lock_irqsave(&chan->lock, flags);
304 if (chan->xfer_err) {
305 chan->retries = MAX_RETRY;
306 chan->status = DMA_COMPLETE;
307 chan->xfer_err = false;
309 spin_unlock_irqrestore(&chan->lock, flags);
311 spin_lock_irqsave(&chan->vchan.lock, flags);
312 list_del(&chan->desc->vdesc.node);
313 vchan_cookie_complete(&chan->desc->vdesc);
315 chan->desc = sf_pdma_get_first_pending_desc(chan);
316 if (chan->desc)
317 sf_pdma_xfer_desc(chan);
319 spin_unlock_irqrestore(&chan->vchan.lock, flags);
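Lines 300-319 are the done tasklet; the matched lines cover nearly the whole body. Stitched together (only the flags local is inferred):

static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
        struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
        unsigned long flags;

        /* a transfer that errored but finally completed resets the
         * retry budget */
        spin_lock_irqsave(&chan->lock, flags);
        if (chan->xfer_err) {
                chan->retries = MAX_RETRY;
                chan->status = DMA_COMPLETE;
                chan->xfer_err = false;
        }
        spin_unlock_irqrestore(&chan->lock, flags);

        /* complete the finished descriptor, then chain the next one */
        spin_lock_irqsave(&chan->vchan.lock, flags);
        list_del(&chan->desc->vdesc.node);
        vchan_cookie_complete(&chan->desc->vdesc);

        chan->desc = sf_pdma_get_first_pending_desc(chan);
        if (chan->desc)
                sf_pdma_xfer_desc(chan);

        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}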
324 struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
325 struct sf_pdma_desc *desc = chan->desc;
328 spin_lock_irqsave(&chan->lock, flags);
329 if (chan->retries <= 0) {
331 spin_unlock_irqrestore(&chan->lock, flags);
335 chan->retries--;
336 chan->xfer_err = true;
337 chan->status = DMA_ERROR;
339 sf_pdma_enable_request(chan);
340 spin_unlock_irqrestore(&chan->lock, flags);
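Lines 324-340 are the error tasklet, a bounded-retry recovery loop. A reconstruction; the give-up branch invoking the descriptor callback is an assumption (the listing only shows the unlock at line 331):

static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
        struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
        struct sf_pdma_desc *desc = chan->desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->retries <= 0) {
                /* out of retries: report the failure (assumed) */
                spin_unlock_irqrestore(&chan->lock, flags);
                dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
        } else {
                /* retry: re-arm the same transfer */
                chan->retries--;
                chan->xfer_err = true;
                chan->status = DMA_ERROR;

                sf_pdma_enable_request(chan);
                spin_unlock_irqrestore(&chan->lock, flags);
        }
}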
346 struct sf_pdma_chan *chan = dev_id;
347 struct pdma_regs *regs = &chan->regs;
350 spin_lock(&chan->vchan.lock);
355 tasklet_hi_schedule(&chan->done_tasklet);
358 struct sf_pdma_desc *desc = chan->desc;
364 sf_pdma_xfer_desc(chan);
367 spin_unlock(&chan->vchan.lock);
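Lines 346-367 are the done interrupt handler. The matched lines show the tasklet hand-off at 355 and a resubmission path around 358-364; a hedged reconstruction in which the interrupt acknowledgment and the partial-transfer arithmetic are assumptions:

static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
        struct sf_pdma_chan *chan = dev_id;
        struct pdma_regs *regs = &chan->regs;
        u64 residue;

        spin_lock(&chan->vchan.lock);
        /* ack the done interrupt (status-mask name assumed) */
        writel(readl(regs->ctrl) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
        residue = readq(regs->residue);

        if (!residue) {
                /* fully done: complete in bottom-half context */
                tasklet_hi_schedule(&chan->done_tasklet);
        } else {
                /* partial: advance the descriptor and resubmit the rest */
                struct sf_pdma_desc *desc = chan->desc;

                desc->src_addr += desc->xfer_size - residue;
                desc->dst_addr += desc->xfer_size - residue;
                desc->xfer_size = residue;

                sf_pdma_xfer_desc(chan);
        }

        spin_unlock(&chan->vchan.lock);

        return IRQ_HANDLED;
}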
374 struct sf_pdma_chan *chan = dev_id;
375 struct pdma_regs *regs = &chan->regs;
377 spin_lock(&chan->lock);
379 spin_unlock(&chan->lock);
381 tasklet_schedule(&chan->err_tasklet);
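Lines 374-381 are the error interrupt handler: acknowledge under chan->lock, then defer recovery to the tasklet. A sketch (the status-mask name is assumed):

static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
        struct sf_pdma_chan *chan = dev_id;
        struct pdma_regs *regs = &chan->regs;

        spin_lock(&chan->lock);
        writel(readl(regs->ctrl) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
        spin_unlock(&chan->lock);

        tasklet_schedule(&chan->err_tasklet);

        return IRQ_HANDLED;
}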
404 struct sf_pdma_chan *chan;
407 chan = &pdma->chans[i];
414 dev_name(&pdev->dev), (void *)chan);
420 chan->txirq = irq;
427 dev_name(&pdev->dev), (void *)chan);
433 chan->errirq = irq;
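Lines 404-433 request the per-channel interrupt pair. The txirq/errirq assignments imply a done line at platform index 2*i and an error line at 2*i + 1; a sketch of one loop iteration (the n_chans field name and the error handling are assumptions):

for (i = 0; i < pdma->n_chans; i++) {
        chan = &pdma->chans[i];

        irq = platform_get_irq(pdev, i * 2);            /* done IRQ */
        if (irq < 0)
                return -EINVAL;
        r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
                             dev_name(&pdev->dev), (void *)chan);
        if (r)
                return r;
        chan->txirq = irq;

        irq = platform_get_irq(pdev, (i * 2) + 1);      /* error IRQ */
        if (irq < 0)
                return -EINVAL;
        r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
                             dev_name(&pdev->dev), (void *)chan);
        if (r)
                return r;
        chan->errirq = irq;
}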
454 struct sf_pdma_chan *chan;
459 chan = &pdma->chans[i];
461 chan->regs.ctrl =
463 chan->regs.xfer_type =
465 chan->regs.xfer_size =
467 chan->regs.dst_addr =
469 chan->regs.src_addr =
471 chan->regs.act_type =
473 chan->regs.residue =
475 chan->regs.cur_dst_addr =
477 chan->regs.cur_src_addr =
480 chan->pdma = pdma;
481 chan->pm_state = RUNNING;
482 chan->slave_id = i;
483 chan->xfer_err = false;
484 spin_lock_init(&chan->lock);
486 chan->vchan.desc_free = sf_pdma_free_desc;
487 vchan_init(&chan->vchan, &pdma->dma_dev);
489 writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);
491 tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
492 tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
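Lines 454-492 are the per-channel setup loop; only the left-hand sides of the register assignments are matched. A plausible completion of the register map (the SF_PDMA_REG_BASE macro and the PDMA_* offset names are assumptions patterned on the field names):

chan->regs.ctrl         = SF_PDMA_REG_BASE(i) + PDMA_CTRL;
chan->regs.xfer_type    = SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
chan->regs.xfer_size    = SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
chan->regs.dst_addr     = SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
chan->regs.src_addr     = SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
chan->regs.act_type     = SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
chan->regs.residue      = SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
chan->regs.cur_dst_addr = SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
chan->regs.cur_src_addr = SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

The remaining matched lines (480-492) then wire the channel back-pointer, PM state, per-channel lock, virt-dma hooks, a CTRL clear, and the two tasklets.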
605 list_del(&ch->vchan.chan.device_node);