Lines matching references to "dch"

218 struct d350_chan *dch = to_d350_chan(chan);
226 desc->tsz = __ffs(len | dest | src | (1 << dch->tsz));
245 cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
246 cmd[9] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
250 return vchan_tx_prep(&dch->vc, &desc->vd, flags);
256 struct d350_chan *dch = to_d350_chan(chan);
264 desc->tsz = __ffs(len | dest | (1 << dch->tsz));
281 cmd[6] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
286 return vchan_tx_prep(&dch->vc, &desc->vd, flags);
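
Both prep routines size the transfer the same way: the lowest set bit of the OR of the length and the participating addresses is the largest power-of-two unit that divides all of them, and ORing in (1 << dch->tsz) caps the result at the channel's hardware data width. Note __ffs() returns a bit index, so the result is an exponent, not a byte count; the memset variant at line 264 simply omits src, since a register fill imposes no source alignment. A minimal sketch of the idea (pick_tsz() is a hypothetical helper, the driver computes this inline; max_tsz is assumed to be log2 of the data width in bytes):

#include <linux/bitops.h>	/* __ffs() */

static unsigned int pick_tsz(unsigned long len, unsigned long dest,
			     unsigned long src, unsigned int max_tsz)
{
	/*
	 * The lowest set bit of (len | dest | src) is the largest power
	 * of two dividing the length and both addresses; ORing in
	 * (1 << max_tsz) bounds the result at the channel width.
	 */
	return __ffs(len | dest | src | (1UL << max_tsz));
}
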
291 struct d350_chan *dch = to_d350_chan(chan);
294 spin_lock_irqsave(&dch->vc.lock, flags);
295 if (dch->status == DMA_IN_PROGRESS) {
296 writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD);
297 dch->status = DMA_PAUSED;
299 spin_unlock_irqrestore(&dch->vc.lock, flags);
306 struct d350_chan *dch = to_d350_chan(chan);
309 spin_lock_irqsave(&dch->vc.lock, flags);
310 if (dch->status == DMA_PAUSED) {
311 writel_relaxed(CH_CMD_RESUME, dch->base + CH_CMD);
312 dch->status = DMA_IN_PROGRESS;
314 spin_unlock_irqrestore(&dch->vc.lock, flags);
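
The pause path (lines 294-299) and resume path (lines 309-314) are mirror images: each command is issued under the vchan lock, and dch->status is only toggled from the one state in which the command is legal, so the software state and the hardware state cannot diverge. A reconstruction of the pause side from the matches above (the function name and return value are assumptions; resume swaps CH_CMD_RESUME and the two states):

static int d350_pause(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (dch->status == DMA_IN_PROGRESS) {
		/* Command write and state change stay atomic. */
		writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD);
		dch->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	return 0;
}
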
319 static u32 d350_get_residue(struct d350_chan *dch)
324 hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
327 xsize = readl_relaxed(dch->base + CH_XSIZE);
328 hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
334 return res << dch->desc->tsz;
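
d350_get_residue() reads a counter split across two 32-bit registers: lines 324-328 show CH_XSIZEHI sampled both before and after CH_XSIZE, which only makes sense as a torn-read guard that retries until the high half is stable. Line 334 then scales the remaining element count back to bytes with the descriptor's transfer-size shift. A sketch of the loop; the retry bound is an assumption, and combine_xsize() is a hypothetical stand-in for the field extraction, whose masks are not visible in the matches:

static u32 d350_get_residue(struct d350_chan *dch)
{
	u32 xsize, xsizehi, hi_new;
	int retries = 3;	/* assumed bound; a never-stable high half is broken hw */

	hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
	do {
		xsizehi = hi_new;
		xsize = readl_relaxed(dch->base + CH_XSIZE);
		hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
	} while (xsizehi != hi_new && --retries);

	/* combine_xsize() is hypothetical; elements -> bytes via tsz shift. */
	return combine_xsize(xsize, xsizehi) << dch->desc->tsz;
}
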
339 struct d350_chan *dch = to_d350_chan(chan);
343 spin_lock_irqsave(&dch->vc.lock, flags);
344 writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD);
345 if (dch->desc) {
346 if (dch->status != DMA_ERROR)
347 vchan_terminate_vdesc(&dch->desc->vd);
348 dch->desc = NULL;
349 dch->status = DMA_COMPLETE;
351 vchan_get_all_descriptors(&dch->vc, &list);
352 list_splice_tail(&list, &dch->vc.desc_terminated);
353 spin_unlock_irqrestore(&dch->vc.lock, flags);
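
The terminate path at lines 343-353 follows the standard dmaengine shape: stop the hardware, detach the in-flight descriptor (handing it to vchan_terminate_vdesc() unless it already died with DMA_ERROR), then move everything still queued onto desc_terminated so a later vchan_synchronize() can reap it outside the lock. Reconstructed from the matches (signature and return value assumed):

static int d350_terminate_all(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dch->vc.lock, flags);
	writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD);
	if (dch->desc) {
		if (dch->status != DMA_ERROR)
			vchan_terminate_vdesc(&dch->desc->vd);
		dch->desc = NULL;
		dch->status = DMA_COMPLETE;
	}
	vchan_get_all_descriptors(&dch->vc, &list);
	list_splice_tail(&list, &dch->vc.desc_terminated);
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	return 0;
}
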
360 struct d350_chan *dch = to_d350_chan(chan);
362 vchan_synchronize(&dch->vc);
373 struct d350_chan *dch = to_d350_chan(chan);
381 spin_lock_irqsave(&dch->vc.lock, flags);
382 if (cookie == dch->cookie) {
383 status = dch->status;
385 dch->residue = d350_get_residue(dch);
386 residue = dch->residue;
387 } else if ((vd = vchan_find_desc(&dch->vc, cookie))) {
393 spin_unlock_irqrestore(&dch->vc.lock, flags);
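
The status query distinguishes two live cases at lines 382-387: the cookie belongs to the descriptor currently on the hardware (refresh the residue from the counters) or it is still queued on the virtual channel (the full length remains). A sketch under the usual dmaengine conventions; the dma_cookie_status() pre-check, the DMA_PAUSED refresh, and the dma_set_residue() tail are assumptions not visible in the matches:

static enum dma_status d350_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct d350_chan *dch = to_d350_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(chan, cookie, state);

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (cookie == dch->cookie) {
		status = dch->status;
		if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
			dch->residue = d350_get_residue(dch);
		residue = dch->residue;
	} else if ((vd = vchan_find_desc(&dch->vc, cookie))) {
		/* Still queued: nothing transferred yet. */
		residue = d350_desc_bytes(to_d350_desc(vd));
	}
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	if (state)
		dma_set_residue(state, residue);

	return status;
}
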
399 static void d350_start_next(struct d350_chan *dch)
403 dch->desc = to_d350_desc(vchan_next_desc(&dch->vc));
404 if (!dch->desc)
407 list_del(&dch->desc->vd.node);
408 dch->status = DMA_IN_PROGRESS;
409 dch->cookie = dch->desc->vd.tx.cookie;
410 dch->residue = d350_desc_bytes(dch->desc);
412 hdr = dch->desc->command[0];
413 reg = &dch->desc->command[1];
416 writel_relaxed(*reg++, dch->base + CH_INTREN);
418 writel_relaxed(*reg++, dch->base + CH_CTRL);
420 writel_relaxed(*reg++, dch->base + CH_SRCADDR);
422 writel_relaxed(*reg++, dch->base + CH_SRCADDRHI);
424 writel_relaxed(*reg++, dch->base + CH_DESADDR);
426 writel_relaxed(*reg++, dch->base + CH_DESADDRHI);
428 writel_relaxed(*reg++, dch->base + CH_XSIZE);
430 writel_relaxed(*reg++, dch->base + CH_XSIZEHI);
432 writel_relaxed(*reg++, dch->base + CH_SRCTRANSCFG);
434 writel_relaxed(*reg++, dch->base + CH_DESTRANSCFG);
436 writel_relaxed(*reg++, dch->base + CH_XADDRINC);
438 writel_relaxed(*reg++, dch->base + CH_FILLVAL);
440 writel_relaxed(*reg++, dch->base + CH_SRCTRIGINCFG);
442 writel_relaxed(*reg++, dch->base + CH_DESTRIGINCFG);
444 writel_relaxed(*reg++, dch->base + CH_AUTOCFG);
446 writel_relaxed(*reg++, dch->base + CH_LINKADDR);
448 writel_relaxed(*reg++, dch->base + CH_LINKADDRHI);
450 writel(CH_CMD_ENABLE, dch->base + CH_CMD);
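
The run of writes at lines 416-448 walks a packed command buffer: command[0] is a header bitmap and reg advances through command[1..], so only registers whose header bit is set consume a word. The trailing writel() at line 450 is deliberately not _relaxed, ordering all the setup writes before the enable doorbell. A condensed sketch; the LINK_* bit names are assumptions standing in for whatever guards each write:

	hdr = dch->desc->command[0];
	reg = &dch->desc->command[1];

	if (hdr & LINK_INTREN)		/* LINK_* names assumed */
		writel_relaxed(*reg++, dch->base + CH_INTREN);
	if (hdr & LINK_CTRL)
		writel_relaxed(*reg++, dch->base + CH_CTRL);
	/* ...same pattern through CH_LINKADDRHI... */

	/* Non-relaxed: setup must reach the device before the doorbell. */
	writel(CH_CMD_ENABLE, dch->base + CH_CMD);
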
455 struct d350_chan *dch = to_d350_chan(chan);
458 spin_lock_irqsave(&dch->vc.lock, flags);
459 if (vchan_issue_pending(&dch->vc) && !dch->desc)
460 d350_start_next(dch);
461 spin_unlock_irqrestore(&dch->vc.lock, flags);
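
issue_pending only kicks the hardware when the channel is idle (!dch->desc); once a transfer is running, the completion interrupt chains the next descriptor via d350_start_next() at line 496. Reconstructed from lines 458-461:

static void d350_issue_pending(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (vchan_issue_pending(&dch->vc) && !dch->desc)
		d350_start_next(dch);
	spin_unlock_irqrestore(&dch->vc.lock, flags);
}
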
466 struct d350_chan *dch = data;
467 struct device *dev = dch->vc.chan.device->dev;
468 struct virt_dma_desc *vd = &dch->desc->vd;
471 ch_status = readl(dch->base + CH_STATUS);
476 u32 errinfo = readl_relaxed(dch->base + CH_ERRINFO);
485 vd->tx_result.residue = d350_get_residue(dch);
489 writel_relaxed(ch_status, dch->base + CH_STATUS);
491 spin_lock(&dch->vc.lock);
494 dch->status = DMA_COMPLETE;
495 dch->residue = 0;
496 d350_start_next(dch);
498 dch->status = DMA_ERROR;
499 dch->residue = vd->tx_result.residue;
501 spin_unlock(&dch->vc.lock);
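
The handler's shape at lines 471-501: read CH_STATUS, decode CH_ERRINFO and capture the residue into vd->tx_result on error, acknowledge by writing the status back (evidently write-one-to-clear), then under the lock either complete the descriptor and start the next, or park the channel in DMA_ERROR. A simplified skeleton; the CH_STAT_ERR bit name, the vchan_cookie_complete() call, and the collapsed error decoding (the driver's dev pointer at line 467 suggests it also logs errinfo) are all assumptions:

static irqreturn_t d350_irq(int irq, void *data)
{
	struct d350_chan *dch = data;
	struct virt_dma_desc *vd;
	u32 ch_status;

	ch_status = readl(dch->base + CH_STATUS);
	if (!ch_status)
		return IRQ_NONE;	/* IRQF_SHARED: not our interrupt */

	vd = &dch->desc->vd;
	if (ch_status & CH_STAT_ERR) {	/* bit name assumed */
		/* Simplified: errinfo decoding collapsed to one result. */
		vd->tx_result.result = DMA_TRANS_ABORTED;
		vd->tx_result.residue = d350_get_residue(dch);
	}
	writel_relaxed(ch_status, dch->base + CH_STATUS);	/* W1C ack */

	spin_lock(&dch->vc.lock);
	if (!(ch_status & CH_STAT_ERR)) {
		vchan_cookie_complete(vd);	/* assumed completion call */
		dch->status = DMA_COMPLETE;
		dch->residue = 0;
		d350_start_next(dch);
	} else {
		dch->status = DMA_ERROR;
		dch->residue = vd->tx_result.residue;
	}
	spin_unlock(&dch->vc.lock);

	return IRQ_HANDLED;
}
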
508 struct d350_chan *dch = to_d350_chan(chan);
509 int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED,
510 dev_name(&dch->vc.chan.dev->device), dch);
512 writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR, dch->base + CH_INTREN);
519 struct d350_chan *dch = to_d350_chan(chan);
521 writel_relaxed(0, dch->base + CH_INTREN);
522 free_irq(dch->irq, dch);
523 vchan_free_chan_resources(&dch->vc);
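
Resource setup and teardown are symmetric: alloc requests the (shared, hence the IRQ_NONE check above) per-channel interrupt and then unmasks DONE and ERR in CH_INTREN; free masks everything, releases the IRQ, and lets vchan_free_chan_resources() reap descriptors. A reconstruction of the alloc side from lines 508-512; only the error-check placement is assumed:

static int d350_alloc_chan_resources(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED,
			      dev_name(&dch->vc.chan.dev->device), dch);
	if (!ret)
		writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR,
			       dch->base + CH_INTREN);

	return ret;
}
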
588 struct d350_chan *dch = &dmac->channels[i];
590 dch->base = base + DMACH(i);
591 writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD);
593 reg = readl_relaxed(dch->base + CH_BUILDCFG1);
598 dch->irq = platform_get_irq(pdev, i);
599 if (dch->irq < 0)
600 return dev_err_probe(dev, dch->irq,
603 dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg);
604 dch->has_trig = FIELD_GET(CH_CFG_HAS_TRIGIN, reg) &
608 memset &= dch->has_wrap;
610 reg = readl_relaxed(dch->base + CH_BUILDCFG0);
611 dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg);
615 writel_relaxed(reg, dch->base + CH_LINKATTR);
617 dch->vc.desc_free = d350_desc_free;
618 vchan_init(&dch->vc, &dmac->dma);
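
Per-channel probe order at lines 588-618: clear the channel, read BUILDCFG1 for optional features (the right-hand operand of the `&` at line 604 is on a continuation line that does not mention dch, so the listing truncates it), read BUILDCFG0 for the data width that later caps tsz in the prep routines, then hook up desc_free and register the vchan. Line 608 also demotes the controller-wide memset capability whenever a channel lacks wrap support. A condensed sketch of the discovery reads; the CH_CFG_* masks are taken from the matches, their definitions are not shown there:

	/* Inside the probe loop, once per channel i (condensed). */
	struct d350_chan *dch = &dmac->channels[i];

	dch->base = base + DMACH(i);
	writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD);

	reg = readl_relaxed(dch->base + CH_BUILDCFG1);
	dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg);

	reg = readl_relaxed(dch->base + CH_BUILDCFG0);
	dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg);	/* log2(bytes); feeds the __ffs cap */

	dch->vc.desc_free = d350_desc_free;
	vchan_init(&dch->vc, &dmac->dma);
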