xref: /linux/drivers/dma/amd/ptdma/ptdma-dmaengine.c (revision e78f70bad29c5ae1e1076698b690b15794e9b81e)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Passthrough DMA device driver
 * -- Based on the CCP driver
 *
 * Copyright (C) 2016,2021 Advanced Micro Devices, Inc.
 *
 * Author: Sanjay R Mehta <sanju.mehta@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/bitfield.h>
#include "ptdma.h"
#include "../ae4dma/ae4dma.h"
#include "../../dmaengine.h"

static char *ae4_error_codes[] = {
	"",
	"ERR 01: INVALID HEADER DW0",
	"ERR 02: INVALID STATUS",
	"ERR 03: INVALID LENGTH - 4 BYTE ALIGNMENT",
	"ERR 04: INVALID SRC ADDR - 4 BYTE ALIGNMENT",
	"ERR 05: INVALID DST ADDR - 4 BYTE ALIGNMENT",
	"ERR 06: INVALID ALIGNMENT",
	"ERR 07: INVALID DESCRIPTOR",
};

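/* Translate an AE4DMA hardware error code into a human-readable log message. */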
static void ae4_log_error(struct pt_device *d, int e)
{
	/* ERR 01 - 07 represent invalid AE4 errors */
	if (e <= 7)
		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", ae4_error_codes[e], e);
	/* ERR 08 - 15 represent invalid descriptor errors */
	else if (e > 7 && e <= 15)
		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "INVALID DESCRIPTOR", e);
	/* ERR 16 - 31 represent firmware errors */
	else if (e > 15 && e <= 31)
		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FIRMWARE ERROR", e);
	/* ERR 32 - 63 represent fatal errors */
	else if (e > 31 && e <= 63)
		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "FATAL ERROR", e);
	/* ERR 64 - 255 represent PTE errors */
	else if (e > 63 && e <= 255)
		dev_info(d->dev, "AE4DMA error: %s (0x%x)\n", "PTE ERROR", e);
	else
		dev_info(d->dev, "Unknown AE4DMA error: 0x%x\n", e);
}

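/*
 * Inspect the descriptor at @idx in the command queue ring; if its status
 * indicates anything other than successful completion, record the error code
 * in the queue and log it.
 */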
void ae4_check_status_error(struct ae4_cmd_queue *ae4cmd_q, int idx)
{
	struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;
	struct ae4dma_desc desc;
	u8 status;

	memcpy(&desc, &cmd_q->qbase[idx], sizeof(struct ae4dma_desc));
	status = desc.dw1.status;
	if (status && status != AE4_DESC_COMPLETED) {
		cmd_q->cmd_error = desc.dw1.err_code;
		if (cmd_q->cmd_error)
			ae4_log_error(cmd_q->pt, cmd_q->cmd_error);
	}
}
EXPORT_SYMBOL_GPL(ae4_check_status_error);

static inline struct pt_dma_chan *to_pt_chan(struct dma_chan *dma_chan)
{
	return container_of(dma_chan, struct pt_dma_chan, vc.chan);
}

static inline struct pt_dma_desc *to_pt_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct pt_dma_desc, vd);
}

static void pt_free_chan_resources(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_free_chan_resources(&chan->vc);
}

static void pt_synchronize(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);

	vchan_synchronize(&chan->vc);
}

static void pt_do_cleanup(struct virt_dma_desc *vd)
{
	struct pt_dma_desc *desc = to_pt_desc(vd);
	struct pt_device *pt = desc->pt;

	kmem_cache_free(pt->dma_desc_cache, desc);
}

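/*
 * AE4 devices provide one hardware command queue per DMA channel; the
 * original PTDMA engine exposes a single queue for the whole device.
 */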
static struct pt_cmd_queue *pt_get_cmd_queue(struct pt_device *pt, struct pt_dma_chan *chan)
{
	struct ae4_cmd_queue *ae4cmd_q;
	struct pt_cmd_queue *cmd_q;
	struct ae4_device *ae4;

	if (pt->ver == AE4_DMA_VERSION) {
		ae4 = container_of(pt, struct ae4_device, pt);
		ae4cmd_q = &ae4->ae4cmd_q[chan->id];
		cmd_q = &ae4cmd_q->cmd_q;
	} else {
		cmd_q = &pt->cmd_q;
	}

	return cmd_q;
}

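/*
 * Copy a descriptor into the ring at the current tail write index, advance
 * the index (modulo CMD_Q_LEN) and publish it to the hardware write-index
 * register, then wake the queue's wait queue so pending work is processed.
 */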
static int ae4_core_execute_cmd(struct ae4dma_desc *desc, struct ae4_cmd_queue *ae4cmd_q)
{
	bool soc = FIELD_GET(DWORD0_SOC, desc->dwouv.dw0);
	struct pt_cmd_queue *cmd_q = &ae4cmd_q->cmd_q;

	if (soc) {
		desc->dwouv.dw0 |= FIELD_PREP(DWORD0_IOC, desc->dwouv.dw0);
		desc->dwouv.dw0 &= ~DWORD0_SOC;
	}

	mutex_lock(&ae4cmd_q->cmd_lock);
	memcpy(&cmd_q->qbase[ae4cmd_q->tail_wi], desc, sizeof(struct ae4dma_desc));
	ae4cmd_q->q_cmd_count++;
	ae4cmd_q->tail_wi = (ae4cmd_q->tail_wi + 1) % CMD_Q_LEN;
	writel(ae4cmd_q->tail_wi, cmd_q->reg_control + AE4_WR_IDX_OFF);
	mutex_unlock(&ae4cmd_q->cmd_lock);

	wake_up(&ae4cmd_q->q_w);

	return 0;
}

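/* Build an AE4 descriptor from the passthru parameters and submit it. */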
static int pt_core_perform_passthru_ae4(struct pt_cmd_queue *cmd_q,
					struct pt_passthru_engine *pt_engine)
{
	struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
	struct ae4dma_desc desc;

	cmd_q->cmd_error = 0;
	cmd_q->total_pt_ops++;
	memset(&desc, 0, sizeof(desc));
	desc.dwouv.dws.byte0 = CMD_AE4_DESC_DW0_VAL;

	desc.dw1.status = 0;
	desc.dw1.err_code = 0;
	desc.dw1.desc_id = 0;

	desc.length = pt_engine->src_len;

	desc.src_lo = upper_32_bits(pt_engine->src_dma);
	desc.src_hi = lower_32_bits(pt_engine->src_dma);
	desc.dst_lo = upper_32_bits(pt_engine->dst_dma);
	desc.dst_hi = lower_32_bits(pt_engine->dst_dma);

	return ae4_core_execute_cmd(&desc, ae4cmd_q);
}

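/* Mark the descriptor as issued and hand it to the engine-specific submit path. */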
static int pt_dma_start_desc(struct pt_dma_desc *desc, struct pt_dma_chan *chan)
{
	struct pt_passthru_engine *pt_engine;
	struct pt_device *pt;
	struct pt_cmd *pt_cmd;
	struct pt_cmd_queue *cmd_q;

	desc->issued_to_hw = 1;

	pt_cmd = &desc->pt_cmd;
	pt = pt_cmd->pt;

	cmd_q = pt_get_cmd_queue(pt, chan);

	pt_engine = &pt_cmd->passthru;

	pt->tdata.cmd = pt_cmd;

	/* Execute the command */
	if (pt->ver == AE4_DMA_VERSION)
		pt_cmd->ret = pt_core_perform_passthru_ae4(cmd_q, pt_engine);
	else
		pt_cmd->ret = pt_core_perform_passthru(cmd_q, pt_engine);

	return 0;
}

static struct pt_dma_desc *pt_next_dma_desc(struct pt_dma_chan *chan)
{
	/* Get the next DMA descriptor on the active list */
	struct virt_dma_desc *vd = vchan_next_desc(&chan->vc);

	return vd ? to_pt_desc(vd) : NULL;
}

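/*
 * Walk the channel's active list: complete descriptors that have already been
 * issued to the hardware and return the next one that still needs to be
 * submitted.  On AE4 devices the completion bookkeeping is done later, in
 * pt_cmd_callback_work(), so only the lookup of the next descriptor happens
 * here.
 */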
static struct pt_dma_desc *pt_handle_active_desc(struct pt_dma_chan *chan,
						 struct pt_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	struct virt_dma_desc *vd;
	struct pt_device *pt;
	unsigned long flags;

	pt = chan->pt;
	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			if (!desc->issued_to_hw) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;
			}

			tx_desc = &desc->vd.tx;
			vd = &desc->vd;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->vc.lock, flags);

		if (pt->ver != AE4_DMA_VERSION && desc) {
			if (desc->status != DMA_COMPLETE) {
				if (desc->status != DMA_ERROR)
					desc->status = DMA_COMPLETE;

				dma_cookie_complete(tx_desc);
				dma_descriptor_unmap(tx_desc);
				list_del(&desc->vd.node);
			} else {
				/* Don't handle it twice */
				tx_desc = NULL;
			}
		}

		desc = pt_next_dma_desc(chan);

		spin_unlock_irqrestore(&chan->vc.lock, flags);

		if (pt->ver != AE4_DMA_VERSION && tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);
			dma_run_dependencies(tx_desc);
			vchan_vdesc_fini(vd);
		}
	} while (desc);

	return NULL;
}

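/*
 * The ring is treated as full once only one free slot remains between the
 * hardware read and write indices; that slot is kept empty so a full ring
 * can be distinguished from an empty one.
 */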
static inline bool ae4_core_queue_full(struct pt_cmd_queue *cmd_q)
{
	u32 front_wi = readl(cmd_q->reg_control + AE4_WR_IDX_OFF);
	u32 rear_ri = readl(cmd_q->reg_control + AE4_RD_IDX_OFF);

	if (((MAX_CMD_QLEN + front_wi - rear_ri) % MAX_CMD_QLEN) >= (MAX_CMD_QLEN - 1))
		return true;

	return false;
}

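/*
 * Command completion callback for the legacy PTDMA path, also used to kick
 * off processing from pt_issue_pending() and pt_resume().  On AE4 devices it
 * additionally throttles submission while the hardware ring is full, waiting
 * on the queue's completion to signal that space has been freed.
 */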
static void pt_cmd_callback(void *data, int err)
{
	struct pt_dma_desc *desc = data;
	struct ae4_cmd_queue *ae4cmd_q;
	struct dma_chan *dma_chan;
	struct pt_dma_chan *chan;
	struct ae4_device *ae4;
	struct pt_device *pt;
	int ret;

	if (err == -EINPROGRESS)
		return;

	dma_chan = desc->vd.tx.chan;
	chan = to_pt_chan(dma_chan);
	pt = chan->pt;

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		if (pt->ver == AE4_DMA_VERSION) {
			ae4 = container_of(pt, struct ae4_device, pt);
			ae4cmd_q = &ae4->ae4cmd_q[chan->id];

			if (ae4cmd_q->q_cmd_count >= (CMD_Q_LEN - 1) ||
			    ae4_core_queue_full(&ae4cmd_q->cmd_q)) {
				wake_up(&ae4cmd_q->q_w);

				if (!wait_for_completion_timeout(&ae4cmd_q->cmp,
								 msecs_to_jiffies(AE4_TIME_OUT))) {
					dev_err(pt->dev, "AE4DMA queue %d timeout\n", ae4cmd_q->id);
					break;
				}

				reinit_completion(&ae4cmd_q->cmp);
				continue;
			}
		}

		/* Check for DMA descriptor completion */
		desc = pt_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc)
			break;

		ret = pt_dma_start_desc(desc, chan);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}
}

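/* Allocate a descriptor from the per-device cache and prepare it for the vchan. */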
static struct pt_dma_desc *pt_alloc_dma_desc(struct pt_dma_chan *chan,
					     unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->pt->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vc, &desc->vd, flags);

	desc->pt = chan->pt;
	desc->pt->cmd_q.int_en = !!(flags & DMA_PREP_INTERRUPT);
	desc->issued_to_hw = 0;
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

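/*
 * Completion handler used on AE4 devices: mark the descriptor's cookie
 * complete and unmap it under the channel lock, then invoke the client
 * callback and release the descriptor via vchan_vdesc_fini().
 */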
static void pt_cmd_callback_work(void *data, int err)
{
	struct dma_async_tx_descriptor *tx_desc;
	struct pt_dma_desc *desc = data;
	struct dma_chan *dma_chan;
	struct virt_dma_desc *vd;
	struct pt_dma_chan *chan;
	unsigned long flags;

	if (!desc)
		return;

	dma_chan = desc->vd.tx.chan;
	chan = to_pt_chan(dma_chan);

	if (err == -EINPROGRESS)
		return;

	tx_desc = &desc->vd.tx;
	vd = &desc->vd;

	if (err)
		desc->status = DMA_ERROR;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (desc->status != DMA_COMPLETE) {
		if (desc->status != DMA_ERROR)
			desc->status = DMA_COMPLETE;

		dma_cookie_complete(tx_desc);
		dma_descriptor_unmap(tx_desc);
	} else {
		tx_desc = NULL;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	if (tx_desc) {
		dmaengine_desc_get_callback_invoke(tx_desc, NULL);
		dma_run_dependencies(tx_desc);
		list_del(&desc->vd.node);
		vchan_vdesc_fini(vd);
	}
}

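/*
 * Build a memcpy descriptor.  On AE4 devices the command is also queued on
 * the channel's command list and the deferred completion handler is used.
 */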
static struct pt_dma_desc *pt_create_desc(struct dma_chan *dma_chan,
					  dma_addr_t dst,
					  dma_addr_t src,
					  unsigned int len,
					  unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_passthru_engine *pt_engine;
	struct pt_device *pt = chan->pt;
	struct ae4_cmd_queue *ae4cmd_q;
	struct pt_dma_desc *desc;
	struct ae4_device *ae4;
	struct pt_cmd *pt_cmd;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	pt_cmd = &desc->pt_cmd;
	pt_cmd->pt = pt;
	pt_engine = &pt_cmd->passthru;
	pt_cmd->engine = PT_ENGINE_PASSTHRU;
	pt_engine->src_dma = src;
	pt_engine->dst_dma = dst;
	pt_engine->src_len = len;
	pt_cmd->pt_cmd_callback = pt_cmd_callback;
	pt_cmd->data = desc;

	desc->len = len;

	if (pt->ver == AE4_DMA_VERSION) {
		pt_cmd->pt_cmd_callback = pt_cmd_callback_work;
		ae4 = container_of(pt, struct ae4_device, pt);
		ae4cmd_q = &ae4->ae4cmd_q[chan->id];
		mutex_lock(&ae4cmd_q->cmd_lock);
		list_add_tail(&pt_cmd->entry, &ae4cmd_q->cmd);
		mutex_unlock(&ae4cmd_q->cmd_lock);
	}

	return desc;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_memcpy(struct dma_chan *dma_chan, dma_addr_t dst,
		   dma_addr_t src, size_t len, unsigned long flags)
{
	struct pt_dma_desc *desc;

	desc = pt_create_desc(dma_chan, dst, src, len, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}

static struct dma_async_tx_descriptor *
pt_prep_dma_interrupt(struct dma_chan *dma_chan, unsigned long flags)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;

	desc = pt_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->vd.tx;
}

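/*
 * Move all submitted descriptors onto the active list and, if the engine was
 * idle, start processing with the first pending descriptor.
 */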
static void pt_issue_pending(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc;
	struct pt_device *pt;
	unsigned long flags;
	bool engine_is_idle = true;

	pt = chan->pt;

	spin_lock_irqsave(&chan->vc.lock, flags);

	desc = pt_next_dma_desc(chan);
	if (desc && pt->ver != AE4_DMA_VERSION)
		engine_is_idle = false;

	vchan_issue_pending(&chan->vc);

	desc = pt_next_dma_desc(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was nothing active, start processing */
	if (engine_is_idle && desc)
		pt_cmd_callback(desc, 0);
}

static void pt_check_status_trans_ae4(struct pt_device *pt, struct pt_cmd_queue *cmd_q)
{
	struct ae4_cmd_queue *ae4cmd_q = container_of(cmd_q, struct ae4_cmd_queue, cmd_q);
	int i;

	for (i = 0; i < CMD_Q_LEN; i++)
		ae4_check_status_error(ae4cmd_q, i);
}

static enum dma_status
pt_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct pt_dma_chan *chan = to_pt_chan(c);
	struct pt_device *pt = chan->pt;
	struct pt_cmd_queue *cmd_q;

	cmd_q = pt_get_cmd_queue(pt, chan);

	if (pt->ver == AE4_DMA_VERSION)
		pt_check_status_trans_ae4(pt, cmd_q);
	else
		pt_check_status_trans(pt, cmd_q);

	return dma_cookie_status(c, cookie, txstate);
}

static int pt_pause(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_device *pt = chan->pt;
	struct pt_cmd_queue *cmd_q;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	cmd_q = pt_get_cmd_queue(pt, chan);
	pt_stop_queue(cmd_q);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int pt_resume(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_dma_desc *desc = NULL;
	struct pt_device *pt = chan->pt;
	struct pt_cmd_queue *cmd_q;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	cmd_q = pt_get_cmd_queue(pt, chan);
	pt_start_queue(cmd_q);
	desc = pt_next_dma_desc(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	/* If there was something active, re-start */
	if (desc)
		pt_cmd_callback(desc, 0);

	return 0;
}

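/* Terminate all outstanding work on the channel and free its descriptors. */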
static int pt_terminate_all(struct dma_chan *dma_chan)
{
	struct pt_dma_chan *chan = to_pt_chan(dma_chan);
	struct pt_device *pt = chan->pt;
	struct pt_cmd_queue *cmd_q;
	unsigned long flags;
	LIST_HEAD(head);

	cmd_q = pt_get_cmd_queue(pt, chan);
	if (pt->ver == AE4_DMA_VERSION)
		pt_stop_queue(cmd_q);
	else
		iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_control + 0x0010);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);
	vchan_free_chan_resources(&chan->vc);

	return 0;
}

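/*
 * Register the device with the dmaengine core: one virtual channel per AE4
 * hardware queue, or a single channel for legacy PTDMA.  Channels advertise
 * DMA_MEMCPY and DMA_INTERRUPT and are marked DMA_PRIVATE, so they are only
 * handed out to clients that explicitly request a channel.
 *
 * As a rough, illustrative sketch (not taken from this driver), a client
 * would typically obtain and use such a channel like this:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */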
int pt_dmaengine_register(struct pt_device *pt)
{
	struct dma_device *dma_dev = &pt->dma_dev;
	struct ae4_cmd_queue *ae4cmd_q = NULL;
	struct ae4_device *ae4 = NULL;
	struct pt_dma_chan *chan;
	char *desc_cache_name;
	int ret, i;

	if (pt->ver == AE4_DMA_VERSION)
		ae4 = container_of(pt, struct ae4_device, pt);

	if (ae4)
		pt->pt_dma_chan = devm_kcalloc(pt->dev, ae4->cmd_q_count,
					       sizeof(*pt->pt_dma_chan), GFP_KERNEL);
	else
		pt->pt_dma_chan = devm_kzalloc(pt->dev, sizeof(*pt->pt_dma_chan),
					       GFP_KERNEL);

	if (!pt->pt_dma_chan)
		return -ENOMEM;

	desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
					 "%s-dmaengine-desc-cache",
					 dev_name(pt->dev));
	if (!desc_cache_name)
		return -ENOMEM;

	pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
					       sizeof(struct pt_dma_desc), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!pt->dma_desc_cache)
		return -ENOMEM;

	dma_dev->dev = pt->dev;
	dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->dst_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/*
	 * PTDMA is intended to be used with the AMD NTB devices, hence
	 * marking it as DMA_PRIVATE.
	 */
	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base and prep routines */
	dma_dev->device_free_chan_resources = pt_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = pt_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = pt_prep_dma_interrupt;
	dma_dev->device_issue_pending = pt_issue_pending;
	dma_dev->device_tx_status = pt_tx_status;
	dma_dev->device_pause = pt_pause;
	dma_dev->device_resume = pt_resume;
	dma_dev->device_terminate_all = pt_terminate_all;
	dma_dev->device_synchronize = pt_synchronize;

	if (ae4) {
		for (i = 0; i < ae4->cmd_q_count; i++) {
			chan = pt->pt_dma_chan + i;
			ae4cmd_q = &ae4->ae4cmd_q[i];
			chan->id = ae4cmd_q->id;
			chan->pt = pt;
			chan->vc.desc_free = pt_do_cleanup;
			vchan_init(&chan->vc, dma_dev);
		}
	} else {
		chan = pt->pt_dma_chan;
		chan->pt = pt;
		chan->vc.desc_free = pt_do_cleanup;
		vchan_init(&chan->vc, dma_dev);
	}

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(pt->dma_desc_cache);

	return ret;
}
EXPORT_SYMBOL_GPL(pt_dmaengine_register);

void pt_dmaengine_unregister(struct pt_device *pt)
{
	struct dma_device *dma_dev = &pt->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(pt->dma_desc_cache);
}