Lines Matching +full:tpl +full:- +full:support

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
11 #include <linux/dma-mapping.h>
26 #include <linux/soc/ti/k3-ringacc.h>
29 #include <linux/dma/k3-event-router.h>
30 #include <linux/dma/ti-cppi5.h>
32 #include "../virt-dma.h"
33 #include "k3-udma.h"
34 #include "k3-psil-priv.h"
359 if (!uc->tchan) in udma_tchanrt_read()
361 return udma_read(uc->tchan->reg_rt, reg); in udma_tchanrt_read()
366 if (!uc->tchan) in udma_tchanrt_write()
368 udma_write(uc->tchan->reg_rt, reg, val); in udma_tchanrt_write()
374 if (!uc->tchan) in udma_tchanrt_update_bits()
376 udma_update_bits(uc->tchan->reg_rt, reg, mask, val); in udma_tchanrt_update_bits()
382 if (!uc->rchan) in udma_rchanrt_read()
384 return udma_read(uc->rchan->reg_rt, reg); in udma_rchanrt_read()
389 if (!uc->rchan) in udma_rchanrt_write()
391 udma_write(uc->rchan->reg_rt, reg, val); in udma_rchanrt_write()
397 if (!uc->rchan) in udma_rchanrt_update_bits()
399 udma_update_bits(uc->rchan->reg_rt, reg, mask, val); in udma_rchanrt_update_bits()
404 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in navss_psil_pair()
407 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, in navss_psil_pair()
408 tisci_rm->tisci_navss_dev_id, in navss_psil_pair()
415 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in navss_psil_unpair()
418 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, in navss_psil_unpair()
419 tisci_rm->tisci_navss_dev_id, in navss_psil_unpair()
425 struct device *chan_dev = &chan->dev->device; in k3_configure_chan_coherency()
429 chan->dev->chan_dma_dev = false; in k3_configure_chan_coherency()
431 chan_dev->dma_coherent = false; in k3_configure_chan_coherency()
432 chan_dev->dma_parms = NULL; in k3_configure_chan_coherency()
434 chan->dev->chan_dma_dev = true; in k3_configure_chan_coherency()
436 chan_dev->dma_coherent = true; in k3_configure_chan_coherency()
438 chan_dev->dma_parms = chan_dev->parent->dma_parms; in k3_configure_chan_coherency()
440 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel); in k3_configure_chan_coherency()
442 chan_dev->dma_coherent = false; in k3_configure_chan_coherency()
443 chan_dev->dma_parms = NULL; in k3_configure_chan_coherency()
451 for (i = 0; i < tpl_map->levels; i++) { in udma_get_chan_tpl_index()
452 if (chan_id >= tpl_map->start_idx[i]) in udma_get_chan_tpl_index()
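
The loop above maps a channel id to its throughput-level (TPL) index by returning the first level whose start index the channel id has reached. A minimal standalone sketch of that lookup with a hypothetical three-level map (the real level count and start indices come from the SoC match data, not these values):

#include <stdio.h>

/* Hypothetical TPL map; the driver fills the real one from SoC match data. */
struct tpl_map {
	int levels;
	int start_idx[3];
};

/* Same idea as the visible loop in udma_get_chan_tpl_index(): return the
 * index of the first level whose start_idx the channel id has reached. */
static int chan_tpl_index(const struct tpl_map *map, int chan_id)
{
	int i;

	for (i = 0; i < map->levels; i++) {
		if (chan_id >= map->start_idx[i])
			return i;
	}
	return 0;
}

int main(void)
{
	/* e.g. channels 0-3 ultra-high, 4-15 high, 16+ normal capacity */
	struct tpl_map map = { .levels = 3, .start_idx = { 16, 4, 0 } };

	printf("chan 20 -> tpl %d\n", chan_tpl_index(&map, 20));	/* 0 */
	printf("chan 10 -> tpl %d\n", chan_tpl_index(&map, 10));	/* 1 */
	printf("chan  2 -> tpl %d\n", chan_tpl_index(&map, 2));	/* 2 */
	return 0;
}
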
461 memset(&uc->config, 0, sizeof(uc->config)); in udma_reset_uchan()
462 uc->config.remote_thread_id = -1; in udma_reset_uchan()
463 uc->config.mapped_channel_id = -1; in udma_reset_uchan()
464 uc->config.default_flow_id = -1; in udma_reset_uchan()
465 uc->state = UDMA_CHAN_IS_IDLE; in udma_reset_uchan()
470 struct device *dev = uc->ud->dev; in udma_dump_chan_stdata()
474 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { in udma_dump_chan_stdata()
483 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { in udma_dump_chan_stdata()
496 return d->hwdesc[idx].cppi5_desc_paddr; in udma_curr_cppi5_desc_paddr()
501 return d->hwdesc[idx].cppi5_desc_vaddr; in udma_curr_cppi5_desc_vaddr()
507 struct udma_desc *d = uc->terminated_desc; in udma_udma_desc_from_paddr()
511 d->desc_idx); in udma_udma_desc_from_paddr()
518 d = uc->desc; in udma_udma_desc_from_paddr()
521 d->desc_idx); in udma_udma_desc_from_paddr()
533 if (uc->use_dma_pool) { in udma_free_hwdesc()
536 for (i = 0; i < d->hwdesc_count; i++) { in udma_free_hwdesc()
537 if (!d->hwdesc[i].cppi5_desc_vaddr) in udma_free_hwdesc()
540 dma_pool_free(uc->hdesc_pool, in udma_free_hwdesc()
541 d->hwdesc[i].cppi5_desc_vaddr, in udma_free_hwdesc()
542 d->hwdesc[i].cppi5_desc_paddr); in udma_free_hwdesc()
544 d->hwdesc[i].cppi5_desc_vaddr = NULL; in udma_free_hwdesc()
546 } else if (d->hwdesc[0].cppi5_desc_vaddr) { in udma_free_hwdesc()
547 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size, in udma_free_hwdesc()
548 d->hwdesc[0].cppi5_desc_vaddr, in udma_free_hwdesc()
549 d->hwdesc[0].cppi5_desc_paddr); in udma_free_hwdesc()
551 d->hwdesc[0].cppi5_desc_vaddr = NULL; in udma_free_hwdesc()
562 spin_lock_irqsave(&ud->lock, flags); in udma_purge_desc_work()
563 list_splice_tail_init(&ud->desc_to_purge, &head); in udma_purge_desc_work()
564 spin_unlock_irqrestore(&ud->lock, flags); in udma_purge_desc_work()
567 struct udma_chan *uc = to_udma_chan(vd->tx.chan); in udma_purge_desc_work()
568 struct udma_desc *d = to_udma_desc(&vd->tx); in udma_purge_desc_work()
571 list_del(&vd->node); in udma_purge_desc_work()
576 if (!list_empty(&ud->desc_to_purge)) in udma_purge_desc_work()
577 schedule_work(&ud->purge_work); in udma_purge_desc_work()
582 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); in udma_desc_free()
583 struct udma_chan *uc = to_udma_chan(vd->tx.chan); in udma_desc_free()
584 struct udma_desc *d = to_udma_desc(&vd->tx); in udma_desc_free()
587 if (uc->terminated_desc == d) in udma_desc_free()
588 uc->terminated_desc = NULL; in udma_desc_free()
590 if (uc->use_dma_pool) { in udma_desc_free()
596 spin_lock_irqsave(&ud->lock, flags); in udma_desc_free()
597 list_add_tail(&vd->node, &ud->desc_to_purge); in udma_desc_free()
598 spin_unlock_irqrestore(&ud->lock, flags); in udma_desc_free()
600 schedule_work(&ud->purge_work); in udma_desc_free()
608 if (uc->tchan) in udma_is_chan_running()
610 if (uc->rchan) in udma_is_chan_running()
623 switch (uc->config.dir) { in udma_is_chan_paused()
648 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr; in udma_get_rx_flush_hwdesc_paddr()
653 struct udma_desc *d = uc->desc; in udma_push_to_ring()
657 switch (uc->config.dir) { in udma_push_to_ring()
659 ring = uc->rflow->fd_ring; in udma_push_to_ring()
663 ring = uc->tchan->t_ring; in udma_push_to_ring()
666 return -EINVAL; in udma_push_to_ring()
669 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */ in udma_push_to_ring()
670 if (idx == -1) { in udma_push_to_ring()
683 if (uc->config.dir != DMA_DEV_TO_MEM) in udma_desc_is_rx_flush()
697 switch (uc->config.dir) { in udma_pop_from_ring()
699 ring = uc->rflow->r_ring; in udma_pop_from_ring()
703 ring = uc->tchan->tc_ring; in udma_pop_from_ring()
706 return -ENOENT; in udma_pop_from_ring()
721 return -ENOENT; in udma_pop_from_ring()
731 switch (uc->config.dir) { in udma_reset_rings()
733 if (uc->rchan) { in udma_reset_rings()
734 ring1 = uc->rflow->fd_ring; in udma_reset_rings()
735 ring2 = uc->rflow->r_ring; in udma_reset_rings()
740 if (uc->tchan) { in udma_reset_rings()
741 ring1 = uc->tchan->t_ring; in udma_reset_rings()
742 ring2 = uc->tchan->tc_ring; in udma_reset_rings()
756 if (uc->terminated_desc) { in udma_reset_rings()
757 udma_desc_free(&uc->terminated_desc->vd); in udma_reset_rings()
758 uc->terminated_desc = NULL; in udma_reset_rings()
764 if (uc->desc->dir == DMA_DEV_TO_MEM) { in udma_decrement_byte_counters()
767 if (uc->config.ep_type != PSIL_EP_NATIVE) in udma_decrement_byte_counters()
772 if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE) in udma_decrement_byte_counters()
781 if (uc->tchan) { in udma_reset_counters()
791 if (!uc->bchan) { in udma_reset_counters()
797 if (uc->rchan) { in udma_reset_counters()
814 switch (uc->config.dir) { in udma_reset_chan()
828 return -EINVAL; in udma_reset_chan()
834 /* Hard reset: re-initialize the channel to reset */ in udma_reset_chan()
839 memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); in udma_reset_chan()
840 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); in udma_reset_chan()
843 memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); in udma_reset_chan()
844 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); in udma_reset_chan()
852 if (uc->config.dir == DMA_DEV_TO_MEM) in udma_reset_chan()
858 uc->state = UDMA_CHAN_IS_IDLE; in udma_reset_chan()
865 struct udma_chan_config *ucc = &uc->config; in udma_start_desc()
867 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode && in udma_start_desc()
868 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { in udma_start_desc()
874 * PKTDMA supports pre-linked descriptor and cyclic is not in udma_start_desc()
877 for (i = 0; i < uc->desc->sglen; i++) in udma_start_desc()
887 if (uc->config.ep_type == PSIL_EP_NATIVE) in udma_chan_needs_reconfiguration()
891 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) in udma_chan_needs_reconfiguration()
899 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); in udma_start()
902 uc->desc = NULL; in udma_start()
903 return -ENOENT; in udma_start()
906 list_del(&vd->node); in udma_start()
908 uc->desc = to_udma_desc(&vd->tx); in udma_start()
922 switch (uc->desc->dir) { in udma_start()
925 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { in udma_start()
926 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | in udma_start()
927 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); in udma_start()
929 uc->ud->match_data; in udma_start()
931 if (uc->config.enable_acc32) in udma_start()
933 if (uc->config.enable_burst) in udma_start()
942 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, in udma_start()
943 match_data->statictr_z_mask)); in udma_start()
946 memcpy(&uc->static_tr, &uc->desc->static_tr, in udma_start()
947 sizeof(uc->static_tr)); in udma_start()
960 if (uc->config.ep_type == PSIL_EP_PDMA_XY) { in udma_start()
961 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | in udma_start()
962 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); in udma_start()
964 if (uc->config.enable_acc32) in udma_start()
966 if (uc->config.enable_burst) in udma_start()
974 memcpy(&uc->static_tr, &uc->desc->static_tr, in udma_start()
975 sizeof(uc->static_tr)); in udma_start()
994 return -EINVAL; in udma_start()
997 uc->state = UDMA_CHAN_IS_ACTIVE; in udma_start()
1005 enum udma_chan_state old_state = uc->state; in udma_stop()
1007 uc->state = UDMA_CHAN_IS_TERMINATING; in udma_stop()
1008 reinit_completion(&uc->teardown_completed); in udma_stop()
1010 switch (uc->config.dir) { in udma_stop()
1012 if (!uc->cyclic && !uc->desc) in udma_stop()
1013 udma_push_to_ring(uc, -1); in udma_stop()
1033 uc->state = old_state; in udma_stop()
1034 complete_all(&uc->teardown_completed); in udma_stop()
1035 return -EINVAL; in udma_stop()
1043 struct udma_desc *d = uc->desc; in udma_cyclic_packet_elapsed()
1046 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; in udma_cyclic_packet_elapsed()
1048 udma_push_to_ring(uc, d->desc_idx); in udma_cyclic_packet_elapsed()
1049 d->desc_idx = (d->desc_idx + 1) % d->sglen; in udma_cyclic_packet_elapsed()
1054 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_fetch_epib()
1056 memcpy(d->metadata, h_desc->epib, d->metadata_size); in udma_fetch_epib()
1069 if (uc->config.ep_type == PSIL_EP_NATIVE || in udma_is_desc_really_done()
1070 uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT)) in udma_is_desc_really_done()
1078 uc->tx_drain.residue = bcnt - peer_bcnt; in udma_is_desc_really_done()
1079 uc->tx_drain.tstamp = ktime_get(); in udma_is_desc_really_done()
1097 spin_lock_irqsave(&uc->vc.lock, flags); in udma_check_tx_completion()
1099 if (uc->desc) { in udma_check_tx_completion()
1101 residue_diff = uc->tx_drain.residue; in udma_check_tx_completion()
1102 time_diff = uc->tx_drain.tstamp; in udma_check_tx_completion()
1107 desc_done = udma_is_desc_really_done(uc, uc->desc); in udma_check_tx_completion()
1115 time_diff = ktime_sub(uc->tx_drain.tstamp, in udma_check_tx_completion()
1117 residue_diff -= uc->tx_drain.residue; in udma_check_tx_completion()
1126 uc->tx_drain.residue; in udma_check_tx_completion()
1129 schedule_delayed_work(&uc->tx_drain.work, HZ); in udma_check_tx_completion()
1133 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_check_tx_completion()
1140 if (uc->desc) { in udma_check_tx_completion()
1141 struct udma_desc *d = uc->desc; in udma_check_tx_completion()
1143 udma_decrement_byte_counters(uc, d->residue); in udma_check_tx_completion()
1145 vchan_cookie_complete(&d->vd); in udma_check_tx_completion()
1152 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_check_tx_completion()
1164 spin_lock(&uc->vc.lock); in udma_ring_irq_handler()
1168 complete_all(&uc->teardown_completed); in udma_ring_irq_handler()
1170 if (uc->terminated_desc) { in udma_ring_irq_handler()
1171 udma_desc_free(&uc->terminated_desc->vd); in udma_ring_irq_handler()
1172 uc->terminated_desc = NULL; in udma_ring_irq_handler()
1175 if (!uc->desc) in udma_ring_irq_handler()
1185 d->desc_idx); in udma_ring_irq_handler()
1187 dev_err(uc->ud->dev, "not matching descriptors!\n"); in udma_ring_irq_handler()
1191 if (d == uc->desc) { in udma_ring_irq_handler()
1193 if (uc->cyclic) { in udma_ring_irq_handler()
1195 vchan_cyclic_callback(&d->vd); in udma_ring_irq_handler()
1198 udma_decrement_byte_counters(uc, d->residue); in udma_ring_irq_handler()
1200 vchan_cookie_complete(&d->vd); in udma_ring_irq_handler()
1202 schedule_delayed_work(&uc->tx_drain.work, in udma_ring_irq_handler()
1211 dma_cookie_complete(&d->vd.tx); in udma_ring_irq_handler()
1215 spin_unlock(&uc->vc.lock); in udma_ring_irq_handler()
1225 spin_lock(&uc->vc.lock); in udma_udma_irq_handler()
1226 d = uc->desc; in udma_udma_irq_handler()
1228 d->tr_idx = (d->tr_idx + 1) % d->sglen; in udma_udma_irq_handler()
1230 if (uc->cyclic) { in udma_udma_irq_handler()
1231 vchan_cyclic_callback(&d->vd); in udma_udma_irq_handler()
1234 udma_decrement_byte_counters(uc, d->residue); in udma_udma_irq_handler()
1236 vchan_cookie_complete(&d->vd); in udma_udma_irq_handler()
1240 spin_unlock(&uc->vc.lock); in udma_udma_irq_handler()
1246 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1252 * only using explicit flow id number. If @from is set to -1 it will try to find
1256 * Returns -ENOMEM if it can't find a free range.
1257 * -EEXIST if the requested range is busy.
1258 * -EINVAL if wrong input values are passed.
1268 tmp_from = ud->rchan_cnt; in __udma_alloc_gp_rflow_range()
1270 if (tmp_from < ud->rchan_cnt) in __udma_alloc_gp_rflow_range()
1271 return -EINVAL; in __udma_alloc_gp_rflow_range()
1273 if (tmp_from + cnt > ud->rflow_cnt) in __udma_alloc_gp_rflow_range()
1274 return -EINVAL; in __udma_alloc_gp_rflow_range()
1276 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, in __udma_alloc_gp_rflow_range()
1277 ud->rflow_cnt); in __udma_alloc_gp_rflow_range()
1280 ud->rflow_cnt, in __udma_alloc_gp_rflow_range()
1282 if (start >= ud->rflow_cnt) in __udma_alloc_gp_rflow_range()
1283 return -ENOMEM; in __udma_alloc_gp_rflow_range()
1286 return -EEXIST; in __udma_alloc_gp_rflow_range()
1288 bitmap_set(ud->rflow_gp_map_allocated, start, cnt); in __udma_alloc_gp_rflow_range()
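
The fragments above implement the allocator described in the comment block: the two bitmaps are OR-ed into a scratch map, a run of @cnt clear bits is searched starting at ud->rchan_cnt (or at the explicitly requested @from), -EEXIST is returned when an explicit range turns out to be busy, and a successful run is marked in rflow_gp_map_allocated. A minimal userspace model of that range search, with hypothetical sizes and plain bool arrays standing in for the kernel bitmaps (not the driver's actual code):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical sizes; the real counts come from the TI-SCI resource manager. */
#define RCHAN_CNT  4
#define RFLOW_CNT 16

/* Stand-ins for the two kernel bitmaps that get OR-ed together: a set bit in
 * either one blocks allocation of that GP flow id. */
static bool gp_map[RFLOW_CNT];
static bool gp_allocated[RFLOW_CNT];

/* Minimal model of the range reservation: find (or verify) a run of @cnt free
 * GP flow ids at/after @from; @from == -1 means "anywhere past the default
 * per-channel flows" (ids below RCHAN_CNT). */
static int alloc_gp_rflow_range(int from, int cnt)
{
	int start = (from < 0) ? RCHAN_CNT : from;
	int i, j;

	if (start < RCHAN_CNT || start + cnt > RFLOW_CNT)
		return -EINVAL;			/* wrong input values */

	for (i = start; i + cnt <= RFLOW_CNT; i++) {
		for (j = 0; j < cnt; j++)
			if (gp_map[i + j] || gp_allocated[i + j])
				break;
		if (j == cnt)
			break;			/* found a free run of cnt flows */
		if (from >= 0)
			return -EEXIST;		/* explicitly requested range is busy */
	}
	if (i + cnt > RFLOW_CNT)
		return -ENOMEM;			/* no free range left */

	for (j = 0; j < cnt; j++)
		gp_allocated[i + j] = true;	/* hand the run out */
	return i;
}

int main(void)
{
	printf("auto range 1 starts at flow %d\n", alloc_gp_rflow_range(-1, 4));
	printf("auto range 2 starts at flow %d\n", alloc_gp_rflow_range(-1, 4));
	printf("explicit busy request     -> %d\n", alloc_gp_rflow_range(4, 2));
	return 0;
}
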
1294 if (from < ud->rchan_cnt) in __udma_free_gp_rflow_range()
1295 return -EINVAL; in __udma_free_gp_rflow_range()
1296 if (from + cnt > ud->rflow_cnt) in __udma_free_gp_rflow_range()
1297 return -EINVAL; in __udma_free_gp_rflow_range()
1299 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); in __udma_free_gp_rflow_range()
1308 * TI-SCI FW will perform additional permission check anyway, it's in __udma_get_rflow()
1312 if (id < 0 || id >= ud->rflow_cnt) in __udma_get_rflow()
1313 return ERR_PTR(-ENOENT); in __udma_get_rflow()
1315 if (test_bit(id, ud->rflow_in_use)) in __udma_get_rflow()
1316 return ERR_PTR(-ENOENT); in __udma_get_rflow()
1318 if (ud->rflow_gp_map) { in __udma_get_rflow()
1320 if (!test_bit(id, ud->rflow_gp_map) && in __udma_get_rflow()
1321 !test_bit(id, ud->rflow_gp_map_allocated)) in __udma_get_rflow()
1322 return ERR_PTR(-EINVAL); in __udma_get_rflow()
1325 dev_dbg(ud->dev, "get rflow%d\n", id); in __udma_get_rflow()
1326 set_bit(id, ud->rflow_in_use); in __udma_get_rflow()
1327 return &ud->rflows[id]; in __udma_get_rflow()
1332 if (!test_bit(rflow->id, ud->rflow_in_use)) { in __udma_put_rflow()
1333 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); in __udma_put_rflow()
1337 dev_dbg(ud->dev, "put rflow%d\n", rflow->id); in __udma_put_rflow()
1338 clear_bit(rflow->id, ud->rflow_in_use); in __udma_put_rflow()
1343 enum udma_tp_level tpl, \
1347 if (test_bit(id, ud->res##_map)) { \
1348 dev_err(ud->dev, "res##%d is in use\n", id); \
1349 return ERR_PTR(-ENOENT); \
1354 if (tpl >= ud->res##_tpl.levels) \
1355 tpl = ud->res##_tpl.levels - 1; \
1357 start = ud->res##_tpl.start_idx[tpl]; \
1359 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1361 if (id == ud->res##_cnt) { \
1362 return ERR_PTR(-ENOENT); \
1366 set_bit(id, ud->res##_map); \
1367 return &ud->res##s[id]; \
1376 struct udma_dev *ud = uc->ud; in bcdma_get_bchan()
1377 enum udma_tp_level tpl; in bcdma_get_bchan() local
1380 if (uc->bchan) { in bcdma_get_bchan()
1381 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n", in bcdma_get_bchan()
1382 uc->id, uc->bchan->id); in bcdma_get_bchan()
1387 * Use normal channels for peripherals, and highest TPL channel for in bcdma_get_bchan()
1390 if (uc->config.tr_trigger_type) in bcdma_get_bchan()
1391 tpl = 0; in bcdma_get_bchan()
1393 tpl = ud->bchan_tpl.levels - 1; in bcdma_get_bchan()
1395 uc->bchan = __udma_reserve_bchan(ud, tpl, -1); in bcdma_get_bchan()
1396 if (IS_ERR(uc->bchan)) { in bcdma_get_bchan()
1397 ret = PTR_ERR(uc->bchan); in bcdma_get_bchan()
1398 uc->bchan = NULL; in bcdma_get_bchan()
1402 uc->tchan = uc->bchan; in bcdma_get_bchan()
1409 struct udma_dev *ud = uc->ud; in udma_get_tchan()
1412 if (uc->tchan) { in udma_get_tchan()
1413 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", in udma_get_tchan()
1414 uc->id, uc->tchan->id); in udma_get_tchan()
1419 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. in udma_get_tchan()
1423 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, in udma_get_tchan()
1424 uc->config.mapped_channel_id); in udma_get_tchan()
1425 if (IS_ERR(uc->tchan)) { in udma_get_tchan()
1426 ret = PTR_ERR(uc->tchan); in udma_get_tchan()
1427 uc->tchan = NULL; in udma_get_tchan()
1431 if (ud->tflow_cnt) { in udma_get_tchan()
1434 /* Only PKTDMA has support for tx flows */ in udma_get_tchan()
1435 if (uc->config.default_flow_id >= 0) in udma_get_tchan()
1436 tflow_id = uc->config.default_flow_id; in udma_get_tchan()
1438 tflow_id = uc->tchan->id; in udma_get_tchan()
1440 if (test_bit(tflow_id, ud->tflow_map)) { in udma_get_tchan()
1441 dev_err(ud->dev, "tflow%d is in use\n", tflow_id); in udma_get_tchan()
1442 clear_bit(uc->tchan->id, ud->tchan_map); in udma_get_tchan()
1443 uc->tchan = NULL; in udma_get_tchan()
1444 return -ENOENT; in udma_get_tchan()
1447 uc->tchan->tflow_id = tflow_id; in udma_get_tchan()
1448 set_bit(tflow_id, ud->tflow_map); in udma_get_tchan()
1450 uc->tchan->tflow_id = -1; in udma_get_tchan()
1458 struct udma_dev *ud = uc->ud; in udma_get_rchan()
1461 if (uc->rchan) { in udma_get_rchan()
1462 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", in udma_get_rchan()
1463 uc->id, uc->rchan->id); in udma_get_rchan()
1468 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels. in udma_get_rchan()
1472 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, in udma_get_rchan()
1473 uc->config.mapped_channel_id); in udma_get_rchan()
1474 if (IS_ERR(uc->rchan)) { in udma_get_rchan()
1475 ret = PTR_ERR(uc->rchan); in udma_get_rchan()
1476 uc->rchan = NULL; in udma_get_rchan()
1485 struct udma_dev *ud = uc->ud; in udma_get_chan_pair()
1488 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { in udma_get_chan_pair()
1489 dev_info(ud->dev, "chan%d: already have %d pair allocated\n", in udma_get_chan_pair()
1490 uc->id, uc->tchan->id); in udma_get_chan_pair()
1494 if (uc->tchan) { in udma_get_chan_pair()
1495 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", in udma_get_chan_pair()
1496 uc->id, uc->tchan->id); in udma_get_chan_pair()
1497 return -EBUSY; in udma_get_chan_pair()
1498 } else if (uc->rchan) { in udma_get_chan_pair()
1499 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", in udma_get_chan_pair()
1500 uc->id, uc->rchan->id); in udma_get_chan_pair()
1501 return -EBUSY; in udma_get_chan_pair()
1505 end = min(ud->tchan_cnt, ud->rchan_cnt); in udma_get_chan_pair()
1507 * Try to use the highest TPL channel pair for MEM_TO_MEM channels in udma_get_chan_pair()
1508 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan in udma_get_chan_pair()
1510 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1]; in udma_get_chan_pair()
1512 if (!test_bit(chan_id, ud->tchan_map) && in udma_get_chan_pair()
1513 !test_bit(chan_id, ud->rchan_map)) in udma_get_chan_pair()
1518 return -ENOENT; in udma_get_chan_pair()
1520 set_bit(chan_id, ud->tchan_map); in udma_get_chan_pair()
1521 set_bit(chan_id, ud->rchan_map); in udma_get_chan_pair()
1522 uc->tchan = &ud->tchans[chan_id]; in udma_get_chan_pair()
1523 uc->rchan = &ud->rchans[chan_id]; in udma_get_chan_pair()
1526 uc->tchan->tflow_id = -1; in udma_get_chan_pair()
1533 struct udma_dev *ud = uc->ud; in udma_get_rflow()
1536 if (!uc->rchan) { in udma_get_rflow()
1537 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); in udma_get_rflow()
1538 return -EINVAL; in udma_get_rflow()
1541 if (uc->rflow) { in udma_get_rflow()
1542 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", in udma_get_rflow()
1543 uc->id, uc->rflow->id); in udma_get_rflow()
1547 uc->rflow = __udma_get_rflow(ud, flow_id); in udma_get_rflow()
1548 if (IS_ERR(uc->rflow)) { in udma_get_rflow()
1549 ret = PTR_ERR(uc->rflow); in udma_get_rflow()
1550 uc->rflow = NULL; in udma_get_rflow()
1559 struct udma_dev *ud = uc->ud; in bcdma_put_bchan()
1561 if (uc->bchan) { in bcdma_put_bchan()
1562 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id, in bcdma_put_bchan()
1563 uc->bchan->id); in bcdma_put_bchan()
1564 clear_bit(uc->bchan->id, ud->bchan_map); in bcdma_put_bchan()
1565 uc->bchan = NULL; in bcdma_put_bchan()
1566 uc->tchan = NULL; in bcdma_put_bchan()
1572 struct udma_dev *ud = uc->ud; in udma_put_rchan()
1574 if (uc->rchan) { in udma_put_rchan()
1575 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, in udma_put_rchan()
1576 uc->rchan->id); in udma_put_rchan()
1577 clear_bit(uc->rchan->id, ud->rchan_map); in udma_put_rchan()
1578 uc->rchan = NULL; in udma_put_rchan()
1584 struct udma_dev *ud = uc->ud; in udma_put_tchan()
1586 if (uc->tchan) { in udma_put_tchan()
1587 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, in udma_put_tchan()
1588 uc->tchan->id); in udma_put_tchan()
1589 clear_bit(uc->tchan->id, ud->tchan_map); in udma_put_tchan()
1591 if (uc->tchan->tflow_id >= 0) in udma_put_tchan()
1592 clear_bit(uc->tchan->tflow_id, ud->tflow_map); in udma_put_tchan()
1594 uc->tchan = NULL; in udma_put_tchan()
1600 struct udma_dev *ud = uc->ud; in udma_put_rflow()
1602 if (uc->rflow) { in udma_put_rflow()
1603 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, in udma_put_rflow()
1604 uc->rflow->id); in udma_put_rflow()
1605 __udma_put_rflow(ud, uc->rflow); in udma_put_rflow()
1606 uc->rflow = NULL; in udma_put_rflow()
1612 if (!uc->bchan) in bcdma_free_bchan_resources()
1615 k3_ringacc_ring_free(uc->bchan->tc_ring); in bcdma_free_bchan_resources()
1616 k3_ringacc_ring_free(uc->bchan->t_ring); in bcdma_free_bchan_resources()
1617 uc->bchan->tc_ring = NULL; in bcdma_free_bchan_resources()
1618 uc->bchan->t_ring = NULL; in bcdma_free_bchan_resources()
1619 k3_configure_chan_coherency(&uc->vc.chan, 0); in bcdma_free_bchan_resources()
1627 struct udma_dev *ud = uc->ud; in bcdma_alloc_bchan_resources()
1634 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1, in bcdma_alloc_bchan_resources()
1635 &uc->bchan->t_ring, in bcdma_alloc_bchan_resources()
1636 &uc->bchan->tc_ring); in bcdma_alloc_bchan_resources()
1638 ret = -EBUSY; in bcdma_alloc_bchan_resources()
1647 k3_configure_chan_coherency(&uc->vc.chan, ud->asel); in bcdma_alloc_bchan_resources()
1648 ring_cfg.asel = ud->asel; in bcdma_alloc_bchan_resources()
1649 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); in bcdma_alloc_bchan_resources()
1651 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg); in bcdma_alloc_bchan_resources()
1658 k3_ringacc_ring_free(uc->bchan->tc_ring); in bcdma_alloc_bchan_resources()
1659 uc->bchan->tc_ring = NULL; in bcdma_alloc_bchan_resources()
1660 k3_ringacc_ring_free(uc->bchan->t_ring); in bcdma_alloc_bchan_resources()
1661 uc->bchan->t_ring = NULL; in bcdma_alloc_bchan_resources()
1662 k3_configure_chan_coherency(&uc->vc.chan, 0); in bcdma_alloc_bchan_resources()
1671 if (!uc->tchan) in udma_free_tx_resources()
1674 k3_ringacc_ring_free(uc->tchan->t_ring); in udma_free_tx_resources()
1675 k3_ringacc_ring_free(uc->tchan->tc_ring); in udma_free_tx_resources()
1676 uc->tchan->t_ring = NULL; in udma_free_tx_resources()
1677 uc->tchan->tc_ring = NULL; in udma_free_tx_resources()
1685 struct udma_dev *ud = uc->ud; in udma_alloc_tx_resources()
1693 tchan = uc->tchan; in udma_alloc_tx_resources()
1694 if (tchan->tflow_id >= 0) in udma_alloc_tx_resources()
1695 ring_idx = tchan->tflow_id; in udma_alloc_tx_resources()
1697 ring_idx = ud->bchan_cnt + tchan->id; in udma_alloc_tx_resources()
1699 ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1, in udma_alloc_tx_resources()
1700 &tchan->t_ring, in udma_alloc_tx_resources()
1701 &tchan->tc_ring); in udma_alloc_tx_resources()
1703 ret = -EBUSY; in udma_alloc_tx_resources()
1710 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_alloc_tx_resources()
1715 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); in udma_alloc_tx_resources()
1716 ring_cfg.asel = uc->config.asel; in udma_alloc_tx_resources()
1717 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); in udma_alloc_tx_resources()
1720 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg); in udma_alloc_tx_resources()
1721 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg); in udma_alloc_tx_resources()
1729 k3_ringacc_ring_free(uc->tchan->tc_ring); in udma_alloc_tx_resources()
1730 uc->tchan->tc_ring = NULL; in udma_alloc_tx_resources()
1731 k3_ringacc_ring_free(uc->tchan->t_ring); in udma_alloc_tx_resources()
1732 uc->tchan->t_ring = NULL; in udma_alloc_tx_resources()
1741 if (!uc->rchan) in udma_free_rx_resources()
1744 if (uc->rflow) { in udma_free_rx_resources()
1745 struct udma_rflow *rflow = uc->rflow; in udma_free_rx_resources()
1747 k3_ringacc_ring_free(rflow->fd_ring); in udma_free_rx_resources()
1748 k3_ringacc_ring_free(rflow->r_ring); in udma_free_rx_resources()
1749 rflow->fd_ring = NULL; in udma_free_rx_resources()
1750 rflow->r_ring = NULL; in udma_free_rx_resources()
1760 struct udma_dev *ud = uc->ud; in udma_alloc_rx_resources()
1771 if (uc->config.dir == DMA_MEM_TO_MEM) in udma_alloc_rx_resources()
1774 if (uc->config.default_flow_id >= 0) in udma_alloc_rx_resources()
1775 ret = udma_get_rflow(uc, uc->config.default_flow_id); in udma_alloc_rx_resources()
1777 ret = udma_get_rflow(uc, uc->rchan->id); in udma_alloc_rx_resources()
1780 ret = -EBUSY; in udma_alloc_rx_resources()
1784 rflow = uc->rflow; in udma_alloc_rx_resources()
1785 if (ud->tflow_cnt) in udma_alloc_rx_resources()
1786 fd_ring_id = ud->tflow_cnt + rflow->id; in udma_alloc_rx_resources()
1788 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt + in udma_alloc_rx_resources()
1789 uc->rchan->id; in udma_alloc_rx_resources()
1791 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1, in udma_alloc_rx_resources()
1792 &rflow->fd_ring, &rflow->r_ring); in udma_alloc_rx_resources()
1794 ret = -EBUSY; in udma_alloc_rx_resources()
1801 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_alloc_rx_resources()
1802 if (uc->config.pkt_mode) in udma_alloc_rx_resources()
1812 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel); in udma_alloc_rx_resources()
1813 ring_cfg.asel = uc->config.asel; in udma_alloc_rx_resources()
1814 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan); in udma_alloc_rx_resources()
1817 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); in udma_alloc_rx_resources()
1820 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); in udma_alloc_rx_resources()
1828 k3_ringacc_ring_free(rflow->r_ring); in udma_alloc_rx_resources()
1829 rflow->r_ring = NULL; in udma_alloc_rx_resources()
1830 k3_ringacc_ring_free(rflow->fd_ring); in udma_alloc_rx_resources()
1831 rflow->fd_ring = NULL; in udma_alloc_rx_resources()
1874 struct udma_dev *ud = uc->ud; in udma_tisci_m2m_channel_config()
1875 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_m2m_channel_config()
1876 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_m2m_channel_config()
1877 struct udma_tchan *tchan = uc->tchan; in udma_tisci_m2m_channel_config()
1878 struct udma_rchan *rchan = uc->rchan; in udma_tisci_m2m_channel_config()
1881 u8 tpl; in udma_tisci_m2m_channel_config() local
1883 /* Non synchronized - mem to mem type of transfer */ in udma_tisci_m2m_channel_config()
1884 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); in udma_tisci_m2m_channel_config()
1888 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { in udma_tisci_m2m_channel_config()
1889 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id); in udma_tisci_m2m_channel_config()
1891 burst_size = ud->match_data->burst_size[tpl]; in udma_tisci_m2m_channel_config()
1895 req_tx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_m2m_channel_config()
1896 req_tx.index = tchan->id; in udma_tisci_m2m_channel_config()
1900 req_tx.tx_atype = ud->atype; in udma_tisci_m2m_channel_config()
1906 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in udma_tisci_m2m_channel_config()
1908 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); in udma_tisci_m2m_channel_config()
1913 req_rx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_m2m_channel_config()
1914 req_rx.index = rchan->id; in udma_tisci_m2m_channel_config()
1918 req_rx.rx_atype = ud->atype; in udma_tisci_m2m_channel_config()
1924 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in udma_tisci_m2m_channel_config()
1926 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); in udma_tisci_m2m_channel_config()
1933 struct udma_dev *ud = uc->ud; in bcdma_tisci_m2m_channel_config()
1934 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in bcdma_tisci_m2m_channel_config()
1935 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in bcdma_tisci_m2m_channel_config()
1937 struct udma_bchan *bchan = uc->bchan; in bcdma_tisci_m2m_channel_config()
1940 u8 tpl; in bcdma_tisci_m2m_channel_config() local
1942 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) { in bcdma_tisci_m2m_channel_config()
1943 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id); in bcdma_tisci_m2m_channel_config()
1945 burst_size = ud->match_data->burst_size[tpl]; in bcdma_tisci_m2m_channel_config()
1949 req_tx.nav_id = tisci_rm->tisci_dev_id; in bcdma_tisci_m2m_channel_config()
1951 req_tx.index = bchan->id; in bcdma_tisci_m2m_channel_config()
1957 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in bcdma_tisci_m2m_channel_config()
1959 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret); in bcdma_tisci_m2m_channel_config()
1966 struct udma_dev *ud = uc->ud; in udma_tisci_tx_channel_config()
1967 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_tx_channel_config()
1968 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_tx_channel_config()
1969 struct udma_tchan *tchan = uc->tchan; in udma_tisci_tx_channel_config()
1970 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); in udma_tisci_tx_channel_config()
1975 if (uc->config.pkt_mode) { in udma_tisci_tx_channel_config()
1977 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, in udma_tisci_tx_channel_config()
1978 uc->config.psd_size, 0); in udma_tisci_tx_channel_config()
1985 req_tx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_tx_channel_config()
1986 req_tx.index = tchan->id; in udma_tisci_tx_channel_config()
1988 req_tx.tx_supr_tdpkt = uc->config.notdpkt; in udma_tisci_tx_channel_config()
1991 req_tx.tx_atype = uc->config.atype; in udma_tisci_tx_channel_config()
1992 if (uc->config.ep_type == PSIL_EP_PDMA_XY && in udma_tisci_tx_channel_config()
1993 ud->match_data->flags & UDMA_FLAG_TDTYPE) { in udma_tisci_tx_channel_config()
2000 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in udma_tisci_tx_channel_config()
2002 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); in udma_tisci_tx_channel_config()
2009 struct udma_dev *ud = uc->ud; in bcdma_tisci_tx_channel_config()
2010 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in bcdma_tisci_tx_channel_config()
2011 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in bcdma_tisci_tx_channel_config()
2012 struct udma_tchan *tchan = uc->tchan; in bcdma_tisci_tx_channel_config()
2017 req_tx.nav_id = tisci_rm->tisci_dev_id; in bcdma_tisci_tx_channel_config()
2018 req_tx.index = tchan->id; in bcdma_tisci_tx_channel_config()
2019 req_tx.tx_supr_tdpkt = uc->config.notdpkt; in bcdma_tisci_tx_channel_config()
2020 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) { in bcdma_tisci_tx_channel_config()
2027 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); in bcdma_tisci_tx_channel_config()
2029 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); in bcdma_tisci_tx_channel_config()
2038 struct udma_dev *ud = uc->ud; in udma_tisci_rx_channel_config()
2039 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_tisci_rx_channel_config()
2040 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in udma_tisci_rx_channel_config()
2041 struct udma_rchan *rchan = uc->rchan; in udma_tisci_rx_channel_config()
2042 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); in udma_tisci_rx_channel_config()
2043 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_tisci_rx_channel_config()
2049 if (uc->config.pkt_mode) { in udma_tisci_rx_channel_config()
2051 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, in udma_tisci_rx_channel_config()
2052 uc->config.psd_size, 0); in udma_tisci_rx_channel_config()
2059 req_rx.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_rx_channel_config()
2060 req_rx.index = rchan->id; in udma_tisci_rx_channel_config()
2064 req_rx.rx_atype = uc->config.atype; in udma_tisci_rx_channel_config()
2066 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in udma_tisci_rx_channel_config()
2068 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); in udma_tisci_rx_channel_config()
2087 flow_req.nav_id = tisci_rm->tisci_dev_id; in udma_tisci_rx_channel_config()
2088 flow_req.flow_index = rchan->id; in udma_tisci_rx_channel_config()
2090 if (uc->config.needs_epib) in udma_tisci_rx_channel_config()
2094 if (uc->config.psd_size) in udma_tisci_rx_channel_config()
2109 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); in udma_tisci_rx_channel_config()
2112 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); in udma_tisci_rx_channel_config()
2119 struct udma_dev *ud = uc->ud; in bcdma_tisci_rx_channel_config()
2120 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in bcdma_tisci_rx_channel_config()
2121 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in bcdma_tisci_rx_channel_config()
2122 struct udma_rchan *rchan = uc->rchan; in bcdma_tisci_rx_channel_config()
2127 req_rx.nav_id = tisci_rm->tisci_dev_id; in bcdma_tisci_rx_channel_config()
2128 req_rx.index = rchan->id; in bcdma_tisci_rx_channel_config()
2130 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in bcdma_tisci_rx_channel_config()
2132 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); in bcdma_tisci_rx_channel_config()
2139 struct udma_dev *ud = uc->ud; in pktdma_tisci_rx_channel_config()
2140 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in pktdma_tisci_rx_channel_config()
2141 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; in pktdma_tisci_rx_channel_config()
2147 req_rx.nav_id = tisci_rm->tisci_dev_id; in pktdma_tisci_rx_channel_config()
2148 req_rx.index = uc->rchan->id; in pktdma_tisci_rx_channel_config()
2150 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); in pktdma_tisci_rx_channel_config()
2152 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret); in pktdma_tisci_rx_channel_config()
2161 flow_req.nav_id = tisci_rm->tisci_dev_id; in pktdma_tisci_rx_channel_config()
2162 flow_req.flow_index = uc->rflow->id; in pktdma_tisci_rx_channel_config()
2164 if (uc->config.needs_epib) in pktdma_tisci_rx_channel_config()
2168 if (uc->config.psd_size) in pktdma_tisci_rx_channel_config()
2174 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); in pktdma_tisci_rx_channel_config()
2177 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id, in pktdma_tisci_rx_channel_config()
2186 struct udma_dev *ud = to_udma_dev(chan->device); in udma_alloc_chan_resources()
2187 const struct udma_soc_data *soc_data = ud->soc_data; in udma_alloc_chan_resources()
2192 uc->dma_dev = ud->dev; in udma_alloc_chan_resources()
2194 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { in udma_alloc_chan_resources()
2195 uc->use_dma_pool = true; in udma_alloc_chan_resources()
2197 if (uc->config.dir == DMA_MEM_TO_MEM) { in udma_alloc_chan_resources()
2198 uc->config.hdesc_size = cppi5_trdesc_calc_size( in udma_alloc_chan_resources()
2200 uc->config.pkt_mode = false; in udma_alloc_chan_resources()
2204 if (uc->use_dma_pool) { in udma_alloc_chan_resources()
2205 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, in udma_alloc_chan_resources()
2206 uc->config.hdesc_size, in udma_alloc_chan_resources()
2207 ud->desc_align, in udma_alloc_chan_resources()
2209 if (!uc->hdesc_pool) { in udma_alloc_chan_resources()
2210 dev_err(ud->ddev.dev, in udma_alloc_chan_resources()
2212 uc->use_dma_pool = false; in udma_alloc_chan_resources()
2213 ret = -ENOMEM; in udma_alloc_chan_resources()
2222 reinit_completion(&uc->teardown_completed); in udma_alloc_chan_resources()
2223 complete_all(&uc->teardown_completed); in udma_alloc_chan_resources()
2224 uc->state = UDMA_CHAN_IS_IDLE; in udma_alloc_chan_resources()
2226 switch (uc->config.dir) { in udma_alloc_chan_resources()
2228 /* Non synchronized - mem to mem type of transfer */ in udma_alloc_chan_resources()
2229 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, in udma_alloc_chan_resources()
2230 uc->id); in udma_alloc_chan_resources()
2248 uc->config.src_thread = ud->psil_base + uc->tchan->id; in udma_alloc_chan_resources()
2249 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in udma_alloc_chan_resources()
2252 irq_ring = uc->tchan->tc_ring; in udma_alloc_chan_resources()
2253 irq_udma_idx = uc->tchan->id; in udma_alloc_chan_resources()
2258 /* Slave transfer synchronized - mem to dev (TX) transfer */ in udma_alloc_chan_resources()
2259 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, in udma_alloc_chan_resources()
2260 uc->id); in udma_alloc_chan_resources()
2266 uc->config.src_thread = ud->psil_base + uc->tchan->id; in udma_alloc_chan_resources()
2267 uc->config.dst_thread = uc->config.remote_thread_id; in udma_alloc_chan_resources()
2268 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; in udma_alloc_chan_resources()
2270 irq_ring = uc->tchan->tc_ring; in udma_alloc_chan_resources()
2271 irq_udma_idx = uc->tchan->id; in udma_alloc_chan_resources()
2276 /* Slave transfer synchronized - dev to mem (RX) transfer */ in udma_alloc_chan_resources()
2277 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, in udma_alloc_chan_resources()
2278 uc->id); in udma_alloc_chan_resources()
2284 uc->config.src_thread = uc->config.remote_thread_id; in udma_alloc_chan_resources()
2285 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in udma_alloc_chan_resources()
2288 irq_ring = uc->rflow->r_ring; in udma_alloc_chan_resources()
2289 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id; in udma_alloc_chan_resources()
2295 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", in udma_alloc_chan_resources()
2296 __func__, uc->id, uc->config.dir); in udma_alloc_chan_resources()
2297 ret = -EINVAL; in udma_alloc_chan_resources()
2307 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); in udma_alloc_chan_resources()
2310 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); in udma_alloc_chan_resources()
2311 ret = -EBUSY; in udma_alloc_chan_resources()
2316 /* PSI-L pairing */ in udma_alloc_chan_resources()
2317 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
2319 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", in udma_alloc_chan_resources()
2320 uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
2324 uc->psil_paired = true; in udma_alloc_chan_resources()
2326 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); in udma_alloc_chan_resources()
2327 if (uc->irq_num_ring <= 0) { in udma_alloc_chan_resources()
2328 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", in udma_alloc_chan_resources()
2330 ret = -EINVAL; in udma_alloc_chan_resources()
2334 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, in udma_alloc_chan_resources()
2335 IRQF_TRIGGER_HIGH, uc->name, uc); in udma_alloc_chan_resources()
2337 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); in udma_alloc_chan_resources()
2342 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { in udma_alloc_chan_resources()
2343 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); in udma_alloc_chan_resources()
2344 if (uc->irq_num_udma <= 0) { in udma_alloc_chan_resources()
2345 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", in udma_alloc_chan_resources()
2347 free_irq(uc->irq_num_ring, uc); in udma_alloc_chan_resources()
2348 ret = -EINVAL; in udma_alloc_chan_resources()
2352 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, in udma_alloc_chan_resources()
2353 uc->name, uc); in udma_alloc_chan_resources()
2355 dev_err(ud->dev, "chan%d: UDMA irq request failed\n", in udma_alloc_chan_resources()
2356 uc->id); in udma_alloc_chan_resources()
2357 free_irq(uc->irq_num_ring, uc); in udma_alloc_chan_resources()
2361 uc->irq_num_udma = 0; in udma_alloc_chan_resources()
2369 uc->irq_num_ring = 0; in udma_alloc_chan_resources()
2370 uc->irq_num_udma = 0; in udma_alloc_chan_resources()
2372 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); in udma_alloc_chan_resources()
2373 uc->psil_paired = false; in udma_alloc_chan_resources()
2380 if (uc->use_dma_pool) { in udma_alloc_chan_resources()
2381 dma_pool_destroy(uc->hdesc_pool); in udma_alloc_chan_resources()
2382 uc->use_dma_pool = false; in udma_alloc_chan_resources()
2391 struct udma_dev *ud = to_udma_dev(chan->device); in bcdma_alloc_chan_resources()
2392 const struct udma_oes_offsets *oes = &ud->soc_data->oes; in bcdma_alloc_chan_resources()
2397 uc->config.pkt_mode = false; in bcdma_alloc_chan_resources()
2403 reinit_completion(&uc->teardown_completed); in bcdma_alloc_chan_resources()
2404 complete_all(&uc->teardown_completed); in bcdma_alloc_chan_resources()
2405 uc->state = UDMA_CHAN_IS_IDLE; in bcdma_alloc_chan_resources()
2407 switch (uc->config.dir) { in bcdma_alloc_chan_resources()
2409 /* Non synchronized - mem to mem type of transfer */ in bcdma_alloc_chan_resources()
2410 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, in bcdma_alloc_chan_resources()
2411 uc->id); in bcdma_alloc_chan_resources()
2417 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring; in bcdma_alloc_chan_resources()
2418 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data; in bcdma_alloc_chan_resources()
2423 /* Slave transfer synchronized - mem to dev (TX) transfer */ in bcdma_alloc_chan_resources()
2424 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, in bcdma_alloc_chan_resources()
2425 uc->id); in bcdma_alloc_chan_resources()
2429 uc->config.remote_thread_id = -1; in bcdma_alloc_chan_resources()
2433 uc->config.src_thread = ud->psil_base + uc->tchan->id; in bcdma_alloc_chan_resources()
2434 uc->config.dst_thread = uc->config.remote_thread_id; in bcdma_alloc_chan_resources()
2435 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; in bcdma_alloc_chan_resources()
2437 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring; in bcdma_alloc_chan_resources()
2438 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data; in bcdma_alloc_chan_resources()
2443 /* Slave transfer synchronized - dev to mem (RX) transfer */ in bcdma_alloc_chan_resources()
2444 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, in bcdma_alloc_chan_resources()
2445 uc->id); in bcdma_alloc_chan_resources()
2449 uc->config.remote_thread_id = -1; in bcdma_alloc_chan_resources()
2453 uc->config.src_thread = uc->config.remote_thread_id; in bcdma_alloc_chan_resources()
2454 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in bcdma_alloc_chan_resources()
2457 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring; in bcdma_alloc_chan_resources()
2458 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data; in bcdma_alloc_chan_resources()
2464 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", in bcdma_alloc_chan_resources()
2465 __func__, uc->id, uc->config.dir); in bcdma_alloc_chan_resources()
2466 return -EINVAL; in bcdma_alloc_chan_resources()
2474 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); in bcdma_alloc_chan_resources()
2477 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); in bcdma_alloc_chan_resources()
2478 ret = -EBUSY; in bcdma_alloc_chan_resources()
2483 uc->dma_dev = dmaengine_get_dma_device(chan); in bcdma_alloc_chan_resources()
2484 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) { in bcdma_alloc_chan_resources()
2485 uc->config.hdesc_size = cppi5_trdesc_calc_size( in bcdma_alloc_chan_resources()
2488 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, in bcdma_alloc_chan_resources()
2489 uc->config.hdesc_size, in bcdma_alloc_chan_resources()
2490 ud->desc_align, in bcdma_alloc_chan_resources()
2492 if (!uc->hdesc_pool) { in bcdma_alloc_chan_resources()
2493 dev_err(ud->ddev.dev, in bcdma_alloc_chan_resources()
2495 uc->use_dma_pool = false; in bcdma_alloc_chan_resources()
2496 ret = -ENOMEM; in bcdma_alloc_chan_resources()
2500 uc->use_dma_pool = true; in bcdma_alloc_chan_resources()
2501 } else if (uc->config.dir != DMA_MEM_TO_MEM) { in bcdma_alloc_chan_resources()
2502 /* PSI-L pairing */ in bcdma_alloc_chan_resources()
2503 ret = navss_psil_pair(ud, uc->config.src_thread, in bcdma_alloc_chan_resources()
2504 uc->config.dst_thread); in bcdma_alloc_chan_resources()
2506 dev_err(ud->dev, in bcdma_alloc_chan_resources()
2507 "PSI-L pairing failed: 0x%04x -> 0x%04x\n", in bcdma_alloc_chan_resources()
2508 uc->config.src_thread, uc->config.dst_thread); in bcdma_alloc_chan_resources()
2512 uc->psil_paired = true; in bcdma_alloc_chan_resources()
2515 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); in bcdma_alloc_chan_resources()
2516 if (uc->irq_num_ring <= 0) { in bcdma_alloc_chan_resources()
2517 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", in bcdma_alloc_chan_resources()
2519 ret = -EINVAL; in bcdma_alloc_chan_resources()
2523 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, in bcdma_alloc_chan_resources()
2524 IRQF_TRIGGER_HIGH, uc->name, uc); in bcdma_alloc_chan_resources()
2526 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); in bcdma_alloc_chan_resources()
2531 if (is_slave_direction(uc->config.dir)) { in bcdma_alloc_chan_resources()
2532 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx); in bcdma_alloc_chan_resources()
2533 if (uc->irq_num_udma <= 0) { in bcdma_alloc_chan_resources()
2534 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n", in bcdma_alloc_chan_resources()
2536 free_irq(uc->irq_num_ring, uc); in bcdma_alloc_chan_resources()
2537 ret = -EINVAL; in bcdma_alloc_chan_resources()
2541 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, in bcdma_alloc_chan_resources()
2542 uc->name, uc); in bcdma_alloc_chan_resources()
2544 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n", in bcdma_alloc_chan_resources()
2545 uc->id); in bcdma_alloc_chan_resources()
2546 free_irq(uc->irq_num_ring, uc); in bcdma_alloc_chan_resources()
2550 uc->irq_num_udma = 0; in bcdma_alloc_chan_resources()
2555 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, in bcdma_alloc_chan_resources()
2560 uc->irq_num_ring = 0; in bcdma_alloc_chan_resources()
2561 uc->irq_num_udma = 0; in bcdma_alloc_chan_resources()
2563 if (uc->psil_paired) in bcdma_alloc_chan_resources()
2564 navss_psil_unpair(ud, uc->config.src_thread, in bcdma_alloc_chan_resources()
2565 uc->config.dst_thread); in bcdma_alloc_chan_resources()
2566 uc->psil_paired = false; in bcdma_alloc_chan_resources()
2574 if (uc->use_dma_pool) { in bcdma_alloc_chan_resources()
2575 dma_pool_destroy(uc->hdesc_pool); in bcdma_alloc_chan_resources()
2576 uc->use_dma_pool = false; in bcdma_alloc_chan_resources()
2584 struct k3_event_route_data *router_data = chan->route_data; in bcdma_router_config()
2588 if (!uc->bchan) in bcdma_router_config()
2589 return -EINVAL; in bcdma_router_config()
2591 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2) in bcdma_router_config()
2592 return -EINVAL; in bcdma_router_config()
2594 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset; in bcdma_router_config()
2595 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1; in bcdma_router_config()
2597 return router_data->set_event(router_data->priv, trigger_event); in bcdma_router_config()
2603 struct udma_dev *ud = to_udma_dev(chan->device); in pktdma_alloc_chan_resources()
2604 const struct udma_oes_offsets *oes = &ud->soc_data->oes; in pktdma_alloc_chan_resources()
2612 reinit_completion(&uc->teardown_completed); in pktdma_alloc_chan_resources()
2613 complete_all(&uc->teardown_completed); in pktdma_alloc_chan_resources()
2614 uc->state = UDMA_CHAN_IS_IDLE; in pktdma_alloc_chan_resources()
2616 switch (uc->config.dir) { in pktdma_alloc_chan_resources()
2618 /* Slave transfer synchronized - mem to dev (TX) transfer */ in pktdma_alloc_chan_resources()
2619 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, in pktdma_alloc_chan_resources()
2620 uc->id); in pktdma_alloc_chan_resources()
2624 uc->config.remote_thread_id = -1; in pktdma_alloc_chan_resources()
2628 uc->config.src_thread = ud->psil_base + uc->tchan->id; in pktdma_alloc_chan_resources()
2629 uc->config.dst_thread = uc->config.remote_thread_id; in pktdma_alloc_chan_resources()
2630 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; in pktdma_alloc_chan_resources()
2632 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow; in pktdma_alloc_chan_resources()
2637 /* Slave transfer synchronized - dev to mem (RX) transfer */ in pktdma_alloc_chan_resources()
2638 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, in pktdma_alloc_chan_resources()
2639 uc->id); in pktdma_alloc_chan_resources()
2643 uc->config.remote_thread_id = -1; in pktdma_alloc_chan_resources()
2647 uc->config.src_thread = uc->config.remote_thread_id; in pktdma_alloc_chan_resources()
2648 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | in pktdma_alloc_chan_resources()
2651 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow; in pktdma_alloc_chan_resources()
2657 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", in pktdma_alloc_chan_resources()
2658 __func__, uc->id, uc->config.dir); in pktdma_alloc_chan_resources()
2659 return -EINVAL; in pktdma_alloc_chan_resources()
2667 dev_warn(ud->dev, "chan%d: is running!\n", uc->id); in pktdma_alloc_chan_resources()
2670 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); in pktdma_alloc_chan_resources()
2671 ret = -EBUSY; in pktdma_alloc_chan_resources()
2676 uc->dma_dev = dmaengine_get_dma_device(chan); in pktdma_alloc_chan_resources()
2677 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev, in pktdma_alloc_chan_resources()
2678 uc->config.hdesc_size, ud->desc_align, in pktdma_alloc_chan_resources()
2680 if (!uc->hdesc_pool) { in pktdma_alloc_chan_resources()
2681 dev_err(ud->ddev.dev, in pktdma_alloc_chan_resources()
2683 uc->use_dma_pool = false; in pktdma_alloc_chan_resources()
2684 ret = -ENOMEM; in pktdma_alloc_chan_resources()
2688 uc->use_dma_pool = true; in pktdma_alloc_chan_resources()
2690 /* PSI-L pairing */ in pktdma_alloc_chan_resources()
2691 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); in pktdma_alloc_chan_resources()
2693 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", in pktdma_alloc_chan_resources()
2694 uc->config.src_thread, uc->config.dst_thread); in pktdma_alloc_chan_resources()
2698 uc->psil_paired = true; in pktdma_alloc_chan_resources()
2700 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx); in pktdma_alloc_chan_resources()
2701 if (uc->irq_num_ring <= 0) { in pktdma_alloc_chan_resources()
2702 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", in pktdma_alloc_chan_resources()
2704 ret = -EINVAL; in pktdma_alloc_chan_resources()
2708 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, in pktdma_alloc_chan_resources()
2709 IRQF_TRIGGER_HIGH, uc->name, uc); in pktdma_alloc_chan_resources()
2711 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); in pktdma_alloc_chan_resources()
2715 uc->irq_num_udma = 0; in pktdma_alloc_chan_resources()
2719 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, in pktdma_alloc_chan_resources()
2722 if (uc->tchan) in pktdma_alloc_chan_resources()
2723 dev_dbg(ud->dev, in pktdma_alloc_chan_resources()
2725 uc->id, uc->tchan->id, uc->tchan->tflow_id, in pktdma_alloc_chan_resources()
2726 uc->config.remote_thread_id); in pktdma_alloc_chan_resources()
2727 else if (uc->rchan) in pktdma_alloc_chan_resources()
2728 dev_dbg(ud->dev, in pktdma_alloc_chan_resources()
2730 uc->id, uc->rchan->id, uc->rflow->id, in pktdma_alloc_chan_resources()
2731 uc->config.remote_thread_id); in pktdma_alloc_chan_resources()
2735 uc->irq_num_ring = 0; in pktdma_alloc_chan_resources()
2737 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); in pktdma_alloc_chan_resources()
2738 uc->psil_paired = false; in pktdma_alloc_chan_resources()
2745 dma_pool_destroy(uc->hdesc_pool); in pktdma_alloc_chan_resources()
2746 uc->use_dma_pool = false; in pktdma_alloc_chan_resources()
2756 memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); in udma_slave_config()
2778 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); in udma_alloc_tr_desc()
2783 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); in udma_alloc_tr_desc()
2787 d->sglen = tr_count; in udma_alloc_tr_desc()
2789 d->hwdesc_count = 1; in udma_alloc_tr_desc()
2790 hwdesc = &d->hwdesc[0]; in udma_alloc_tr_desc()
2793 if (uc->use_dma_pool) { in udma_alloc_tr_desc()
2794 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_alloc_tr_desc()
2795 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_alloc_tr_desc()
2797 &hwdesc->cppi5_desc_paddr); in udma_alloc_tr_desc()
2799 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, in udma_alloc_tr_desc()
2801 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, in udma_alloc_tr_desc()
2802 uc->ud->desc_align); in udma_alloc_tr_desc()
2803 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, in udma_alloc_tr_desc()
2804 hwdesc->cppi5_desc_size, in udma_alloc_tr_desc()
2805 &hwdesc->cppi5_desc_paddr, in udma_alloc_tr_desc()
2809 if (!hwdesc->cppi5_desc_vaddr) { in udma_alloc_tr_desc()
2815 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; in udma_alloc_tr_desc()
2817 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; in udma_alloc_tr_desc()
2819 tr_desc = hwdesc->cppi5_desc_vaddr; in udma_alloc_tr_desc()
2821 if (uc->cyclic) in udma_alloc_tr_desc()
2825 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_alloc_tr_desc()
2827 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_alloc_tr_desc()
2830 cppi5_desc_set_pktids(tr_desc, uc->id, in udma_alloc_tr_desc()
2838 * udma_get_tr_counters - calculate TR counters for a given length
2847 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2851 * -EINVAL if the length cannot be supported
2867 *tr0_cnt0 = SZ_64K - BIT(align_to); in udma_get_tr_counters()
2870 align_to--; in udma_get_tr_counters()
2873 return -EINVAL; in udma_get_tr_counters()
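
The comment and fragments above describe how a copy length is split into at most two TRs: TR0 uses the largest block size below 64K that still honours the requested alignment, the alignment is relaxed when the resulting block count would not fit a 16-bit TR counter, and TR1 carries the remainder. A small userspace sketch of that splitting, reconstructed from the visible lines (return codes and types simplified, not the kernel function itself), with a worked example:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_64K 0x10000u

/* Reconstruction of the splitting described above, for illustration only. */
static int tr_counters(size_t len, unsigned int align_to,
		       uint16_t *tr0_cnt0, uint16_t *tr0_cnt1, uint16_t *tr1_cnt0)
{
	if (len < SZ_64K) {			/* a single TR is enough */
		*tr0_cnt0 = len;
		*tr0_cnt1 = 1;
		*tr1_cnt0 = 0;
		return 1;
	}

	if (align_to > 3)
		align_to = 3;

	for (;;) {
		/* largest block below 64K that keeps the requested alignment */
		*tr0_cnt0 = SZ_64K - (1u << align_to);
		if (len / *tr0_cnt0 < SZ_64K)
			break;			/* block count fits in 16 bits */
		if (!align_to)
			return -EINVAL;		/* length cannot be supported */
		align_to--;			/* relax alignment and retry */
	}

	*tr0_cnt1 = len / *tr0_cnt0;		/* full blocks in TR0 */
	*tr1_cnt0 = len % *tr0_cnt0;		/* remainder handled by TR1 */
	return 2;
}

int main(void)
{
	uint16_t c0, c1, r;
	int n = tr_counters(200000, 2, &c0, &c1, &r);

	/* len=200000, align_to=2 -> 2 TRs: tr0_cnt0=65532, tr0_cnt1=3, tr1_cnt0=3404 */
	printf("%d TRs: tr0_cnt0=%u tr0_cnt1=%u tr1_cnt0=%u\n", n, c0, c1, r);
	return 0;
}
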
2911 d->sglen = sglen; in udma_prep_slave_sg_tr()
2913 if (uc->ud->match_data->type == DMA_TYPE_UDMA) in udma_prep_slave_sg_tr()
2916 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_slave_sg_tr()
2918 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_slave_sg_tr()
2925 dev_err(uc->ud->dev, "size %u is not supported\n", in udma_prep_slave_sg_tr()
2957 d->residue += sg_dma_len(sgent); in udma_prep_slave_sg_tr()
2960 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, in udma_prep_slave_sg_tr()
2987 dev_addr = uc->cfg.src_addr; in udma_prep_slave_sg_triggered_tr()
2988 dev_width = uc->cfg.src_addr_width; in udma_prep_slave_sg_triggered_tr()
2989 burst = uc->cfg.src_maxburst; in udma_prep_slave_sg_triggered_tr()
2990 port_window = uc->cfg.src_port_window_size; in udma_prep_slave_sg_triggered_tr()
2992 dev_addr = uc->cfg.dst_addr; in udma_prep_slave_sg_triggered_tr()
2993 dev_width = uc->cfg.dst_addr_width; in udma_prep_slave_sg_triggered_tr()
2994 burst = uc->cfg.dst_maxburst; in udma_prep_slave_sg_triggered_tr()
2995 port_window = uc->cfg.dst_port_window_size; in udma_prep_slave_sg_triggered_tr()
2997 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); in udma_prep_slave_sg_triggered_tr()
3006 dev_err(uc->ud->dev, in udma_prep_slave_sg_triggered_tr()
3024 dev_err(uc->ud->dev, in udma_prep_slave_sg_triggered_tr()
3042 d->sglen = sglen; in udma_prep_slave_sg_triggered_tr()
3044 if (uc->ud->match_data->type == DMA_TYPE_UDMA) { in udma_prep_slave_sg_triggered_tr()
3048 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_slave_sg_triggered_tr()
3052 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_slave_sg_triggered_tr()
3061 dev_err(uc->ud->dev, "size %zu is not supported\n", in udma_prep_slave_sg_triggered_tr()
3072 uc->config.tr_trigger_type, in udma_prep_slave_sg_triggered_tr()
3082 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; in udma_prep_slave_sg_triggered_tr()
3107 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; in udma_prep_slave_sg_triggered_tr()
3118 uc->config.tr_trigger_type, in udma_prep_slave_sg_triggered_tr()
3129 tr_req[tr_idx].dim1 = (-1) * tr_cnt0; in udma_prep_slave_sg_triggered_tr()
3152 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0; in udma_prep_slave_sg_triggered_tr()
3157 d->residue += sg_len; in udma_prep_slave_sg_triggered_tr()
3160 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, csf | CPPI5_TR_CSF_EOP); in udma_prep_slave_sg_triggered_tr()
3169 if (uc->config.ep_type != PSIL_EP_PDMA_XY) in udma_configure_statictr()
3175 d->static_tr.elsize = 0; in udma_configure_statictr()
3178 d->static_tr.elsize = 1; in udma_configure_statictr()
3181 d->static_tr.elsize = 2; in udma_configure_statictr()
3184 d->static_tr.elsize = 3; in udma_configure_statictr()
3187 d->static_tr.elsize = 4; in udma_configure_statictr()
3190 return -EINVAL; in udma_configure_statictr()
3193 d->static_tr.elcnt = elcnt; in udma_configure_statictr()
3195 if (uc->config.pkt_mode || !uc->cyclic) { in udma_configure_statictr()
3204 if (uc->cyclic) in udma_configure_statictr()
3205 d->static_tr.bstcnt = d->residue / d->sglen / div; in udma_configure_statictr()
3207 d->static_tr.bstcnt = d->residue / div; in udma_configure_statictr()
3208 } else if (uc->ud->match_data->type == DMA_TYPE_BCDMA && in udma_configure_statictr()
3209 uc->config.dir == DMA_DEV_TO_MEM && in udma_configure_statictr()
3210 uc->cyclic) { in udma_configure_statictr()
3217 struct cppi5_tr_type1_t *tr_req = d->hwdesc[0].tr_req_base; in udma_configure_statictr()
3219 d->static_tr.bstcnt = in udma_configure_statictr()
3220 (tr_req->icnt0 * tr_req->icnt1) / dev_width; in udma_configure_statictr()
3222 d->static_tr.bstcnt = 0; in udma_configure_statictr()
3225 if (uc->config.dir == DMA_DEV_TO_MEM && in udma_configure_statictr()
3226 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) in udma_configure_statictr()
3227 return -EINVAL; in udma_configure_statictr()
3248 d->sglen = sglen; in udma_prep_slave_sg_pkt()
3249 d->hwdesc_count = sglen; in udma_prep_slave_sg_pkt()
3252 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_prep_slave_sg_pkt()
3254 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_prep_slave_sg_pkt()
3256 if (uc->ud->match_data->type == DMA_TYPE_UDMA) in udma_prep_slave_sg_pkt()
3259 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_slave_sg_pkt()
3262 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; in udma_prep_slave_sg_pkt()
3267 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_prep_slave_sg_pkt()
3269 &hwdesc->cppi5_desc_paddr); in udma_prep_slave_sg_pkt()
3270 if (!hwdesc->cppi5_desc_vaddr) { in udma_prep_slave_sg_pkt()
3271 dev_err(uc->ud->dev, in udma_prep_slave_sg_pkt()
3279 d->residue += sg_len; in udma_prep_slave_sg_pkt()
3280 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_prep_slave_sg_pkt()
3281 desc = hwdesc->cppi5_desc_vaddr; in udma_prep_slave_sg_pkt()
3286 cppi5_desc_set_pktids(&desc->hdr, uc->id, in udma_prep_slave_sg_pkt()
3288 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); in udma_prep_slave_sg_pkt()
3291 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); in udma_prep_slave_sg_pkt()
3301 hwdesc->cppi5_desc_paddr | asel); in udma_prep_slave_sg_pkt()
3303 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA || in udma_prep_slave_sg_pkt()
3308 if (d->residue >= SZ_4M) { in udma_prep_slave_sg_pkt()
3309 dev_err(uc->ud->dev, in udma_prep_slave_sg_pkt()
3311 __func__, d->residue); in udma_prep_slave_sg_pkt()
3317 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_prep_slave_sg_pkt()
3318 cppi5_hdesc_set_pktlen(h_desc, d->residue); in udma_prep_slave_sg_pkt()
3327 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_attach_metadata()
3332 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_attach_metadata()
3333 return -ENOTSUPP; in udma_attach_metadata()
3335 if (!data || len > uc->config.metadata_size) in udma_attach_metadata()
3336 return -EINVAL; in udma_attach_metadata()
3338 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) in udma_attach_metadata()
3339 return -EINVAL; in udma_attach_metadata()
3341 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_attach_metadata()
3342 if (d->dir == DMA_MEM_TO_DEV) in udma_attach_metadata()
3343 memcpy(h_desc->epib, data, len); in udma_attach_metadata()
3345 if (uc->config.needs_epib) in udma_attach_metadata()
3346 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; in udma_attach_metadata()
3348 d->metadata = data; in udma_attach_metadata()
3349 d->metadata_size = len; in udma_attach_metadata()
3350 if (uc->config.needs_epib) in udma_attach_metadata()
3363 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_get_metadata_ptr()
3366 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_get_metadata_ptr()
3367 return ERR_PTR(-ENOTSUPP); in udma_get_metadata_ptr()
3369 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_get_metadata_ptr()
3371 *max_len = uc->config.metadata_size; in udma_get_metadata_ptr()
3373 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? in udma_get_metadata_ptr()
3377 return h_desc->epib; in udma_get_metadata_ptr()
3384 struct udma_chan *uc = to_udma_chan(desc->chan); in udma_set_metadata_len()
3389 if (!uc->config.pkt_mode || !uc->config.metadata_size) in udma_set_metadata_len()
3390 return -ENOTSUPP; in udma_set_metadata_len()
3392 if (payload_len > uc->config.metadata_size) in udma_set_metadata_len()
3393 return -EINVAL; in udma_set_metadata_len()
3395 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) in udma_set_metadata_len()
3396 return -EINVAL; in udma_set_metadata_len()
3398 h_desc = d->hwdesc[0].cppi5_desc_vaddr; in udma_set_metadata_len()
3400 if (uc->config.needs_epib) { in udma_set_metadata_len()
3401 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; in udma_set_metadata_len()
3427 if (dir != uc->config.dir && in udma_prep_slave_sg()
3428 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) { in udma_prep_slave_sg()
3429 dev_err(chan->device->dev, in udma_prep_slave_sg()
3431 __func__, uc->id, in udma_prep_slave_sg()
3432 dmaengine_get_direction_text(uc->config.dir), in udma_prep_slave_sg()
3438 dev_width = uc->cfg.src_addr_width; in udma_prep_slave_sg()
3439 burst = uc->cfg.src_maxburst; in udma_prep_slave_sg()
3441 dev_width = uc->cfg.dst_addr_width; in udma_prep_slave_sg()
3442 burst = uc->cfg.dst_maxburst; in udma_prep_slave_sg()
3444 dev_err(chan->device->dev, "%s: bad direction?\n", __func__); in udma_prep_slave_sg()
3451 uc->config.tx_flags = tx_flags; in udma_prep_slave_sg()
3453 if (uc->config.pkt_mode) in udma_prep_slave_sg()
3456 else if (is_slave_direction(uc->config.dir)) in udma_prep_slave_sg()
3466 d->dir = dir; in udma_prep_slave_sg()
3467 d->desc_idx = 0; in udma_prep_slave_sg()
3468 d->tr_idx = 0; in udma_prep_slave_sg()
3472 dev_err(uc->ud->dev, in udma_prep_slave_sg()
3474 __func__, uc->ud->match_data->statictr_z_mask, in udma_prep_slave_sg()
3475 d->static_tr.bstcnt); in udma_prep_slave_sg()
3482 if (uc->config.metadata_size) in udma_prep_slave_sg()
3483 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_slave_sg()
3485 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); in udma_prep_slave_sg()
3505 dev_err(uc->ud->dev, "size %zu is not supported\n", in udma_prep_dma_cyclic_tr()
3516 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_dma_cyclic_tr()
3517 if (uc->ud->match_data->type == DMA_TYPE_UDMA) in udma_prep_dma_cyclic_tr()
3521 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT); in udma_prep_dma_cyclic_tr()
3524 * For BCDMA <-> PDMA transfers, the EOP flag needs to be set on the in udma_prep_dma_cyclic_tr()
3527 * of TX, and to avoid short-packet error in case of RX. in udma_prep_dma_cyclic_tr()
3532 if (uc->config.ep_type == PSIL_EP_PDMA_XY && in udma_prep_dma_cyclic_tr()
3533 uc->ud->match_data->type == DMA_TYPE_BCDMA) { in udma_prep_dma_cyclic_tr()
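	/*
	 * Hedged illustration (not the driver's exact code): for this
	 * BCDMA <-> PDMA_XY case the TR that closes each period is expected
	 * to carry the EOP completion-status flag, much like the slave-sg
	 * path does for its final TR, e.g.:
	 *
	 *	cppi5_tr_csf_set(&tr_req[last].flags, csf | CPPI5_TR_CSF_EOP);
	 *
	 * where "last" is a hypothetical index of the period's closing TR.
	 */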
3585 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) in udma_prep_dma_cyclic_pkt()
3595 d->hwdesc_count = periods; in udma_prep_dma_cyclic_pkt()
3597 /* TODO: re-check this... */ in udma_prep_dma_cyclic_pkt()
3599 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); in udma_prep_dma_cyclic_pkt()
3601 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); in udma_prep_dma_cyclic_pkt()
3603 if (uc->ud->match_data->type != DMA_TYPE_UDMA) in udma_prep_dma_cyclic_pkt()
3604 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_dma_cyclic_pkt()
3607 struct udma_hwdesc *hwdesc = &d->hwdesc[i]; in udma_prep_dma_cyclic_pkt()
3611 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, in udma_prep_dma_cyclic_pkt()
3613 &hwdesc->cppi5_desc_paddr); in udma_prep_dma_cyclic_pkt()
3614 if (!hwdesc->cppi5_desc_vaddr) { in udma_prep_dma_cyclic_pkt()
3615 dev_err(uc->ud->dev, in udma_prep_dma_cyclic_pkt()
3623 hwdesc->cppi5_desc_size = uc->config.hdesc_size; in udma_prep_dma_cyclic_pkt()
3624 h_desc = hwdesc->cppi5_desc_vaddr; in udma_prep_dma_cyclic_pkt()
3630 cppi5_desc_set_pktids(&h_desc->hdr, uc->id, in udma_prep_dma_cyclic_pkt()
3632 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); in udma_prep_dma_cyclic_pkt()
3653 if (dir != uc->config.dir) { in udma_prep_dma_cyclic()
3654 dev_err(chan->device->dev, in udma_prep_dma_cyclic()
3656 __func__, uc->id, in udma_prep_dma_cyclic()
3657 dmaengine_get_direction_text(uc->config.dir), in udma_prep_dma_cyclic()
3662 uc->cyclic = true; in udma_prep_dma_cyclic()
3665 dev_width = uc->cfg.src_addr_width; in udma_prep_dma_cyclic()
3666 burst = uc->cfg.src_maxburst; in udma_prep_dma_cyclic()
3668 dev_width = uc->cfg.dst_addr_width; in udma_prep_dma_cyclic()
3669 burst = uc->cfg.dst_maxburst; in udma_prep_dma_cyclic()
3671 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); in udma_prep_dma_cyclic()
3678 if (uc->config.pkt_mode) in udma_prep_dma_cyclic()
3688 d->sglen = buf_len / period_len; in udma_prep_dma_cyclic()
3690 d->dir = dir; in udma_prep_dma_cyclic()
3691 d->residue = buf_len; in udma_prep_dma_cyclic()
3695 dev_err(uc->ud->dev, in udma_prep_dma_cyclic()
3697 __func__, uc->ud->match_data->statictr_z_mask, in udma_prep_dma_cyclic()
3698 d->static_tr.bstcnt); in udma_prep_dma_cyclic()
3705 if (uc->config.metadata_size) in udma_prep_dma_cyclic()
3706 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_dma_cyclic()
3708 return vchan_tx_prep(&uc->vc, &d->vd, flags); in udma_prep_dma_cyclic()
3723 if (uc->config.dir != DMA_MEM_TO_MEM) { in udma_prep_dma_memcpy()
3724 dev_err(chan->device->dev, in udma_prep_dma_memcpy()
3726 __func__, uc->id, in udma_prep_dma_memcpy()
3727 dmaengine_get_direction_text(uc->config.dir), in udma_prep_dma_memcpy()
3735 dev_err(uc->ud->dev, "size %zu is not supported\n", in udma_prep_dma_memcpy()
3744 d->dir = DMA_MEM_TO_MEM; in udma_prep_dma_memcpy()
3745 d->desc_idx = 0; in udma_prep_dma_memcpy()
3746 d->tr_idx = 0; in udma_prep_dma_memcpy()
3747 d->residue = len; in udma_prep_dma_memcpy()
3749 if (uc->ud->match_data->type != DMA_TYPE_UDMA) { in udma_prep_dma_memcpy()
3750 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_dma_memcpy()
3751 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT; in udma_prep_dma_memcpy()
3756 tr_req = d->hwdesc[0].tr_req_base; in udma_prep_dma_memcpy()
3794 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, csf | CPPI5_TR_CSF_EOP); in udma_prep_dma_memcpy()
3796 if (uc->config.metadata_size) in udma_prep_dma_memcpy()
3797 d->vd.tx.metadata_ops = &metadata_ops; in udma_prep_dma_memcpy()
3799 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); in udma_prep_dma_memcpy()
3807 spin_lock_irqsave(&uc->vc.lock, flags); in udma_issue_pending()
3810 if (vchan_issue_pending(&uc->vc) && !uc->desc) { in udma_issue_pending()
3816 if (!(uc->state == UDMA_CHAN_IS_TERMINATING && in udma_issue_pending()
3821 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_issue_pending()
3832 spin_lock_irqsave(&uc->vc.lock, flags); in udma_tx_status()
3845 if (uc->desc && uc->desc->vd.tx.cookie == cookie) { in udma_tx_status()
3848 u32 residue = uc->desc->residue; in udma_tx_status()
3851 if (uc->desc->dir == DMA_MEM_TO_DEV) { in udma_tx_status()
3854 if (uc->config.ep_type != PSIL_EP_NATIVE) { in udma_tx_status()
3859 delay = bcnt - peer_bcnt; in udma_tx_status()
3861 } else if (uc->desc->dir == DMA_DEV_TO_MEM) { in udma_tx_status()
3864 if (uc->config.ep_type != PSIL_EP_NATIVE) { in udma_tx_status()
3869 delay = peer_bcnt - bcnt; in udma_tx_status()
3875 if (bcnt && !(bcnt % uc->desc->residue)) in udma_tx_status()
3878 residue -= bcnt % uc->desc->residue; in udma_tx_status()
3880 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { in udma_tx_status()
3893 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_tx_status()
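	/*
	 * Worked example of the residue arithmetic above (values are
	 * illustrative only): with uc->desc->residue = 4096 and a hardware
	 * byte count bcnt = 6144, bcnt % 4096 = 2048, so the reported
	 * residue becomes 4096 - 2048 = 2048 bytes still outstanding in
	 * the current pass through the descriptor.
	 */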
3902 switch (uc->config.dir) { in udma_pause()
3919 return -EINVAL; in udma_pause()
3930 switch (uc->config.dir) { in udma_resume()
3945 return -EINVAL; in udma_resume()
3957 spin_lock_irqsave(&uc->vc.lock, flags); in udma_terminate_all()
3962 if (uc->desc) { in udma_terminate_all()
3963 uc->terminated_desc = uc->desc; in udma_terminate_all()
3964 uc->desc = NULL; in udma_terminate_all()
3965 uc->terminated_desc->terminated = true; in udma_terminate_all()
3966 cancel_delayed_work(&uc->tx_drain.work); in udma_terminate_all()
3969 uc->paused = false; in udma_terminate_all()
3971 vchan_get_all_descriptors(&uc->vc, &head); in udma_terminate_all()
3972 spin_unlock_irqrestore(&uc->vc.lock, flags); in udma_terminate_all()
3973 vchan_dma_desc_free_list(&uc->vc, &head); in udma_terminate_all()
3983 vchan_synchronize(&uc->vc); in udma_synchronize()
3985 if (uc->state == UDMA_CHAN_IS_TERMINATING) { in udma_synchronize()
3986 timeout = wait_for_completion_timeout(&uc->teardown_completed, in udma_synchronize()
3989 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", in udma_synchronize()
3990 uc->id); in udma_synchronize()
3998 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); in udma_synchronize()
4000 cancel_delayed_work_sync(&uc->tx_drain.work); in udma_synchronize()
4008 struct udma_chan *uc = to_udma_chan(&vc->chan); in udma_desc_pre_callback()
4015 d = to_udma_desc(&vd->tx); in udma_desc_pre_callback()
4017 if (d->metadata_size) in udma_desc_pre_callback()
4021 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); in udma_desc_pre_callback()
4026 result->residue = d->residue - in udma_desc_pre_callback()
4028 if (result->residue) in udma_desc_pre_callback()
4029 result->result = DMA_TRANS_ABORTED; in udma_desc_pre_callback()
4031 result->result = DMA_TRANS_NOERROR; in udma_desc_pre_callback()
4033 result->residue = 0; in udma_desc_pre_callback()
4035 status = d->hwdesc[0].tr_resp_base->status; in udma_desc_pre_callback()
4037 result->result = DMA_TRANS_ABORTED; in udma_desc_pre_callback()
4039 result->result = DMA_TRANS_NOERROR; in udma_desc_pre_callback()
4055 spin_lock_irq(&vc->lock); in udma_vchan_complete()
4056 list_splice_tail_init(&vc->desc_completed, &head); in udma_vchan_complete()
4057 vd = vc->cyclic; in udma_vchan_complete()
4059 vc->cyclic = NULL; in udma_vchan_complete()
4060 dmaengine_desc_get_callback(&vd->tx, &cb); in udma_vchan_complete()
4064 spin_unlock_irq(&vc->lock); in udma_vchan_complete()
4072 dmaengine_desc_get_callback(&vd->tx, &cb); in udma_vchan_complete()
4074 list_del(&vd->node); in udma_vchan_complete()
4086 struct udma_dev *ud = to_udma_dev(chan->device); in udma_free_chan_resources()
4089 if (uc->terminated_desc) { in udma_free_chan_resources()
4094 cancel_delayed_work_sync(&uc->tx_drain.work); in udma_free_chan_resources()
4096 if (uc->irq_num_ring > 0) { in udma_free_chan_resources()
4097 free_irq(uc->irq_num_ring, uc); in udma_free_chan_resources()
4099 uc->irq_num_ring = 0; in udma_free_chan_resources()
4101 if (uc->irq_num_udma > 0) { in udma_free_chan_resources()
4102 free_irq(uc->irq_num_udma, uc); in udma_free_chan_resources()
4104 uc->irq_num_udma = 0; in udma_free_chan_resources()
4107 /* Release PSI-L pairing */ in udma_free_chan_resources()
4108 if (uc->psil_paired) { in udma_free_chan_resources()
4109 navss_psil_unpair(ud, uc->config.src_thread, in udma_free_chan_resources()
4110 uc->config.dst_thread); in udma_free_chan_resources()
4111 uc->psil_paired = false; in udma_free_chan_resources()
4114 vchan_free_chan_resources(&uc->vc); in udma_free_chan_resources()
4115 tasklet_kill(&uc->vc.task); in udma_free_chan_resources()
4122 if (uc->use_dma_pool) { in udma_free_chan_resources()
4123 dma_pool_destroy(uc->hdesc_pool); in udma_free_chan_resources()
4124 uc->use_dma_pool = false; in udma_free_chan_resources()
4147 if (chan->device->dev->driver != &udma_driver.driver && in udma_dma_filter_fn()
4148 chan->device->dev->driver != &bcdma_driver.driver && in udma_dma_filter_fn()
4149 chan->device->dev->driver != &pktdma_driver.driver) in udma_dma_filter_fn()
4153 ucc = &uc->config; in udma_dma_filter_fn()
4154 ud = uc->ud; in udma_dma_filter_fn()
4157 if (filter_param->atype > 2) { in udma_dma_filter_fn()
4158 dev_err(ud->dev, "Invalid channel atype: %u\n", in udma_dma_filter_fn()
4159 filter_param->atype); in udma_dma_filter_fn()
4163 if (filter_param->asel > 15) { in udma_dma_filter_fn()
4164 dev_err(ud->dev, "Invalid channel asel: %u\n", in udma_dma_filter_fn()
4165 filter_param->asel); in udma_dma_filter_fn()
4169 ucc->remote_thread_id = filter_param->remote_thread_id; in udma_dma_filter_fn()
4170 ucc->atype = filter_param->atype; in udma_dma_filter_fn()
4171 ucc->asel = filter_param->asel; in udma_dma_filter_fn()
4172 ucc->tr_trigger_type = filter_param->tr_trigger_type; in udma_dma_filter_fn()
4174 if (ucc->tr_trigger_type) { in udma_dma_filter_fn()
4175 ucc->dir = DMA_MEM_TO_MEM; in udma_dma_filter_fn()
4177 } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) { in udma_dma_filter_fn()
4178 ucc->dir = DMA_MEM_TO_DEV; in udma_dma_filter_fn()
4180 ucc->dir = DMA_DEV_TO_MEM; in udma_dma_filter_fn()
4183 ep_config = psil_get_ep_config(ucc->remote_thread_id); in udma_dma_filter_fn()
4185 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", in udma_dma_filter_fn()
4186 ucc->remote_thread_id); in udma_dma_filter_fn()
4187 ucc->dir = DMA_MEM_TO_MEM; in udma_dma_filter_fn()
4188 ucc->remote_thread_id = -1; in udma_dma_filter_fn()
4189 ucc->atype = 0; in udma_dma_filter_fn()
4190 ucc->asel = 0; in udma_dma_filter_fn()
4194 if (ud->match_data->type == DMA_TYPE_BCDMA && in udma_dma_filter_fn()
4195 ep_config->pkt_mode) { in udma_dma_filter_fn()
4196 dev_err(ud->dev, in udma_dma_filter_fn()
4197 "Only TR mode is supported (psi-l thread 0x%04x)\n", in udma_dma_filter_fn()
4198 ucc->remote_thread_id); in udma_dma_filter_fn()
4199 ucc->dir = DMA_MEM_TO_MEM; in udma_dma_filter_fn()
4200 ucc->remote_thread_id = -1; in udma_dma_filter_fn()
4201 ucc->atype = 0; in udma_dma_filter_fn()
4202 ucc->asel = 0; in udma_dma_filter_fn()
4206 ucc->pkt_mode = ep_config->pkt_mode; in udma_dma_filter_fn()
4207 ucc->channel_tpl = ep_config->channel_tpl; in udma_dma_filter_fn()
4208 ucc->notdpkt = ep_config->notdpkt; in udma_dma_filter_fn()
4209 ucc->ep_type = ep_config->ep_type; in udma_dma_filter_fn()
4211 if (ud->match_data->type == DMA_TYPE_PKTDMA && in udma_dma_filter_fn()
4212 ep_config->mapped_channel_id >= 0) { in udma_dma_filter_fn()
4213 ucc->mapped_channel_id = ep_config->mapped_channel_id; in udma_dma_filter_fn()
4214 ucc->default_flow_id = ep_config->default_flow_id; in udma_dma_filter_fn()
4216 ucc->mapped_channel_id = -1; in udma_dma_filter_fn()
4217 ucc->default_flow_id = -1; in udma_dma_filter_fn()
4220 if (ucc->ep_type != PSIL_EP_NATIVE) { in udma_dma_filter_fn()
4221 const struct udma_match_data *match_data = ud->match_data; in udma_dma_filter_fn()
4223 if (match_data->flags & UDMA_FLAG_PDMA_ACC32) in udma_dma_filter_fn()
4224 ucc->enable_acc32 = ep_config->pdma_acc32; in udma_dma_filter_fn()
4225 if (match_data->flags & UDMA_FLAG_PDMA_BURST) in udma_dma_filter_fn()
4226 ucc->enable_burst = ep_config->pdma_burst; in udma_dma_filter_fn()
4229 ucc->needs_epib = ep_config->needs_epib; in udma_dma_filter_fn()
4230 ucc->psd_size = ep_config->psd_size; in udma_dma_filter_fn()
4231 ucc->metadata_size = in udma_dma_filter_fn()
4232 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + in udma_dma_filter_fn()
4233 ucc->psd_size; in udma_dma_filter_fn()
4235 if (ucc->pkt_mode) in udma_dma_filter_fn()
4236 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + in udma_dma_filter_fn()
4237 ucc->metadata_size, ud->desc_align); in udma_dma_filter_fn()
4239 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, in udma_dma_filter_fn()
4240 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); in udma_dma_filter_fn()
4245 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id, in udma_dma_filter_fn()
4246 ucc->tr_trigger_type); in udma_dma_filter_fn()
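A hedged worked example of the metadata sizing computed above: with needs_epib set, the CPPI5 EPIB contributes CPPI5_INFO0_HDESC_EPIB_SIZE (16) bytes, so an endpoint with an illustrative psd_size of 16 would get:

	metadata_size = 16 (EPIB) + 16 (PSD) = 32 bytes
	hdesc_size    = ALIGN(sizeof(struct cppi5_host_desc_t) + 32,
			      ud->desc_align)	/* packet mode only */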
4255 struct udma_dev *ud = ofdma->of_dma_data; in udma_of_xlate()
4259 if (ud->match_data->type == DMA_TYPE_BCDMA) { in udma_of_xlate()
4260 if (dma_spec->args_count != 3) in udma_of_xlate()
4263 filter_param.tr_trigger_type = dma_spec->args[0]; in udma_of_xlate()
4264 filter_param.remote_thread_id = dma_spec->args[1]; in udma_of_xlate()
4265 filter_param.asel = dma_spec->args[2]; in udma_of_xlate()
4268 if (dma_spec->args_count != 1 && dma_spec->args_count != 2) in udma_of_xlate()
4271 filter_param.remote_thread_id = dma_spec->args[0]; in udma_of_xlate()
4273 if (dma_spec->args_count == 2) { in udma_of_xlate()
4274 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_of_xlate()
4275 filter_param.atype = dma_spec->args[1]; in udma_of_xlate()
4279 filter_param.asel = dma_spec->args[1]; in udma_of_xlate()
4287 chan = __dma_request_channel(&ud->ddev.cap_mask, udma_dma_filter_fn, &filter_param, in udma_of_xlate()
4288 ofdma->of_node); in udma_of_xlate()
4290  dev_err(ud->dev, "failed to get channel in %s\n", __func__); in udma_of_xlate()
4291 return ERR_PTR(-EINVAL); in udma_of_xlate()
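A hedged illustration of how the dma_spec cells parsed above map onto filter_param (the consumer node name and cell values are hypothetical):

	/* BCDMA consumer, three cells:  dmas = <&main_bcdma 1 0xc600 15>; */
	dma_spec->args[0] == 1      ->  filter_param.tr_trigger_type
	dma_spec->args[1] == 0xc600 ->  filter_param.remote_thread_id
	dma_spec->args[2] == 15     ->  filter_param.asel

	/*
	 * UDMA/PKTDMA consumers pass one or two cells: the PSI-L thread id
	 * plus an optional atype (UDMA) or asel (the other types).
	 */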
4391 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4427 .compatible = "ti,am654-navss-main-udmap",
4431 .compatible = "ti,am654-navss-mcu-udmap",
4434 .compatible = "ti,j721e-navss-main-udmap",
4437 .compatible = "ti,j721e-navss-mcu-udmap",
4441 .compatible = "ti,am64-dmss-bcdma",
4445 .compatible = "ti,am64-dmss-pktdma",
4449 .compatible = "ti,am62a-dmss-bcdma-csirx",
4453 .compatible = "ti,j721s2-dmss-bcdma-csi",
4457 .compatible = "ti,j722s-dmss-bcdma-csi",
4515 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]); in udma_get_mmrs()
4516 if (IS_ERR(ud->mmrs[MMR_GCFG])) in udma_get_mmrs()
4517 return PTR_ERR(ud->mmrs[MMR_GCFG]); in udma_get_mmrs()
4519 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); in udma_get_mmrs()
4520 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); in udma_get_mmrs()
4522 switch (ud->match_data->type) { in udma_get_mmrs()
4524 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); in udma_get_mmrs()
4525 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); in udma_get_mmrs()
4526 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2); in udma_get_mmrs()
4527 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); in udma_get_mmrs()
4530 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2) + in udma_get_mmrs()
4533 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2); in udma_get_mmrs()
4534 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2); in udma_get_mmrs()
4535 ud->rflow_cnt = ud->rchan_cnt; in udma_get_mmrs()
4538 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30); in udma_get_mmrs()
4539 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2); in udma_get_mmrs()
4540 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2); in udma_get_mmrs()
4541 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3); in udma_get_mmrs()
4542 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4); in udma_get_mmrs()
4545 return -EINVAL; in udma_get_mmrs()
4549 if (i == MMR_BCHANRT && ud->bchan_cnt == 0) in udma_get_mmrs()
4551 if (i == MMR_TCHANRT && ud->tchan_cnt == 0) in udma_get_mmrs()
4553 if (i == MMR_RCHANRT && ud->rchan_cnt == 0) in udma_get_mmrs()
4556 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]); in udma_get_mmrs()
4557 if (IS_ERR(ud->mmrs[i])) in udma_get_mmrs()
4558 return PTR_ERR(ud->mmrs[i]); in udma_get_mmrs()
4568 bitmap_clear(map, rm_desc->start, rm_desc->num); in udma_mark_resource_ranges()
4569 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec); in udma_mark_resource_ranges()
4570 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name, in udma_mark_resource_ranges()
4571 rm_desc->start, rm_desc->num, rm_desc->start_sec, in udma_mark_resource_ranges()
4572 rm_desc->num_sec); in udma_mark_resource_ranges()
4576 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4577 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4578 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4579 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4580 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4586 struct device *dev = ud->dev; in udma_setup_resources()
4588 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in udma_setup_resources()
4592 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); in udma_setup_resources()
4593 if (of_device_is_compatible(dev->of_node, in udma_setup_resources()
4594 "ti,am654-navss-main-udmap")) { in udma_setup_resources()
4595 ud->tchan_tpl.levels = 2; in udma_setup_resources()
4596 ud->tchan_tpl.start_idx[0] = 8; in udma_setup_resources()
4597 } else if (of_device_is_compatible(dev->of_node, in udma_setup_resources()
4598 "ti,am654-navss-mcu-udmap")) { in udma_setup_resources()
4599 ud->tchan_tpl.levels = 2; in udma_setup_resources()
4600 ud->tchan_tpl.start_idx[0] = 2; in udma_setup_resources()
4602 ud->tchan_tpl.levels = 3; in udma_setup_resources()
4603 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); in udma_setup_resources()
4604 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in udma_setup_resources()
4606 ud->tchan_tpl.levels = 2; in udma_setup_resources()
4607 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in udma_setup_resources()
4609 ud->tchan_tpl.levels = 1; in udma_setup_resources()
4612 ud->rchan_tpl.levels = ud->tchan_tpl.levels; in udma_setup_resources()
4613 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; in udma_setup_resources()
4614 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; in udma_setup_resources()
4616 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), in udma_setup_resources()
4618 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), in udma_setup_resources()
4620 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), in udma_setup_resources()
4622 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), in udma_setup_resources()
4624 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
4627 ud->rflow_gp_map_allocated = devm_kcalloc(dev, in udma_setup_resources()
4628 BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
4631 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), in udma_setup_resources()
4634 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), in udma_setup_resources()
4637 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || in udma_setup_resources()
4638 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || in udma_setup_resources()
4639 !ud->rflows || !ud->rflow_in_use) in udma_setup_resources()
4640 return -ENOMEM; in udma_setup_resources()
4647 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); in udma_setup_resources()
4650 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); in udma_setup_resources()
4657 tisci_rm->rm_ranges[i] = in udma_setup_resources()
4658 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, in udma_setup_resources()
4659 tisci_rm->tisci_dev_id, in udma_setup_resources()
4664 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in udma_setup_resources()
4666 bitmap_zero(ud->tchan_map, ud->tchan_cnt); in udma_setup_resources()
4669 bitmap_fill(ud->tchan_map, ud->tchan_cnt); in udma_setup_resources()
4670 for (i = 0; i < rm_res->sets; i++) in udma_setup_resources()
4671 udma_mark_resource_ranges(ud, ud->tchan_map, in udma_setup_resources()
4672 &rm_res->desc[i], "tchan"); in udma_setup_resources()
4673 irq_res.sets = rm_res->sets; in udma_setup_resources()
4677 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in udma_setup_resources()
4679 bitmap_zero(ud->rchan_map, ud->rchan_cnt); in udma_setup_resources()
4682 bitmap_fill(ud->rchan_map, ud->rchan_cnt); in udma_setup_resources()
4683 for (i = 0; i < rm_res->sets; i++) in udma_setup_resources()
4684 udma_mark_resource_ranges(ud, ud->rchan_map, in udma_setup_resources()
4685 &rm_res->desc[i], "rchan"); in udma_setup_resources()
4686 irq_res.sets += rm_res->sets; in udma_setup_resources()
4691 return -ENOMEM; in udma_setup_resources()
4692 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in udma_setup_resources()
4695 irq_res.desc[0].num = ud->tchan_cnt; in udma_setup_resources()
4698 for (i = 0; i < rm_res->sets; i++) { in udma_setup_resources()
4699 irq_res.desc[i].start = rm_res->desc[i].start; in udma_setup_resources()
4700 irq_res.desc[i].num = rm_res->desc[i].num; in udma_setup_resources()
4701 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec; in udma_setup_resources()
4702 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; in udma_setup_resources()
4705 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in udma_setup_resources()
4708 irq_res.desc[i].num = ud->rchan_cnt; in udma_setup_resources()
4710 for (j = 0; j < rm_res->sets; j++, i++) { in udma_setup_resources()
4711 if (rm_res->desc[j].num) { in udma_setup_resources()
4712 irq_res.desc[i].start = rm_res->desc[j].start + in udma_setup_resources()
4713 ud->soc_data->oes.udma_rchan; in udma_setup_resources()
4714 irq_res.desc[i].num = rm_res->desc[j].num; in udma_setup_resources()
4716 if (rm_res->desc[j].num_sec) { in udma_setup_resources()
4717 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + in udma_setup_resources()
4718 ud->soc_data->oes.udma_rchan; in udma_setup_resources()
4719 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; in udma_setup_resources()
4723 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); in udma_setup_resources()
4726 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); in udma_setup_resources()
4731 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; in udma_setup_resources()
4734 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, in udma_setup_resources()
4735 ud->rflow_cnt - ud->rchan_cnt); in udma_setup_resources()
4737 for (i = 0; i < rm_res->sets; i++) in udma_setup_resources()
4738 udma_mark_resource_ranges(ud, ud->rflow_gp_map, in udma_setup_resources()
4739 &rm_res->desc[i], "gp-rflow"); in udma_setup_resources()
4748 struct device *dev = ud->dev; in bcdma_setup_resources()
4750 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in bcdma_setup_resources()
4751 const struct udma_oes_offsets *oes = &ud->soc_data->oes; in bcdma_setup_resources()
4755 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c); in bcdma_setup_resources()
4757 ud->bchan_tpl.levels = 3; in bcdma_setup_resources()
4758 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap); in bcdma_setup_resources()
4759 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); in bcdma_setup_resources()
4761 ud->bchan_tpl.levels = 2; in bcdma_setup_resources()
4762 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap); in bcdma_setup_resources()
4764 ud->bchan_tpl.levels = 1; in bcdma_setup_resources()
4767 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30); in bcdma_setup_resources()
4769 ud->rchan_tpl.levels = 3; in bcdma_setup_resources()
4770 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap); in bcdma_setup_resources()
4771 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); in bcdma_setup_resources()
4773 ud->rchan_tpl.levels = 2; in bcdma_setup_resources()
4774 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap); in bcdma_setup_resources()
4776 ud->rchan_tpl.levels = 1; in bcdma_setup_resources()
4780 ud->tchan_tpl.levels = 3; in bcdma_setup_resources()
4781 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap); in bcdma_setup_resources()
4782 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); in bcdma_setup_resources()
4784 ud->tchan_tpl.levels = 2; in bcdma_setup_resources()
4785 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap); in bcdma_setup_resources()
4787 ud->tchan_tpl.levels = 1; in bcdma_setup_resources()
4790 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt), in bcdma_setup_resources()
4792 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans), in bcdma_setup_resources()
4794 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), in bcdma_setup_resources()
4796 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), in bcdma_setup_resources()
4798 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), in bcdma_setup_resources()
4800 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), in bcdma_setup_resources()
4803 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt), in bcdma_setup_resources()
4806 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows), in bcdma_setup_resources()
4809 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map || in bcdma_setup_resources()
4810 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans || in bcdma_setup_resources()
4811 !ud->rflows) in bcdma_setup_resources()
4812 return -ENOMEM; in bcdma_setup_resources()
4818 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0) in bcdma_setup_resources()
4820 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0) in bcdma_setup_resources()
4822 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0) in bcdma_setup_resources()
4825 tisci_rm->rm_ranges[i] = in bcdma_setup_resources()
4826 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, in bcdma_setup_resources()
4827 tisci_rm->tisci_dev_id, in bcdma_setup_resources()
4834 if (ud->bchan_cnt) { in bcdma_setup_resources()
4835 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; in bcdma_setup_resources()
4837 bitmap_zero(ud->bchan_map, ud->bchan_cnt); in bcdma_setup_resources()
4840 bitmap_fill(ud->bchan_map, ud->bchan_cnt); in bcdma_setup_resources()
4841 for (i = 0; i < rm_res->sets; i++) in bcdma_setup_resources()
4842 udma_mark_resource_ranges(ud, ud->bchan_map, in bcdma_setup_resources()
4843 &rm_res->desc[i], in bcdma_setup_resources()
4845 irq_res.sets += rm_res->sets; in bcdma_setup_resources()
4850 if (ud->tchan_cnt) { in bcdma_setup_resources()
4851 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in bcdma_setup_resources()
4853 bitmap_zero(ud->tchan_map, ud->tchan_cnt); in bcdma_setup_resources()
4856 bitmap_fill(ud->tchan_map, ud->tchan_cnt); in bcdma_setup_resources()
4857 for (i = 0; i < rm_res->sets; i++) in bcdma_setup_resources()
4858 udma_mark_resource_ranges(ud, ud->tchan_map, in bcdma_setup_resources()
4859 &rm_res->desc[i], in bcdma_setup_resources()
4861 irq_res.sets += rm_res->sets * 2; in bcdma_setup_resources()
4866 if (ud->rchan_cnt) { in bcdma_setup_resources()
4867 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in bcdma_setup_resources()
4869 bitmap_zero(ud->rchan_map, ud->rchan_cnt); in bcdma_setup_resources()
4872 bitmap_fill(ud->rchan_map, ud->rchan_cnt); in bcdma_setup_resources()
4873 for (i = 0; i < rm_res->sets; i++) in bcdma_setup_resources()
4874 udma_mark_resource_ranges(ud, ud->rchan_map, in bcdma_setup_resources()
4875 &rm_res->desc[i], in bcdma_setup_resources()
4877 irq_res.sets += rm_res->sets * 2; in bcdma_setup_resources()
4883 return -ENOMEM; in bcdma_setup_resources()
4884 if (ud->bchan_cnt) { in bcdma_setup_resources()
4885 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN]; in bcdma_setup_resources()
4887 irq_res.desc[0].start = oes->bcdma_bchan_ring; in bcdma_setup_resources()
4888 irq_res.desc[0].num = ud->bchan_cnt; in bcdma_setup_resources()
4891 for (i = 0; i < rm_res->sets; i++) { in bcdma_setup_resources()
4892 irq_res.desc[i].start = rm_res->desc[i].start + in bcdma_setup_resources()
4893 oes->bcdma_bchan_ring; in bcdma_setup_resources()
4894 irq_res.desc[i].num = rm_res->desc[i].num; in bcdma_setup_resources()
4896 if (rm_res->desc[i].num_sec) { in bcdma_setup_resources()
4897 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec + in bcdma_setup_resources()
4898 oes->bcdma_bchan_ring; in bcdma_setup_resources()
4899 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; in bcdma_setup_resources()
4907 if (ud->tchan_cnt) { in bcdma_setup_resources()
4908 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in bcdma_setup_resources()
4910 irq_res.desc[i].start = oes->bcdma_tchan_data; in bcdma_setup_resources()
4911 irq_res.desc[i].num = ud->tchan_cnt; in bcdma_setup_resources()
4912 irq_res.desc[i + 1].start = oes->bcdma_tchan_ring; in bcdma_setup_resources()
4913 irq_res.desc[i + 1].num = ud->tchan_cnt; in bcdma_setup_resources()
4916 for (j = 0; j < rm_res->sets; j++, i += 2) { in bcdma_setup_resources()
4917 irq_res.desc[i].start = rm_res->desc[j].start + in bcdma_setup_resources()
4918 oes->bcdma_tchan_data; in bcdma_setup_resources()
4919 irq_res.desc[i].num = rm_res->desc[j].num; in bcdma_setup_resources()
4921 irq_res.desc[i + 1].start = rm_res->desc[j].start + in bcdma_setup_resources()
4922 oes->bcdma_tchan_ring; in bcdma_setup_resources()
4923 irq_res.desc[i + 1].num = rm_res->desc[j].num; in bcdma_setup_resources()
4925 if (rm_res->desc[j].num_sec) { in bcdma_setup_resources()
4926 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + in bcdma_setup_resources()
4927 oes->bcdma_tchan_data; in bcdma_setup_resources()
4928 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; in bcdma_setup_resources()
4929 irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec + in bcdma_setup_resources()
4930 oes->bcdma_tchan_ring; in bcdma_setup_resources()
4931 irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec; in bcdma_setup_resources()
4936 if (ud->rchan_cnt) { in bcdma_setup_resources()
4937 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in bcdma_setup_resources()
4939 irq_res.desc[i].start = oes->bcdma_rchan_data; in bcdma_setup_resources()
4940 irq_res.desc[i].num = ud->rchan_cnt; in bcdma_setup_resources()
4941 irq_res.desc[i + 1].start = oes->bcdma_rchan_ring; in bcdma_setup_resources()
4942 irq_res.desc[i + 1].num = ud->rchan_cnt; in bcdma_setup_resources()
4945 for (j = 0; j < rm_res->sets; j++, i += 2) { in bcdma_setup_resources()
4946 irq_res.desc[i].start = rm_res->desc[j].start + in bcdma_setup_resources()
4947 oes->bcdma_rchan_data; in bcdma_setup_resources()
4948 irq_res.desc[i].num = rm_res->desc[j].num; in bcdma_setup_resources()
4950 irq_res.desc[i + 1].start = rm_res->desc[j].start + in bcdma_setup_resources()
4951 oes->bcdma_rchan_ring; in bcdma_setup_resources()
4952 irq_res.desc[i + 1].num = rm_res->desc[j].num; in bcdma_setup_resources()
4954 if (rm_res->desc[j].num_sec) { in bcdma_setup_resources()
4955 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + in bcdma_setup_resources()
4956 oes->bcdma_rchan_data; in bcdma_setup_resources()
4957 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; in bcdma_setup_resources()
4958 irq_res.desc[i + 1].start_sec = rm_res->desc[j].start_sec + in bcdma_setup_resources()
4959 oes->bcdma_rchan_ring; in bcdma_setup_resources()
4960 irq_res.desc[i + 1].num_sec = rm_res->desc[j].num_sec; in bcdma_setup_resources()
4966 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); in bcdma_setup_resources()
4969 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); in bcdma_setup_resources()
4979 struct device *dev = ud->dev; in pktdma_setup_resources()
4981 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; in pktdma_setup_resources()
4982 const struct udma_oes_offsets *oes = &ud->soc_data->oes; in pktdma_setup_resources()
4986 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); in pktdma_setup_resources()
4988 ud->tchan_tpl.levels = 3; in pktdma_setup_resources()
4989 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3); in pktdma_setup_resources()
4990 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in pktdma_setup_resources()
4992 ud->tchan_tpl.levels = 2; in pktdma_setup_resources()
4993 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3); in pktdma_setup_resources()
4995 ud->tchan_tpl.levels = 1; in pktdma_setup_resources()
4998 ud->rchan_tpl.levels = ud->tchan_tpl.levels; in pktdma_setup_resources()
4999 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0]; in pktdma_setup_resources()
5000 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1]; in pktdma_setup_resources()
5002 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), in pktdma_setup_resources()
5004 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), in pktdma_setup_resources()
5006 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), in pktdma_setup_resources()
5008 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), in pktdma_setup_resources()
5010 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), in pktdma_setup_resources()
5013 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), in pktdma_setup_resources()
5015 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt), in pktdma_setup_resources()
5018 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans || in pktdma_setup_resources()
5019 !ud->rchans || !ud->rflows || !ud->rflow_in_use) in pktdma_setup_resources()
5020 return -ENOMEM; in pktdma_setup_resources()
5027 tisci_rm->rm_ranges[i] = in pktdma_setup_resources()
5028 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, in pktdma_setup_resources()
5029 tisci_rm->tisci_dev_id, in pktdma_setup_resources()
5034 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; in pktdma_setup_resources()
5036 bitmap_zero(ud->tchan_map, ud->tchan_cnt); in pktdma_setup_resources()
5038 bitmap_fill(ud->tchan_map, ud->tchan_cnt); in pktdma_setup_resources()
5039 for (i = 0; i < rm_res->sets; i++) in pktdma_setup_resources()
5040 udma_mark_resource_ranges(ud, ud->tchan_map, in pktdma_setup_resources()
5041 &rm_res->desc[i], "tchan"); in pktdma_setup_resources()
5045 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; in pktdma_setup_resources()
5047 bitmap_zero(ud->rchan_map, ud->rchan_cnt); in pktdma_setup_resources()
5049 bitmap_fill(ud->rchan_map, ud->rchan_cnt); in pktdma_setup_resources()
5050 for (i = 0; i < rm_res->sets; i++) in pktdma_setup_resources()
5051 udma_mark_resource_ranges(ud, ud->rchan_map, in pktdma_setup_resources()
5052 &rm_res->desc[i], "rchan"); in pktdma_setup_resources()
5056 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; in pktdma_setup_resources()
5059 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt); in pktdma_setup_resources()
5062 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt); in pktdma_setup_resources()
5063 for (i = 0; i < rm_res->sets; i++) in pktdma_setup_resources()
5064 udma_mark_resource_ranges(ud, ud->rflow_in_use, in pktdma_setup_resources()
5065 &rm_res->desc[i], "rflow"); in pktdma_setup_resources()
5066 irq_res.sets = rm_res->sets; in pktdma_setup_resources()
5070 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; in pktdma_setup_resources()
5073 bitmap_zero(ud->tflow_map, ud->tflow_cnt); in pktdma_setup_resources()
5076 bitmap_fill(ud->tflow_map, ud->tflow_cnt); in pktdma_setup_resources()
5077 for (i = 0; i < rm_res->sets; i++) in pktdma_setup_resources()
5078 udma_mark_resource_ranges(ud, ud->tflow_map, in pktdma_setup_resources()
5079 &rm_res->desc[i], "tflow"); in pktdma_setup_resources()
5080 irq_res.sets += rm_res->sets; in pktdma_setup_resources()
5085 return -ENOMEM; in pktdma_setup_resources()
5086 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW]; in pktdma_setup_resources()
5088 irq_res.desc[0].start = oes->pktdma_tchan_flow; in pktdma_setup_resources()
5089 irq_res.desc[0].num = ud->tflow_cnt; in pktdma_setup_resources()
5092 for (i = 0; i < rm_res->sets; i++) { in pktdma_setup_resources()
5093 irq_res.desc[i].start = rm_res->desc[i].start + in pktdma_setup_resources()
5094 oes->pktdma_tchan_flow; in pktdma_setup_resources()
5095 irq_res.desc[i].num = rm_res->desc[i].num; in pktdma_setup_resources()
5097 if (rm_res->desc[i].num_sec) { in pktdma_setup_resources()
5098 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec + in pktdma_setup_resources()
5099 oes->pktdma_tchan_flow; in pktdma_setup_resources()
5100 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec; in pktdma_setup_resources()
5104 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; in pktdma_setup_resources()
5106 irq_res.desc[i].start = oes->pktdma_rchan_flow; in pktdma_setup_resources()
5107 irq_res.desc[i].num = ud->rflow_cnt; in pktdma_setup_resources()
5109 for (j = 0; j < rm_res->sets; j++, i++) { in pktdma_setup_resources()
5110 irq_res.desc[i].start = rm_res->desc[j].start + in pktdma_setup_resources()
5111 oes->pktdma_rchan_flow; in pktdma_setup_resources()
5112 irq_res.desc[i].num = rm_res->desc[j].num; in pktdma_setup_resources()
5114 if (rm_res->desc[j].num_sec) { in pktdma_setup_resources()
5115 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec + in pktdma_setup_resources()
5116 oes->pktdma_rchan_flow; in pktdma_setup_resources()
5117 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec; in pktdma_setup_resources()
5121 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); in pktdma_setup_resources()
5124 dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); in pktdma_setup_resources()
5133 struct device *dev = ud->dev; in setup_resources()
5136 switch (ud->match_data->type) { in setup_resources()
5147 return -EINVAL; in setup_resources()
5153 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt; in setup_resources()
5154 if (ud->bchan_cnt) in setup_resources()
5155 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt); in setup_resources()
5156 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); in setup_resources()
5157 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); in setup_resources()
5159 return -ENODEV; in setup_resources()
5161 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), in setup_resources()
5163 if (!ud->channels) in setup_resources()
5164 return -ENOMEM; in setup_resources()
5166 switch (ud->match_data->type) { in setup_resources()
5169 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", in setup_resources()
5171 ud->tchan_cnt - bitmap_weight(ud->tchan_map, in setup_resources()
5172 ud->tchan_cnt), in setup_resources()
5173 ud->rchan_cnt - bitmap_weight(ud->rchan_map, in setup_resources()
5174 ud->rchan_cnt), in setup_resources()
5175 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, in setup_resources()
5176 ud->rflow_cnt)); in setup_resources()
5182 ud->bchan_cnt - bitmap_weight(ud->bchan_map, in setup_resources()
5183 ud->bchan_cnt), in setup_resources()
5184 ud->tchan_cnt - bitmap_weight(ud->tchan_map, in setup_resources()
5185 ud->tchan_cnt), in setup_resources()
5186 ud->rchan_cnt - bitmap_weight(ud->rchan_map, in setup_resources()
5187 ud->rchan_cnt)); in setup_resources()
5193 ud->tchan_cnt - bitmap_weight(ud->tchan_map, in setup_resources()
5194 ud->tchan_cnt), in setup_resources()
5195 ud->rchan_cnt - bitmap_weight(ud->rchan_map, in setup_resources()
5196 ud->rchan_cnt)); in setup_resources()
5207 struct udma_rx_flush *rx_flush = &ud->rx_flush; in udma_setup_rx_flush()
5211 struct device *dev = ud->dev; in udma_setup_rx_flush()
5216 rx_flush->buffer_size = SZ_1K; in udma_setup_rx_flush()
5217 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size, in udma_setup_rx_flush()
5219 if (!rx_flush->buffer_vaddr) in udma_setup_rx_flush()
5220 return -ENOMEM; in udma_setup_rx_flush()
5222 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr, in udma_setup_rx_flush()
5223 rx_flush->buffer_size, in udma_setup_rx_flush()
5225 if (dma_mapping_error(dev, rx_flush->buffer_paddr)) in udma_setup_rx_flush()
5226 return -ENOMEM; in udma_setup_rx_flush()
5229 hwdesc = &rx_flush->hwdescs[0]; in udma_setup_rx_flush()
5231 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1); in udma_setup_rx_flush()
5232 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
5233 ud->desc_align); in udma_setup_rx_flush()
5235 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
5237 if (!hwdesc->cppi5_desc_vaddr) in udma_setup_rx_flush()
5238 return -ENOMEM; in udma_setup_rx_flush()
5240 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, in udma_setup_rx_flush()
5241 hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
5243 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) in udma_setup_rx_flush()
5244 return -ENOMEM; in udma_setup_rx_flush()
5247 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; in udma_setup_rx_flush()
5249 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size; in udma_setup_rx_flush()
5251 tr_desc = hwdesc->cppi5_desc_vaddr; in udma_setup_rx_flush()
5256 tr_req = hwdesc->tr_req_base; in udma_setup_rx_flush()
5257 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false, in udma_setup_rx_flush()
5259 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT); in udma_setup_rx_flush()
5261 tr_req->addr = rx_flush->buffer_paddr; in udma_setup_rx_flush()
5262 tr_req->icnt0 = rx_flush->buffer_size; in udma_setup_rx_flush()
5263 tr_req->icnt1 = 1; in udma_setup_rx_flush()
5265 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, in udma_setup_rx_flush()
5266 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); in udma_setup_rx_flush()
5269 hwdesc = &rx_flush->hwdescs[1]; in udma_setup_rx_flush()
5270 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + in udma_setup_rx_flush()
5273 ud->desc_align); in udma_setup_rx_flush()
5275 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
5277 if (!hwdesc->cppi5_desc_vaddr) in udma_setup_rx_flush()
5278 return -ENOMEM; in udma_setup_rx_flush()
5280 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr, in udma_setup_rx_flush()
5281 hwdesc->cppi5_desc_size, in udma_setup_rx_flush()
5283 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr)) in udma_setup_rx_flush()
5284 return -ENOMEM; in udma_setup_rx_flush()
5286 desc = hwdesc->cppi5_desc_vaddr; in udma_setup_rx_flush()
5288 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT); in udma_setup_rx_flush()
5289 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0); in udma_setup_rx_flush()
5292 rx_flush->buffer_paddr, rx_flush->buffer_size, in udma_setup_rx_flush()
5293 rx_flush->buffer_paddr, rx_flush->buffer_size); in udma_setup_rx_flush()
5295 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr, in udma_setup_rx_flush()
5296 hwdesc->cppi5_desc_size, DMA_TO_DEVICE); in udma_setup_rx_flush()
5305 struct udma_chan_config *ucc = &uc->config; in udma_dbg_summary_show_chan()
5307 seq_printf(s, " %-13s| %s", dma_chan_name(chan), in udma_dbg_summary_show_chan()
5308 chan->dbg_client_name ?: "in-use"); in udma_dbg_summary_show_chan()
5309 if (ucc->tr_trigger_type) in udma_dbg_summary_show_chan()
5313 dmaengine_get_direction_text(uc->config.dir)); in udma_dbg_summary_show_chan()
5315 switch (uc->config.dir) { in udma_dbg_summary_show_chan()
5317 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) { in udma_dbg_summary_show_chan()
5318 seq_printf(s, "bchan%d)\n", uc->bchan->id); in udma_dbg_summary_show_chan()
5322 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id, in udma_dbg_summary_show_chan()
5323 ucc->src_thread, ucc->dst_thread); in udma_dbg_summary_show_chan()
5326 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id, in udma_dbg_summary_show_chan()
5327 ucc->src_thread, ucc->dst_thread); in udma_dbg_summary_show_chan()
5328 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) in udma_dbg_summary_show_chan()
5329 seq_printf(s, "rflow%d, ", uc->rflow->id); in udma_dbg_summary_show_chan()
5332 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id, in udma_dbg_summary_show_chan()
5333 ucc->src_thread, ucc->dst_thread); in udma_dbg_summary_show_chan()
5334 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA) in udma_dbg_summary_show_chan()
5335 seq_printf(s, "tflow%d, ", uc->tchan->tflow_id); in udma_dbg_summary_show_chan()
5342 if (ucc->ep_type == PSIL_EP_NATIVE) { in udma_dbg_summary_show_chan()
5343 seq_printf(s, "PSI-L Native"); in udma_dbg_summary_show_chan()
5344 if (ucc->metadata_size) { in udma_dbg_summary_show_chan()
5345 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : ""); in udma_dbg_summary_show_chan()
5346 if (ucc->psd_size) in udma_dbg_summary_show_chan()
5347 seq_printf(s, " PSDsize:%u", ucc->psd_size); in udma_dbg_summary_show_chan()
5352 if (ucc->enable_acc32 || ucc->enable_burst) in udma_dbg_summary_show_chan()
5354 ucc->enable_acc32 ? " ACC32" : "", in udma_dbg_summary_show_chan()
5355 ucc->enable_burst ? " BURST" : ""); in udma_dbg_summary_show_chan()
5358 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode"); in udma_dbg_summary_show_chan()
5366 list_for_each_entry(chan, &dma_dev->channels, device_node) { in udma_dbg_summary_show()
5367 if (chan->client_count) in udma_dbg_summary_show()
5375 const struct udma_match_data *match_data = ud->match_data; in udma_get_copy_align()
5376 u8 tpl; in udma_get_copy_align() local
5378 if (!match_data->enable_memcpy_support) in udma_get_copy_align()
5381 /* Get the highest TPL level the device supports for memcpy */ in udma_get_copy_align()
5382 if (ud->bchan_cnt) in udma_get_copy_align()
5383 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0); in udma_get_copy_align()
5384 else if (ud->tchan_cnt) in udma_get_copy_align()
5385 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0); in udma_get_copy_align()
5389 switch (match_data->burst_size[tpl]) { in udma_get_copy_align()
5409 struct device_node *navss_node = pdev->dev.parent->of_node; in udma_probe()
5411 struct device *dev = &pdev->dev; in udma_probe()
5423 return -ENOMEM; in udma_probe()
5425 match = of_match_node(udma_of_match, dev->of_node); in udma_probe()
5428 return -ENODEV; in udma_probe()
5430 ud->match_data = match->data; in udma_probe()
5432 ud->soc_data = ud->match_data->soc_data; in udma_probe()
5433 if (!ud->soc_data) { in udma_probe()
5437 return -ENODEV; in udma_probe()
5439 ud->soc_data = soc->data; in udma_probe()
5446 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); in udma_probe()
5447 if (IS_ERR(ud->tisci_rm.tisci)) in udma_probe()
5448 return PTR_ERR(ud->tisci_rm.tisci); in udma_probe()
5450 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", in udma_probe()
5451 &ud->tisci_rm.tisci_dev_id); in udma_probe()
5453 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); in udma_probe()
5456 pdev->id = ud->tisci_rm.tisci_dev_id; in udma_probe()
5458 ret = of_property_read_u32(navss_node, "ti,sci-dev-id", in udma_probe()
5459 &ud->tisci_rm.tisci_navss_dev_id); in udma_probe()
5461 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); in udma_probe()
5465 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_probe()
5466 ret = of_property_read_u32(dev->of_node, "ti,udma-atype", in udma_probe()
5467 &ud->atype); in udma_probe()
5468 if (!ret && ud->atype > 2) { in udma_probe()
5469 dev_err(dev, "Invalid atype: %u\n", ud->atype); in udma_probe()
5470 return -EINVAL; in udma_probe()
5473 ret = of_property_read_u32(dev->of_node, "ti,asel", in udma_probe()
5474 &ud->asel); in udma_probe()
5475 if (!ret && ud->asel > 15) { in udma_probe()
5476 dev_err(dev, "Invalid asel: %u\n", ud->asel); in udma_probe()
5477 return -EINVAL; in udma_probe()
5481 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; in udma_probe()
5482 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; in udma_probe()
5484 if (ud->match_data->type == DMA_TYPE_UDMA) { in udma_probe()
5485 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); in udma_probe()
5489 ring_init_data.tisci = ud->tisci_rm.tisci; in udma_probe()
5490 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id; in udma_probe()
5491 if (ud->match_data->type == DMA_TYPE_BCDMA) { in udma_probe()
5492 ring_init_data.num_rings = ud->bchan_cnt + in udma_probe()
5493 ud->tchan_cnt + in udma_probe()
5494 ud->rchan_cnt; in udma_probe()
5496 ring_init_data.num_rings = ud->rflow_cnt + in udma_probe()
5497 ud->tflow_cnt; in udma_probe()
5500 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data); in udma_probe()
5503 if (IS_ERR(ud->ringacc)) in udma_probe()
5504 return PTR_ERR(ud->ringacc); in udma_probe()
5506 dev->msi.domain = of_msi_get_domain(dev, dev->of_node, in udma_probe()
5507 DOMAIN_BUS_TI_SCI_INTA_MSI); in udma_probe()
5508 if (!dev->msi.domain) { in udma_probe()
5509 return -EPROBE_DEFER; in udma_probe()
5512 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); in udma_probe()
5514 if (ud->match_data->type != DMA_TYPE_PKTDMA) { in udma_probe()
5515 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); in udma_probe()
5516 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; in udma_probe()
5519 ud->ddev.device_config = udma_slave_config; in udma_probe()
5520 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; in udma_probe()
5521 ud->ddev.device_issue_pending = udma_issue_pending; in udma_probe()
5522 ud->ddev.device_tx_status = udma_tx_status; in udma_probe()
5523 ud->ddev.device_pause = udma_pause; in udma_probe()
5524 ud->ddev.device_resume = udma_resume; in udma_probe()
5525 ud->ddev.device_terminate_all = udma_terminate_all; in udma_probe()
5526 ud->ddev.device_synchronize = udma_synchronize; in udma_probe()
5528 ud->ddev.dbg_summary_show = udma_dbg_summary_show; in udma_probe()
5531 switch (ud->match_data->type) { in udma_probe()
5532 case DMA_TYPE_UDMA: in udma_probe()
5533 ud->ddev.device_alloc_chan_resources = in udma_probe()
5534 udma_alloc_chan_resources; in udma_probe()
5536 case DMA_TYPE_BCDMA: in udma_probe()
5537 ud->ddev.device_alloc_chan_resources = in udma_probe()
5538 bcdma_alloc_chan_resources; in udma_probe()
5539 ud->ddev.device_router_config = bcdma_router_config; in udma_probe()
5541 case DMA_TYPE_PKTDMA: in udma_probe()
5542 ud->ddev.device_alloc_chan_resources = in udma_probe()
5543 pktdma_alloc_chan_resources; in udma_probe()
5545 default: in udma_probe()
5546 return -EINVAL; in udma_probe()
5548 ud->ddev.device_free_chan_resources = udma_free_chan_resources; in udma_probe()
5550 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; in udma_probe()
5551 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; in udma_probe()
5552 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); in udma_probe()
5553 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; in udma_probe()
5554 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | in udma_probe()
5555 DESC_METADATA_ENGINE; in udma_probe()
5556 if (ud->match_data->enable_memcpy_support && in udma_probe()
5557 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) { in udma_probe()
5558 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); in udma_probe()
5559 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; in udma_probe()
5560 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); in udma_probe()
5563 ud->ddev.dev = dev; in udma_probe()
5564 ud->dev = dev; in udma_probe()
5565 ud->psil_base = ud->match_data->psil_base; in udma_probe()
5567 INIT_LIST_HEAD(&ud->ddev.channels); in udma_probe()
5568 INIT_LIST_HEAD(&ud->desc_to_purge); in udma_probe()
5574 spin_lock_init(&ud->lock); in udma_probe()
5575 INIT_WORK(&ud->purge_work, udma_purge_desc_work); in udma_probe()
5577 ud->desc_align = 64; in udma_probe()
5578 if (ud->desc_align < dma_get_cache_alignment()) in udma_probe()
5579 ud->desc_align = dma_get_cache_alignment(); in udma_probe()
5585 for (i = 0; i < ud->bchan_cnt; i++) { in udma_probe()
5586 struct udma_bchan *bchan = &ud->bchans[i]; in udma_probe()
5588 bchan->id = i; in udma_probe()
5589 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000; in udma_probe()
5592 for (i = 0; i < ud->tchan_cnt; i++) { in udma_probe()
5593 struct udma_tchan *tchan = &ud->tchans[i]; in udma_probe()
5595 tchan->id = i; in udma_probe()
5596 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; in udma_probe()
5599 for (i = 0; i < ud->rchan_cnt; i++) { in udma_probe()
5600 struct udma_rchan *rchan = &ud->rchans[i]; in udma_probe()
5602 rchan->id = i; in udma_probe()
5603 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; in udma_probe()
5606 for (i = 0; i < ud->rflow_cnt; i++) { in udma_probe()
5607 struct udma_rflow *rflow = &ud->rflows[i]; in udma_probe()
5609 rflow->id = i; in udma_probe()
5613 struct udma_chan *uc = &ud->channels[i]; in udma_probe()
5615 uc->ud = ud; in udma_probe()
5616 uc->vc.desc_free = udma_desc_free; in udma_probe()
5617 uc->id = i; in udma_probe()
5618 uc->bchan = NULL; in udma_probe()
5619 uc->tchan = NULL; in udma_probe()
5620 uc->rchan = NULL; in udma_probe()
5621 uc->config.remote_thread_id = -1; in udma_probe()
5622 uc->config.mapped_channel_id = -1; in udma_probe()
5623 uc->config.default_flow_id = -1; in udma_probe()
5624 uc->config.dir = DMA_MEM_TO_MEM; in udma_probe()
5625 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", in udma_probe()
5626 dev_name(dev), i); in udma_probe()
5628 vchan_init(&uc->vc, &ud->ddev); in udma_probe()
5630 tasklet_setup(&uc->vc.task, udma_vchan_complete); in udma_probe()
5631 init_completion(&uc->teardown_completed); in udma_probe()
5632 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion); in udma_probe()
5636 ud->ddev.copy_align = udma_get_copy_align(ud); in udma_probe()
5638 ret = dma_async_device_register(&ud->ddev); in udma_probe()
5646 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); in udma_probe()
5649 dma_async_device_unregister(&ud->ddev); in udma_probe()
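Once dma_async_device_register() and of_dma_controller_register() have succeeded, the channels configured above are handed out through the standard dmaengine consumer API. The following is a generic, hypothetical client sketch (the function name, the "tx" channel label and the FIFO address are invented for illustration) showing how the slave_sg/config/pause/terminate capabilities registered in udma_probe() are typically consumed:

	#include <linux/dmaengine.h>

	/* Hypothetical consumer sketch; names, addresses and channel label are illustrative only. */
	static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= 0x02800054,	/* made-up peripheral FIFO address */
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		struct dma_async_tx_descriptor *desc;
		struct dma_chan *chan;
		int ret;

		chan = dma_request_chan(dev, "tx");	/* matches "dmas"/"dma-names" in the client node */
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		ret = dmaengine_slave_config(chan, &cfg);
		if (ret)
			goto err;

		desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
						   DMA_PREP_INTERRUPT);
		if (!desc) {
			ret = -ENOMEM;
			goto err;
		}

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
		return 0;

	err:
		dma_release_channel(chan);
		return ret;
	}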
5658 struct dma_device *dma_dev = &ud->ddev; in udma_pm_suspend()
5662 list_for_each_entry(chan, &dma_dev->channels, device_node) { in udma_pm_suspend()
5663 if (chan->client_count) { in udma_pm_suspend()
5666 memcpy(&uc->backup_config, &uc->config, in udma_pm_suspend()
5667 sizeof(uc->backup_config)); in udma_pm_suspend()
5670 ud->ddev.device_free_chan_resources(chan); in udma_pm_suspend()
5680 struct dma_device *dma_dev = &ud->ddev; in udma_pm_resume()
5685 list_for_each_entry(chan, &dma_dev->channels, device_node) { in udma_pm_resume()
5686 if (chan->client_count) { in udma_pm_resume()
5689 memcpy(&uc->config, &uc->backup_config, in udma_pm_resume()
5690 sizeof(uc->config)); in udma_pm_resume()
5693 ret = ud->ddev.device_alloc_chan_resources(chan); in udma_pm_resume()
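The two handlers above save each busy channel's configuration, release its hardware resources for suspend, and restore and re-allocate them on resume. A minimal sketch of how such callbacks are usually attached to the platform driver whose .name field is excerpted just below (the macro choice and the struct name are assumptions, not quoted from the file):

	/* Sketch only; the exact wiring in the driver may differ. */
	static DEFINE_SIMPLE_DEV_PM_OPS(udma_pm_ops, udma_pm_suspend, udma_pm_resume);

	static struct platform_driver udma_driver = {
		.driver = {
			.name		= "ti-udma",
			.of_match_table	= udma_of_match,
			.pm		= pm_sleep_ptr(&udma_pm_ops),
		},
		.probe	= udma_probe,
	};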
5708 .name = "ti-udma",
5717 MODULE_DESCRIPTION("Texas Instruments UDMA support");
5721 #include "k3-udma-private.c"