// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"

struct k3_udma_glue_common {
	struct device *dev;
	struct device chan_dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32 hdesc_size;
	bool epib;
	u32 psdata_size;
	u32 swdata_size;
	u32 atype_asel;
	struct psil_endpoint_config *ep_config;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;

	int udma_tflow_id;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32 swdata_size;
	int flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
	bool single_fdq; /* one FDQ for all flows */
};

static void k3_udma_chan_dev_release(struct device *dev)
{
	/* The struct containing the device is devm managed */
}

static struct class k3_udma_glue_devclass = {
	.name = "k3_udma_glue_chan",
	.dev_release = k3_udma_chan_dev_release,
};
#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->ringacc = xudma_get_ringacc(common->udmax);
	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
					    bool tx_chn)
{
	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
		return -EINVAL;

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
		return -EINVAL;

	/* get psil endpoint config */
	common->ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(common->ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		return PTR_ERR(common->ep_config);
	}

	common->epib = common->ep_config->needs_epib;
	common->psdata_size = common->ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

	return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
				     const char *name, struct k3_udma_glue_common *common,
				     bool tx_chn)
{
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);
	if (ret)
		goto out_put_spec;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel asel: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}

		common->atype_asel = dma_spec.args[1];
	}

	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

static int
of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
				bool tx_chn, u32 thread_id)
{
	int ret = 0;

	if (unlikely(!udmax_np))
		return -EINVAL;

	ret = of_k3_udma_glue_parse(udmax_np, common);
	if (ret)
		return ret;

	ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
	return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype_asel;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

static int
k3_udma_glue_request_tx_chn_common(struct device *dev,
				   struct k3_udma_glue_tx_channel *tx_chn,
				   struct k3_udma_glue_tx_channel_cfg *cfg)
{
	int ret;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
							  tx_chn->common.psdata_size,
							  tx_chn->common.swdata_size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
	else
		tx_chn->udma_tchan_id = -1;

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
					      tx_chn->udma_tchan_id);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		return ret;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
	ret = device_register(&tx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		tx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
	else
		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tflow_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		return ret;
	}

	/* Set the dma_dev for the rings to be configured */
	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		return ret;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		return ret;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		return ret;
	}

	k3_udma_glue_dump_tx_chn(tx_chn);

	return 0;
}

struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse OF for the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
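
/*
 * Illustrative TX bring-up sketch (hypothetical client code, not part of this
 * driver): request the channel by its "dmas"/"dma-names" entry, grab the
 * completion IRQ and enable the channel. "my_cfg", "priv->dev" and the "tx0"
 * name are placeholders; the k3_ringacc configuration in tx_cfg/txcq_cfg
 * (ring size, element size, mode) is omitted here and must be filled in by
 * the caller, and error unwinding is abbreviated.
 *
 *	struct k3_udma_glue_tx_channel_cfg my_cfg = { };
 *	struct k3_udma_glue_tx_channel *tx_chn;
 *	int irq, ret;
 *
 *	my_cfg.swdata_size = sizeof(void *);
 *	tx_chn = k3_udma_glue_request_tx_chn(priv->dev, "tx0", &my_cfg);
 *	if (IS_ERR(tx_chn))
 *		return PTR_ERR(tx_chn);
 *	irq = k3_udma_glue_tx_get_irq(tx_chn);
 *	if (irq < 0)
 *		return irq;
 *	ret = k3_udma_glue_enable_tx_chn(tx_chn);
 */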

struct k3_udma_glue_tx_channel *
k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
					   struct k3_udma_glue_tx_channel_cfg *cfg,
					   struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
	if (ret)
		goto err;

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);

void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);

	if (tx_chn->common.chan_dev.parent) {
		device_unregister(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
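
/*
 * Hedged example of how a client pairs push and pop (hypothetical code, not
 * part of this driver): the mapped descriptor is queued on the TX ring and
 * its DMA address is later recovered from the completion ring, typically in
 * the completion IRQ handler. Push returns -ENOMEM when no free completion
 * slots remain. "desc", "desc_dma", "priv" and "my_tx_complete" are
 * placeholders owned by the client.
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
 *	if (ret)
 *		return ret;
 *
 *	while (k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma) == 0)
 *		my_tx_complete(priv, desc_dma);
 */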

int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	int ret;

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);
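
/*
 * Illustrative shutdown ordering (hypothetical client code, not part of this
 * driver): a graceful stop usually tears the channel down synchronously,
 * reclaims descriptors still queued on the TX ring with the reset helper that
 * follows, and only then disables and releases the channel. "my_tx_cleanup"
 * and "priv" are placeholders.
 *
 *	k3_udma_glue_tdown_tx_chn(tx_chn, true);
 *	k3_udma_glue_reset_tx_chn(tx_chn, priv, my_tx_cleanup);
 *	k3_udma_glue_disable_tx_chn(tx_chn);
 *	k3_udma_glue_release_tx_chn(tx_chn);
 */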

void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	struct device *dev = tx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/*
	 * TXQ reset needs to be done in a special way as it is input for udma
	 * and its state is cached by udma, so:
	 * 1) save TXQ occ
	 * 2) clean up TXQ and call callback .cleanup() for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	/* reset TXCQ as it is not input for udma - expected to be empty */
	k3_ringacc_ring_reset(tx_chn->ringtxcq);
	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
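
/*
 * The cleanup callback receives the DMA address of every descriptor still
 * sitting on the TX submit ring; unmapping and freeing it is the caller's
 * responsibility. A minimal hypothetical callback could look like this
 * ("my_priv" and "my_desc_free" are placeholders):
 *
 *	static void my_tx_cleanup(void *data, dma_addr_t desc_dma)
 *	{
 *		struct my_priv *priv = data;
 *
 *		my_desc_free(priv, desc_dma);
 *	}
 */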

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
							  tx_chn->udma_tflow_id);
	} else {
		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
	}

	if (!tx_chn->virq)
		return -ENXIO;

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

struct device *
k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax) &&
	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
		return &tx_chn->common.chan_dev;

	return xudma_get_device(tx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to an invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype_asel;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
		rx_ring_id = 0;
	} else {
		rx_ring_id = flow_cfg->ring_rxq_id;
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    rx_ringfdq_id, rx_ring_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	/* Set the dma_dev for the rings to be configured */
	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not a GP rflow */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}
	rx_chn->flow_id_base = ret;

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	struct psil_endpoint_config *ep_cfg;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse OF for the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
							  rx_chn->common.psdata_size,
							  rx_chn->common.swdata_size);

	ep_cfg = rx_chn->common.ep_config;

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
		rx_chn->single_fdq = false;
	} else {
		rx_chn->udma_rchan_id = -1;
		rx_chn->single_fdq = true;
	}

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
					      rx_chn->udma_rchan_id);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		int flow_start = cfg->flow_id_base;
		int flow_end;

		if (flow_start == -1)
			flow_start = ep_cfg->flow_start;

		flow_end = flow_start + cfg->flow_id_num - 1;
		if (flow_start < ep_cfg->flow_start ||
		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
			dev_err(dev, "Invalid flow range requested\n");
			ret = -EINVAL;
			goto err;
		}
		rx_chn->flow_id_base = flow_start;
	} else {
		rx_chn->flow_id_base = cfg->flow_id_base;

		/* Use RX channel id as flow id: target dev can't generate flow_id */
		if (cfg->flow_id_use_rxchan_id)
			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
	}

	rx_chn->flow_num = cfg->flow_id_num;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static int
k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
					  struct k3_udma_glue_rx_channel_cfg *cfg,
					  struct device *dev)
{
	int ret, i;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
							  rx_chn->common.psdata_size,
							  rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows)
		return -ENOMEM;

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
		     rx_chn->common.src_thread, rx_chn->flow_id_base);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		return ret;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
		rx_chn->single_fdq = false;
	} else {
		rx_chn->single_fdq = true;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		return ret;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * Remote RX channel is under control of Remote CPU core, so
	 * Linux can only request and manipulate it by dedicated RX flows
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse OF for the udmap channel */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
	if (ret)
		goto err;

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
						 struct k3_udma_glue_rx_channel_cfg *cfg,
						 struct device_node *udmax_np, u32 thread_id)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * Remote RX channel is under control of Remote CPU core, so
	 * Linux can only request and manipulate it by dedicated RX flows
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
	if (ret)
		goto err;

	ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
	if (ret)
		goto err;

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);

struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
	else
		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);
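
/*
 * Illustrative RX bring-up sketch (hypothetical client code, not part of this
 * driver): request the channel, initialise each flow, then enable. "my_rx_cfg",
 * "my_flow_cfg", "priv->dev" and the "rx0" name are placeholders; the ring
 * configuration inside the flow cfg is omitted here and must be filled in by
 * the caller, and error unwinding is abbreviated.
 *
 *	my_rx_cfg.swdata_size = sizeof(void *);
 *	my_rx_cfg.flow_id_num = 1;
 *	my_rx_cfg.flow_id_base = -1;
 *	rx_chn = k3_udma_glue_request_rx_chn(priv->dev, "rx0", &my_rx_cfg);
 *	if (IS_ERR(rx_chn))
 *		return PTR_ERR(rx_chn);
 *	ret = k3_udma_glue_rx_flow_init(rx_chn, 0, &my_flow_cfg);
 *	if (!ret)
 *		ret = k3_udma_glue_enable_rx_chn(rx_chn);
 */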

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);

	if (rx_chn->common.chan_dev.parent) {
		device_unregister(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);
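
/*
 * After a flow is initialised, a client typically grabs the per-flow IRQ and
 * pre-fills the free descriptor queue before enabling the channel. Hedged
 * sketch with hypothetical placeholders ("rx_desc", "rx_desc_dma"):
 *
 *	irq = k3_udma_glue_rx_get_irq(rx_chn, 0);
 *	if (irq < 0)
 *		return irq;
 *	ret = k3_udma_glue_push_rx_chn(rx_chn, 0, rx_desc, rx_desc_dma);
 */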

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int ret;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       u32 flow_num, void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not input for udma - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (rx_chn->single_fdq && flow_num)
		goto do_reset;

	/*
	 * RX FDQ reset needs to be done in a special way as it is input for
	 * udma and its state is cached by udma, so:
	 * 1) save RX FDQ occ
	 * 2) clean up RX FDQ and call callback .cleanup() for each desc
	 * 3) reset RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

do_reset:
	k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
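
/*
 * Illustrative RX shutdown ordering (hypothetical client code, not part of
 * this driver), mirroring the TX side: tear the channel down synchronously,
 * reclaim per-flow descriptors through the cleanup callback, then disable and
 * release. "my_rx_cleanup", "priv", "flow_num" and "i" are placeholders.
 *
 *	k3_udma_glue_tdown_rx_chn(rx_chn, true);
 *	for (i = 0; i < flow_num; i++)
 *		k3_udma_glue_reset_rx_chn(rx_chn, i, priv, my_rx_cleanup);
 *	k3_udma_glue_disable_rx_chn(rx_chn);
 *	k3_udma_glue_release_rx_chn(rx_chn);
 */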

int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
							flow->udma_rflow_id);
	} else {
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
	}

	if (!flow->virq)
		return -ENXIO;

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);

struct device *
k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
{
	if (xudma_is_pktdma(rx_chn->common.udmax) &&
	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
		return &rx_chn->common.chan_dev;

	return xudma_get_device(rx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);

static int __init k3_udma_glue_class_init(void)
{
	return class_register(&k3_udma_glue_devclass);
}

module_init(k3_udma_glue_class_init);
MODULE_DESCRIPTION("TI K3 NAVSS DMA glue interface");
MODULE_LICENSE("GPL v2");