1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * c8sectpfe-core.c - C8SECTPFE STi DVB driver
4  *
5  * Copyright (c) STMicroelectronics 2015
6  *
7  *   Author:Peter Bennett <peter.bennett@st.com>
8  *	    Peter Griffin <peter.griffin@linaro.org>
9  *
10  */
11 #include <linux/atomic.h>
12 #include <linux/clk.h>
13 #include <linux/completion.h>
14 #include <linux/delay.h>
15 #include <linux/device.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dvb/dmx.h>
18 #include <linux/dvb/frontend.h>
19 #include <linux/err.h>
20 #include <linux/errno.h>
21 #include <linux/firmware.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/io.h>
26 #include <linux/module.h>
27 #include <linux/of_gpio.h>
28 #include <linux/of_platform.h>
29 #include <linux/pinctrl/consumer.h>
30 #include <linux/pinctrl/pinctrl.h>
31 #include <linux/platform_device.h>
32 #include <linux/slab.h>
33 #include <linux/time.h>
34 #include <linux/usb.h>
35 #include <linux/wait.h>
36 
37 #include "c8sectpfe-common.h"
38 #include "c8sectpfe-core.h"
39 #include "c8sectpfe-debugfs.h"
40 
41 #include <media/dmxdev.h>
42 #include <media/dvb_demux.h>
43 #include <media/dvb_frontend.h>
44 #include <media/dvb_net.h>
45 
/* MEMDMA SLIM core firmware image loaded at first feed start */
#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
MODULE_FIRMWARE(FIRMWARE_MEMDMA);

/* HW PID filter table size in bytes: bit per PID, 8192 PIDs / 8 */
#define PID_TABLE_SIZE 1024
/* polling period of the software-demux timer */
#define POLL_MSECS 50

/* forward declaration: firmware is loaded lazily from the feed callbacks */
static int load_c8sectpfe_fw(struct c8sectpfei *fei);

#define TS_PKT_SIZE 188
/* each packet is stored with a 4-byte header prepended by the HW */
#define HEADER_SIZE (4)
#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)

/* DMA back buffer alignment required by the HW */
#define FEI_ALIGNMENT (32)
/* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)

/* per-tsin input-block FIFO length inside internal SRAM */
#define FIFO_LEN 1024
63 
c8sectpfe_timer_interrupt(struct timer_list * t)64 static void c8sectpfe_timer_interrupt(struct timer_list *t)
65 {
66 	struct c8sectpfei *fei = from_timer(fei, t, timer);
67 	struct channel_info *channel;
68 	int chan_num;
69 
70 	/* iterate through input block channels */
71 	for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
72 		channel = fei->channel_data[chan_num];
73 
74 		/* is this descriptor initialised and TP enabled */
75 		if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
76 			tasklet_schedule(&channel->tsklet);
77 	}
78 
79 	fei->timer.expires = jiffies +	msecs_to_jiffies(POLL_MSECS);
80 	add_timer(&fei->timer);
81 }
82 
/*
 * channel_swdemux_tsklet - software demux bottom half for one tsin channel.
 *
 * Reads the memdma write/read bus pointers for the channel's back buffer,
 * pushes every complete packet between them into the DVB software demux
 * mapped to this channel, then advances the hardware read pointer
 * (wrapping it back to the buffer base once the end has been consumed).
 */
static void channel_swdemux_tsklet(struct tasklet_struct *t)
{
	struct channel_info *channel = from_tasklet(channel, t, tsklet);
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;

	/* current DMA write and read pointers (bus addresses) */
	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));

	/* byte offset of the read pointer inside the back buffer */
	pos = rp - channel->back_buffer_busaddr;

	/* has it wrapped */
	if (wp < rp)
		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;

	/* only the contiguous region [rp, wp) is consumed this pass;
	 * data that wrapped past the buffer end waits for the next poll */
	size = wp - rp;
	num_packets = size / PACKET_SIZE;

	/* manage cache so data is visible to CPU */
	dma_sync_single_for_cpu(fei->dev,
				rp,
				size,
				DMA_FROM_DEVICE);

	buf = channel->back_buffer_aligned;

	dev_dbg(fei->dev,
		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);

	/* hand each packet to the demux instance mapped to this channel */
	for (n = 0; n < num_packets; n++) {
		dvb_dmx_swfilter_packets(
			&fei->c8sectpfe[0]->
				demux[channel->demux_mapping].dvb_demux,
			&buf[pos], 1);

		pos += PACKET_SIZE;
	}

	/* advance the read pointer */
	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSRP_TP(0));
	else
		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}
136 
c8sectpfe_start_feed(struct dvb_demux_feed * dvbdmxfeed)137 static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
138 {
139 	struct dvb_demux *demux = dvbdmxfeed->demux;
140 	struct stdemux *stdemux = demux->priv;
141 	struct c8sectpfei *fei = stdemux->c8sectpfei;
142 	struct channel_info *channel;
143 	u32 tmp;
144 	unsigned long *bitmap;
145 	int ret;
146 
147 	switch (dvbdmxfeed->type) {
148 	case DMX_TYPE_TS:
149 		break;
150 	case DMX_TYPE_SEC:
151 		break;
152 	default:
153 		dev_err(fei->dev, "%s:%d Error bailing\n"
154 			, __func__, __LINE__);
155 		return -EINVAL;
156 	}
157 
158 	if (dvbdmxfeed->type == DMX_TYPE_TS) {
159 		switch (dvbdmxfeed->pes_type) {
160 		case DMX_PES_VIDEO:
161 		case DMX_PES_AUDIO:
162 		case DMX_PES_TELETEXT:
163 		case DMX_PES_PCR:
164 		case DMX_PES_OTHER:
165 			break;
166 		default:
167 			dev_err(fei->dev, "%s:%d Error bailing\n"
168 				, __func__, __LINE__);
169 			return -EINVAL;
170 		}
171 	}
172 
173 	if (!atomic_read(&fei->fw_loaded)) {
174 		ret = load_c8sectpfe_fw(fei);
175 		if (ret)
176 			return ret;
177 	}
178 
179 	mutex_lock(&fei->lock);
180 
181 	channel = fei->channel_data[stdemux->tsin_index];
182 
183 	bitmap = channel->pid_buffer_aligned;
184 
185 	/* 8192 is a special PID */
186 	if (dvbdmxfeed->pid == 8192) {
187 		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
188 		tmp &= ~C8SECTPFE_PID_ENABLE;
189 		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
190 
191 	} else {
192 		bitmap_set(bitmap, dvbdmxfeed->pid, 1);
193 	}
194 
195 	/* manage cache so PID bitmap is visible to HW */
196 	dma_sync_single_for_device(fei->dev,
197 					channel->pid_buffer_busaddr,
198 					PID_TABLE_SIZE,
199 					DMA_TO_DEVICE);
200 
201 	channel->active = 1;
202 
203 	if (fei->global_feed_count == 0) {
204 		fei->timer.expires = jiffies +
205 			msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
206 
207 		add_timer(&fei->timer);
208 	}
209 
210 	if (stdemux->running_feed_count == 0) {
211 
212 		dev_dbg(fei->dev, "Starting channel=%p\n", channel);
213 
214 		tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);
215 
216 		/* Reset the internal inputblock sram pointers */
217 		writel(channel->fifo,
218 			fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
219 		writel(channel->fifo + FIFO_LEN - 1,
220 			fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
221 
222 		writel(channel->fifo,
223 			fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
224 		writel(channel->fifo,
225 			fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
226 
227 
228 		/* reset read / write memdma ptrs for this channel */
229 		writel(channel->back_buffer_busaddr, channel->irec +
230 			DMA_PRDS_BUSBASE_TP(0));
231 
232 		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
233 		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
234 
235 		writel(channel->back_buffer_busaddr, channel->irec +
236 			DMA_PRDS_BUSWP_TP(0));
237 
238 		/* Issue a reset and enable InputBlock */
239 		writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
240 			, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
241 
242 		/* and enable the tp */
243 		writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
244 
245 		dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
246 			, __func__, __LINE__, stdemux);
247 	}
248 
249 	stdemux->running_feed_count++;
250 	fei->global_feed_count++;
251 
252 	mutex_unlock(&fei->lock);
253 
254 	return 0;
255 }
256 
/*
 * c8sectpfe_stop_feed - dvb_demux stop_feed callback.
 *
 * Removes the feed's PID from the HW filter (or re-enables filtering if
 * the wildcard PID 8192 was in use).  When the last feed on a tsin stops,
 * quiesces that channel following the TP re-configuration sequence from
 * page 168 of the functional spec: disable IB, disable the TP descriptor,
 * request memdma idle and wait for the idle IRQ, then reset the buffer
 * pointers and clear the PID bitmap.  When the last feed overall stops,
 * the polling timer is deleted.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{

	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	/* firmware may not be loaded yet if no feed was ever started */
	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = channel->pid_buffer_aligned;

	if (dvbdmxfeed->pid == 8192) {
		/* wildcard PID is ending: turn HW PID filtering back on */
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0,  channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						msecs_to_jiffies(100));

		/* a timeout is logged but otherwise non-fatal */
		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset(channel->pid_buffer_aligned, 0, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
362 
find_channel(struct c8sectpfei * fei,int tsin_num)363 static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
364 {
365 	int i;
366 
367 	for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
368 		if (!fei->channel_data[i])
369 			continue;
370 
371 		if (fei->channel_data[i]->tsin_id == tsin_num)
372 			return fei->channel_data[i];
373 	}
374 
375 	return NULL;
376 }
377 
c8sectpfe_getconfig(struct c8sectpfei * fei)378 static void c8sectpfe_getconfig(struct c8sectpfei *fei)
379 {
380 	struct c8sectpfe_hw *hw = &fei->hw_stats;
381 
382 	hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
383 	hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
384 	hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
385 	hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
386 	hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
387 	hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
388 	hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
389 
390 	dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
391 	dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
392 	dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
393 	dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
394 				, hw->num_swts);
395 	dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
396 	dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
397 	dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
398 	dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
399 			, hw->num_tp);
400 }
401 
c8sectpfe_idle_irq_handler(int irq,void * priv)402 static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
403 {
404 	struct c8sectpfei *fei = priv;
405 	struct channel_info *chan;
406 	int bit;
407 	unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
408 
409 	/* page 168 of functional spec: Clear the idle request
410 	   by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
411 
412 	/* signal idle completion */
413 	for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
414 
415 		chan = find_channel(fei, bit);
416 
417 		if (chan)
418 			complete(&chan->idle_completion);
419 	}
420 
421 	writel(0, fei->io + DMA_IDLE_REQ);
422 
423 	return IRQ_HANDLED;
424 }
425 
426 
free_input_block(struct c8sectpfei * fei,struct channel_info * tsin)427 static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
428 {
429 	if (!fei || !tsin)
430 		return;
431 
432 	if (tsin->back_buffer_busaddr)
433 		if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
434 			dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
435 				FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
436 
437 	kfree(tsin->back_buffer_start);
438 
439 	if (tsin->pid_buffer_busaddr)
440 		if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
441 			dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
442 				PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
443 
444 	kfree(tsin->pid_buffer_start);
445 }
446 
447 #define MAX_NAME 20
448 
/*
 * configure_memdma_and_inputblock - one-time HW setup for a tsin channel.
 *
 * Allocates and DMA-maps the packet back buffer and the bit-per-PID
 * filter table, selects the tsin pinctrl state, programs the input
 * block (format, sync/drop thresholds, SRAM FIFO window, PID filter)
 * and fills out the channel's memdma pointer record in DMEM.
 *
 * Returns 0 on success or a negative errno; on failure any partially
 * acquired buffers are released via free_input_block().
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	/* over-allocate by FEI_ALIGNMENT so the buffer can be aligned */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT, GFP_KERNEL);
	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;

	/*
	 * NOTE(review): PTR_ALIGN rounds up, so this relies on kmalloc
	 * returning sufficiently aligned memory for the aligned pointer
	 * to stay within the FEI_ALIGNMENT slack — confirm for this arch.
	 */
	tsin->back_buffer_aligned = PTR_ALIGN(tsin->back_buffer_aligned, FEI_ALIGNMENT);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	tsin->pid_buffer_start = kzalloc(PID_TABLE_SIZE + PID_TABLE_SIZE, GFP_KERNEL);
	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;

	/* NOTE(review): same round-up-within-slack assumption as above */
	tsin->pid_buffer_aligned = PTR_ALIGN(tsin->pid_buffer_aligned, PID_TABLE_SIZE);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* pinctrl state name encodes the serial/parallel wiring mode */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	/* build up the input format config from the DT properties */
	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop thresholds and the TS sync byte (0x47) */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base addpress of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size rounded up to an 8-byte boundary */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
643 
c8sectpfe_error_irq_handler(int irq,void * priv)644 static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
645 {
646 	struct c8sectpfei *fei = priv;
647 
648 	dev_err(fei->dev, "%s: error handling not yet implemented\n"
649 		, __func__);
650 
651 	/*
652 	 * TODO FIXME we should detect some error conditions here
653 	 * and ideally do something about them!
654 	 */
655 
656 	return IRQ_HANDLED;
657 }
658 
/*
 * c8sectpfe_probe - platform driver probe.
 *
 * Maps the register and SRAM regions, acquires IRQs and the IP clock,
 * reads the HW capability registers, parses one DT child node per tsin
 * (channel config, i2c bus, reset GPIO), arms the polling timer object
 * and registers the DVB frontends.  Firmware load is deferred to the
 * first feed start.  Returns 0 on success or a negative errno.
 */
static int c8sectpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;
	struct c8sectpfei *fei;
	struct resource *res;
	int ret, index = 0;
	struct channel_info *tsin;

	/* Allocate the c8sectpfei structure */
	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
	if (!fei)
		return -ENOMEM;

	fei->dev = dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
	fei->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->io))
		return PTR_ERR(fei->io);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"c8sectpfe-ram");
	fei->sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->sram))
		return PTR_ERR(fei->sram);

	fei->sram_size = resource_size(res);

	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
	if (fei->idle_irq < 0)
		return fei->idle_irq;

	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
	if (fei->error_irq < 0)
		return fei->error_irq;

	platform_set_drvdata(pdev, fei);

	/* devm-managed clock: enabled for the lifetime of the binding */
	fei->c8sectpfeclk = devm_clk_get_enabled(dev, "c8sectpfe");
	if (IS_ERR(fei->c8sectpfeclk)) {
		dev_err(dev, "Failed to enable c8sectpfe clock\n");
		return PTR_ERR(fei->c8sectpfeclk);
	}

	/* to save power disable all IP's (on by default) */
	writel(0, fei->io + SYS_INPUT_CLKEN);

	/* Enable memdma clock */
	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);

	/* clear internal sram */
	memset_io(fei->sram, 0x0, fei->sram_size);

	c8sectpfe_getconfig(fei);

	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
			0, "c8sectpfe-idle-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
		return ret;
	}

	ret = devm_request_irq(dev, fei->error_irq,
				c8sectpfe_error_irq_handler, 0,
				"c8sectpfe-error-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
		return ret;
	}

	/* one DT child node per declared tsin */
	fei->tsin_count = of_get_child_count(np);

	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
		fei->tsin_count > fei->hw_stats.num_ib) {

		dev_err(dev, "More tsin declared than exist on SoC!\n");
		return -EINVAL;
	}

	fei->pinctrl = devm_pinctrl_get(dev);

	if (IS_ERR(fei->pinctrl)) {
		dev_err(dev, "Error getting tsin pins\n");
		return PTR_ERR(fei->pinctrl);
	}

	for_each_child_of_node(np, child) {
		struct device_node *i2c_bus;

		fei->channel_data[index] = devm_kzalloc(dev,
						sizeof(struct channel_info),
						GFP_KERNEL);

		if (!fei->channel_data[index]) {
			ret = -ENOMEM;
			goto err_node_put;
		}

		tsin = fei->channel_data[index];

		tsin->fei = fei;

		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
		if (ret) {
			dev_err(&pdev->dev, "No tsin_num found\n");
			goto err_node_put;
		}

		/* sanity check value */
		if (tsin->tsin_id > fei->hw_stats.num_ib) {
			dev_err(&pdev->dev,
				"tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
				tsin->tsin_id, fei->hw_stats.num_ib);
			ret = -EINVAL;
			goto err_node_put;
		}

		tsin->invert_ts_clk = of_property_read_bool(child,
							"invert-ts-clk");

		tsin->serial_not_parallel = of_property_read_bool(child,
							"serial-not-parallel");

		tsin->async_not_sync = of_property_read_bool(child,
							"async-not-sync");

		ret = of_property_read_u32(child, "dvb-card",
					&tsin->dvb_card);
		if (ret) {
			dev_err(&pdev->dev, "No dvb-card found\n");
			goto err_node_put;
		}

		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
		if (!i2c_bus) {
			dev_err(&pdev->dev, "No i2c-bus found\n");
			ret = -ENODEV;
			goto err_node_put;
		}
		tsin->i2c_adapter =
			of_find_i2c_adapter_by_node(i2c_bus);
		if (!tsin->i2c_adapter) {
			dev_err(&pdev->dev, "No i2c adapter found\n");
			of_node_put(i2c_bus);
			ret = -ENODEV;
			goto err_node_put;
		}
		of_node_put(i2c_bus);

		/* Acquire reset GPIO and activate it */
		tsin->rst_gpio = devm_fwnode_gpiod_get(dev,
						       of_fwnode_handle(child),
						       "reset", GPIOD_OUT_HIGH,
						       "NIM reset");
		ret = PTR_ERR_OR_ZERO(tsin->rst_gpio);
		/* -EBUSY means the GPIO is shared with another tsin: ok */
		if (ret && ret != -EBUSY) {
			dev_err(dev, "Can't request tsin%d reset gpio\n",
				fei->channel_data[index]->tsin_id);
			goto err_node_put;
		}

		if (!ret) {
			/* wait for the chip to reset */
			usleep_range(3500, 5000);
			/* release the reset line */
			gpiod_set_value_cansleep(tsin->rst_gpio, 0);
			usleep_range(3000, 5000);
		}

		tsin->demux_mapping = index;

		dev_dbg(fei->dev,
			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
			fei->channel_data[index], index,
			tsin->tsin_id, tsin->invert_ts_clk,
			tsin->serial_not_parallel, tsin->async_not_sync,
			tsin->dvb_card);

		index++;
	}

	/* Setup timer interrupt */
	timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);

	mutex_init(&fei->lock);

	/* Get the configuration information about the tuners */
	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
					(void *)fei,
					c8sectpfe_start_feed,
					c8sectpfe_stop_feed);
	if (ret) {
		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
			ret);
		return ret;
	}

	c8sectpfe_debugfs_init(fei);

	return 0;

err_node_put:
	/* drop the child node reference held by for_each_child_of_node */
	of_node_put(child);
	return ret;
}
865 
/*
 * c8sectpfe_remove - platform driver remove.
 *
 * Waits for firmware-load acknowledgement, unregisters the frontends,
 * frees every input-block's buffers, tears down debugfs and stops the
 * memdma SLIM core and internal IP clocks.  devm-managed resources
 * (irqs, mappings, clock) are released by the driver core afterwards.
 */
static void c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	/* don't tear down while a firmware load is still in flight */
	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the InputBlock resources
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0,  fei->io + DMA_CPU_RUN);

	/* unclock all internal IP's */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);
}
897 
898 
configure_channels(struct c8sectpfei * fei)899 static int configure_channels(struct c8sectpfei *fei)
900 {
901 	int index = 0, ret;
902 	struct device_node *child, *np = fei->dev->of_node;
903 
904 	/* iterate round each tsin and configure memdma descriptor and IB hw */
905 	for_each_child_of_node(np, child) {
906 		ret = configure_memdma_and_inputblock(fei,
907 						fei->channel_data[index]);
908 		if (ret) {
909 			dev_err(fei->dev,
910 				"configure_memdma_and_inputblock failed\n");
911 			of_node_put(child);
912 			goto err_unmap;
913 		}
914 		index++;
915 	}
916 
917 	return 0;
918 
919 err_unmap:
920 	while (--index >= 0)
921 		free_input_block(fei, fei->channel_data[index]);
922 
923 	return ret;
924 }
925 
926 static int
c8sectpfe_elf_sanity_check(struct c8sectpfei * fei,const struct firmware * fw)927 c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
928 {
929 	struct elf32_hdr *ehdr;
930 	char class;
931 
932 	if (!fw) {
933 		dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
934 		return -EINVAL;
935 	}
936 
937 	if (fw->size < sizeof(struct elf32_hdr)) {
938 		dev_err(fei->dev, "Image is too small\n");
939 		return -EINVAL;
940 	}
941 
942 	ehdr = (struct elf32_hdr *)fw->data;
943 
944 	/* We only support ELF32 at this point */
945 	class = ehdr->e_ident[EI_CLASS];
946 	if (class != ELFCLASS32) {
947 		dev_err(fei->dev, "Unsupported class: %d\n", class);
948 		return -EINVAL;
949 	}
950 
951 	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
952 		dev_err(fei->dev, "Unsupported firmware endianness\n");
953 		return -EINVAL;
954 	}
955 
956 	if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
957 		dev_err(fei->dev, "Image is too small\n");
958 		return -EINVAL;
959 	}
960 
961 	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
962 		dev_err(fei->dev, "Image is corrupted (bad magic)\n");
963 		return -EINVAL;
964 	}
965 
966 	/* Check ELF magic */
967 	ehdr = (Elf32_Ehdr *)fw->data;
968 	if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
969 	    ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
970 	    ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
971 	    ehdr->e_ident[EI_MAG3] != ELFMAG3) {
972 		dev_err(fei->dev, "Invalid ELF magic\n");
973 		return -EINVAL;
974 	}
975 
976 	if (ehdr->e_type != ET_EXEC) {
977 		dev_err(fei->dev, "Unsupported ELF header type\n");
978 		return -EINVAL;
979 	}
980 
981 	if (ehdr->e_phoff > fw->size) {
982 		dev_err(fei->dev, "Firmware size is too small\n");
983 		return -EINVAL;
984 	}
985 
986 	return 0;
987 }
988 
989 
/*
 * load_imem_segment - copy one executable ELF segment into MEMDMA IMEM.
 *
 * IMEM instructions in the ELF file are 24-bit; the hardware expects
 * them padded to 32 bits, so after every 3 source bytes an extra 0x00
 * (NOP padding) byte is written to the destination.  The destination
 * therefore grows by p_memsz/3 over the source size.
 */
static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dest,
			int seg_num)
{
	/* source bytes start at this segment's offset in the fw blob */
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * For IMEM segments, the segment contains 24-bit
	 * instructions which must be padded to 32-bit
	 * instructions before being written. The written
	 * segment is padded with NOP instructions.
	 */

	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz, dest,
		phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		/*
		 * NOTE(review): imem_src points at ordinary firmware
		 * memory, yet is read via readb() with an __iomem cast
		 * to pair with the writeb() — confirm this is intended
		 * rather than a plain dereference.
		 */
		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);

		/* Every 3 bytes, add an additional
		 * padding zero in destination */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, (void __iomem *)dest);
		}

		dest++;
		imem_src++;
	}
}
1024 
/*
 * load_dmem_segment - copy one data ELF segment into MEMDMA DMEM.
 *
 * DMEM segments are a straight byte copy of the segment payload; the
 * remainder of the in-memory size (p_memsz - p_filesz) is zero-filled.
 */
static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dst, int seg_num)
{
	const void *src = fw->data + phdr->p_offset;

	dev_dbg(fei->dev,
		"Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz,
		dst, phdr->p_memsz);

	memcpy((void __force *)dst, src, phdr->p_filesz);

	/* zero the tail of the segment (BSS-style padding) */
	memset((void __force *)dst + phdr->p_filesz, 0,
	       phdr->p_memsz - phdr->p_filesz);
}
1044 
load_slim_core_fw(const struct firmware * fw,struct c8sectpfei * fei)1045 static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
1046 {
1047 	Elf32_Ehdr *ehdr;
1048 	Elf32_Phdr *phdr;
1049 	u8 __iomem *dst;
1050 	int err = 0, i;
1051 
1052 	if (!fw || !fei)
1053 		return -EINVAL;
1054 
1055 	ehdr = (Elf32_Ehdr *)fw->data;
1056 	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);
1057 
1058 	/* go through the available ELF segments */
1059 	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
1060 
1061 		/* Only consider LOAD segments */
1062 		if (phdr->p_type != PT_LOAD)
1063 			continue;
1064 
1065 		/*
1066 		 * Check segment is contained within the fw->data buffer
1067 		 */
1068 		if (phdr->p_offset + phdr->p_filesz > fw->size) {
1069 			dev_err(fei->dev,
1070 				"Segment %d is outside of firmware file\n", i);
1071 			err = -EINVAL;
1072 			break;
1073 		}
1074 
1075 		/*
1076 		 * MEMDMA IMEM has executable flag set, otherwise load
1077 		 * this segment into DMEM.
1078 		 *
1079 		 */
1080 
1081 		if (phdr->p_flags & PF_X) {
1082 			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
1083 			/*
1084 			 * The Slim ELF file uses 32-bit word addressing for
1085 			 * load offsets.
1086 			 */
1087 			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
1088 			load_imem_segment(fei, phdr, fw, dst, i);
1089 		} else {
1090 			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
1091 			/*
1092 			 * The Slim ELF file uses 32-bit word addressing for
1093 			 * load offsets.
1094 			 */
1095 			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
1096 			load_dmem_segment(fei, phdr, fw, dst, i);
1097 		}
1098 	}
1099 
1100 	release_firmware(fw);
1101 	return err;
1102 }
1103 
load_c8sectpfe_fw(struct c8sectpfei * fei)1104 static int load_c8sectpfe_fw(struct c8sectpfei *fei)
1105 {
1106 	const struct firmware *fw;
1107 	int err;
1108 
1109 	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);
1110 
1111 	err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
1112 	if (err)
1113 		return err;
1114 
1115 	err = c8sectpfe_elf_sanity_check(fei, fw);
1116 	if (err) {
1117 		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
1118 			, err);
1119 		release_firmware(fw);
1120 		return err;
1121 	}
1122 
1123 	err = load_slim_core_fw(fw, fei);
1124 	if (err) {
1125 		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
1126 		return err;
1127 	}
1128 
1129 	/* now the firmware is loaded configure the input blocks */
1130 	err = configure_channels(fei);
1131 	if (err) {
1132 		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
1133 		return err;
1134 	}
1135 
1136 	/*
1137 	 * STBus target port can access IMEM and DMEM ports
1138 	 * without waiting for CPU
1139 	 */
1140 	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);
1141 
1142 	dev_info(fei->dev, "Boot the memdma SLIM core\n");
1143 	writel(0x1,  fei->io + DMA_CPU_RUN);
1144 
1145 	atomic_set(&fei->fw_loaded, 1);
1146 
1147 	return 0;
1148 }
1149 
/* DT match table: this driver binds to the STiH407 C8SECTPFE IP */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);

static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = c8sectpfe_match,
	},
	.probe	= c8sectpfe_probe,
	.remove_new = c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);
1166 
1167 MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
1168 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
1169 MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
1170 MODULE_LICENSE("GPL");
1171