/*
 * driver for Earthsoft PT1/PT2
 *
 * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
 *
 * based on pt1dvr - http://pt1dvr.sourceforge.jp/
 * 	by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"

#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"

#define DRIVER_NAME "earth-pt1"

#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511

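/*
 * DMA layout, as inferred from the code below: the board streams TS data
 * into 4 KiB buffer pages of 1024 little-endian 32-bit "micro-packets".
 * Buffer pages are grouped by table pages, each holding the page-frame
 * numbers of 511 buffers plus the PFN of the next table page, so the
 * tables form a circular list that the hardware walks on its own.
 */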
struct pt1_buffer_page {
	__le32 upackets[PT1_NR_UPACKETS];
};

struct pt1_table_page {
	__le32 next_pfn;
	__le32 buf_pfns[PT1_NR_BUFS];
};

struct pt1_buffer {
	struct pt1_buffer_page *page;
	dma_addr_t addr;
};

struct pt1_table {
	struct pt1_table_page *page;
	dma_addr_t addr;
	struct pt1_buffer bufs[PT1_NR_BUFS];
};

#define PT1_NR_ADAPS 4

struct pt1_adapter;

struct pt1 {
	struct pci_dev *pdev;
	void __iomem *regs;
	struct i2c_adapter i2c_adap;
	int i2c_running;
	struct pt1_adapter *adaps[PT1_NR_ADAPS];
	struct pt1_table *tables;
	struct task_struct *kthread;

	struct mutex lock;
	int power;
	int reset;
};

struct pt1_adapter {
	struct pt1 *pt1;
	int index;

	u8 *buf;
	int upacket_count;
	int packet_count;

	struct dvb_adapter adap;
	struct dvb_demux demux;
	int users;
	struct dmxdev dmxdev;
	struct dvb_net net;
	struct dvb_frontend *fe;
	int (*orig_set_voltage)(struct dvb_frontend *fe,
				fe_sec_voltage_t voltage);
	int (*orig_sleep)(struct dvb_frontend *fe);
	int (*orig_init)(struct dvb_frontend *fe);

	fe_sec_voltage_t voltage;
	int sleep;
};

#define pt1_printk(level, pt1, format, arg...)	\
	dev_printk(level, &(pt1)->pdev->dev, format, ##arg)

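/*
 * Device registers are 32 bits wide; the reg argument is a word index,
 * so the byte offset into the mapped BAR is reg * 4.
 */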
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
	writel(data, pt1->regs + reg * 4);
}

static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
	return readl(pt1->regs + reg * 4);
}

static int pt1_nr_tables = 64;
module_param_named(nr_tables, pt1_nr_tables, int, 0);
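/*
 * Each table references 511 buffer pages of 4 KiB, so the default of 64
 * tables pins roughly 64 * 511 * 4 KiB (about 128 MiB) of coherent DMA
 * memory.  A smaller value can be given at load time, e.g. something like
 * "nr_tables=16"; the ring setup below assumes at least one table.
 */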

static void pt1_increment_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000020);
}

static void pt1_init_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000010);
}

static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
	pt1_write_reg(pt1, 5, first_pfn);
	pt1_write_reg(pt1, 0, 0x0c000040);
}

static void pt1_unregister_tables(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x08080000);
}

static int pt1_sync(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 57; i++) {
		if (pt1_read_reg(pt1, 0) & 0x20000000)
			return 0;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	pt1_printk(KERN_ERR, pt1, "could not sync\n");
	return -EIO;
}

static u64 pt1_identify(struct pt1 *pt1)
{
	int i;
	u64 id;
	id = 0;
	for (i = 0; i < 57; i++) {
		id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	return id;
}

static int pt1_unlock(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x00000008);
	for (i = 0; i < 3; i++) {
		if (pt1_read_reg(pt1, 0) & 0x80000000)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not unlock\n");
	return -EIO;
}

static int pt1_reset_pci(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x01010000);
	pt1_write_reg(pt1, 0, 0x01000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000001)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not reset PCI\n");
	return -EIO;
}

static int pt1_reset_ram(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x02020000);
	pt1_write_reg(pt1, 0, 0x02000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000002)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not reset RAM\n");
	return -EIO;
}

static int pt1_do_enable_ram(struct pt1 *pt1)
{
	int i, j;
	u32 status;
	status = pt1_read_reg(pt1, 0) & 0x00000004;
	pt1_write_reg(pt1, 0, 0x00000002);
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 1024; j++) {
			if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
				return 0;
		}
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	pt1_printk(KERN_ERR, pt1, "could not enable RAM\n");
	return -EIO;
}

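/*
 * The number of enable iterations differs between the PT1 (PCI device
 * 0x211a) and the PT2, presumably to match the RAM fitted on each board.
 * Each (HZ + 999) / 1000 sleep is roughly one millisecond, rounded up to
 * at least one jiffy.
 */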
static int pt1_enable_ram(struct pt1 *pt1)
{
	int i, ret;
	int phase;
	schedule_timeout_uninterruptible((HZ + 999) / 1000);
	phase = pt1->pdev->device == 0x211a ? 128 : 166;
	for (i = 0; i < phase; i++) {
		ret = pt1_do_enable_ram(pt1);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void pt1_disable_ram(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x0b0b0000);
}

static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
	pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}

static void pt1_init_streams(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_set_stream(pt1, i, 0);
}

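/*
 * Reassemble TS packets from 32-bit micro-packets.  The encoding, as far
 * as it can be read from the code: bits 31-29 carry the stream index plus
 * one, bit 25 appears to mark the first micro-packet of a TS packet, and
 * bits 23-0 carry three payload bytes.  63 micro-packets make up one
 * 188-byte packet (62 * 3 + 2), and 21 packets fill the one-page adapter
 * buffer before it is handed to the software demux.
 */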
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
	u32 upacket;
	int i;
	int index;
	struct pt1_adapter *adap;
	int offset;
	u8 *buf;

	if (!page->upackets[PT1_NR_UPACKETS - 1])
		return 0;

	for (i = 0; i < PT1_NR_UPACKETS; i++) {
		upacket = le32_to_cpu(page->upackets[i]);
		index = (upacket >> 29) - 1;
		if (index < 0 || index >= PT1_NR_ADAPS)
			continue;

		adap = pt1->adaps[index];
		if (upacket >> 25 & 1)
			adap->upacket_count = 0;
		else if (!adap->upacket_count)
			continue;

		buf = adap->buf;
		offset = adap->packet_count * 188 + adap->upacket_count * 3;
		buf[offset] = upacket >> 16;
		buf[offset + 1] = upacket >> 8;
		if (adap->upacket_count != 62)
			buf[offset + 2] = upacket;

		if (++adap->upacket_count >= 63) {
			adap->upacket_count = 0;
			if (++adap->packet_count >= 21) {
				dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
				adap->packet_count = 0;
			}
		}
	}

	page->upackets[PT1_NR_UPACKETS - 1] = 0;
	return 1;
}

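/*
 * Polling thread: walk the circular table/buffer ring, treat a buffer as
 * ready once its last micro-packet slot is non-zero, and tell the hardware
 * a whole table has been consumed by bumping the table count.  When no
 * data is pending, sleep for about a millisecond and try again.
 */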
static int pt1_thread(void *data)
{
	struct pt1 *pt1;
	int table_index;
	int buf_index;
	struct pt1_buffer_page *page;

	pt1 = data;
	set_freezable();

	table_index = 0;
	buf_index = 0;

	while (!kthread_should_stop()) {
		try_to_freeze();

		page = pt1->tables[table_index].bufs[buf_index].page;
		if (!pt1_filter(pt1, page)) {
			schedule_timeout_interruptible((HZ + 999) / 1000);
			continue;
		}

		if (++buf_index >= PT1_NR_BUFS) {
			pt1_increment_table_count(pt1);
			buf_index = 0;
			if (++table_index >= pt1_nr_tables)
				table_index = 0;
		}
	}

	return 0;
}

static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
	dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}

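/*
 * Pages handed to the hardware are addressed by 32-bit page-frame numbers,
 * so they must be page-aligned and must lie within the first 16 TiB of the
 * DMA address space; the BUG_ONs below assert both conditions.
 */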
static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
	void *page;
	dma_addr_t addr;

	page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
				  GFP_KERNEL);
	if (page == NULL)
		return NULL;

	BUG_ON(addr & (PT1_PAGE_SIZE - 1));
	BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);

	*addrp = addr;
	*pfnp = addr >> PT1_PAGE_SHIFT;
	return page;
}

static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
	pt1_free_page(pt1, buf->page, buf->addr);
}

static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
	struct pt1_buffer_page *page;
	dma_addr_t addr;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	page->upackets[PT1_NR_UPACKETS - 1] = 0;

	buf->page = page;
	buf->addr = addr;
	return 0;
}

static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
	int i;

	for (i = 0; i < PT1_NR_BUFS; i++)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, table->page, table->addr);
}

static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
	struct pt1_table_page *page;
	dma_addr_t addr;
	int i, ret;
	u32 buf_pfn;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	for (i = 0; i < PT1_NR_BUFS; i++) {
		ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
		if (ret < 0)
			goto err;

		page->buf_pfns[i] = cpu_to_le32(buf_pfn);
	}

	pt1_increment_table_count(pt1);
	table->page = page;
	table->addr = addr;
	return 0;

err:
	while (i--)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, page, addr);
	return ret;
}

static void pt1_cleanup_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i;

	tables = pt1->tables;
	pt1_unregister_tables(pt1);

	for (i = 0; i < pt1_nr_tables; i++)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
}

static int pt1_init_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i, ret;
	u32 first_pfn, pfn;

	tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
	if (tables == NULL)
		return -ENOMEM;

	pt1_init_table_count(pt1);

	i = 0;
	if (pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[0], &first_pfn);
		if (ret)
			goto err;
		i++;
	}

	while (i < pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[i], &pfn);
		if (ret)
			goto err;
		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
		i++;
	}

	tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);

	pt1_register_tables(pt1, first_pfn);
	pt1->tables = tables;
	return 0;

err:
	while (i--)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
	return ret;
}

static int pt1_start_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!adap->users++)
		pt1_set_stream(adap->pt1, adap->index, 1);
	return 0;
}

static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!--adap->users)
		pt1_set_stream(adap->pt1, adap->index, 0);
	return 0;
}

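/*
 * Register 1 collects the power state in one word: bit 0 is the main
 * power switch, bit 3 the (inverted) reset line, bits 1-2 appear to
 * select the LNB supply voltage, and the per-tuner bits in sleep_bits[]
 * should eventually track adap->sleep (see the XXX note below).
 */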
static void
pt1_update_power(struct pt1 *pt1)
{
	int bits;
	int i;
	struct pt1_adapter *adap;
	static const int sleep_bits[] = {
		1 << 4,
		1 << 6 | 1 << 7,
		1 << 5,
		1 << 6 | 1 << 8,
	};

	bits = pt1->power | !pt1->reset << 3;
	mutex_lock(&pt1->lock);
	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1->adaps[i];
		switch (adap->voltage) {
		case SEC_VOLTAGE_13: /* actually 11V */
			bits |= 1 << 1;
			break;
		case SEC_VOLTAGE_18: /* actually 15V */
			bits |= 1 << 1 | 1 << 2;
			break;
		default:
			break;
		}

		/* XXX: The bits should be changed depending on adap->sleep. */
		bits |= sleep_bits[i];
	}
	pt1_write_reg(pt1, 1, bits);
	mutex_unlock(&pt1->lock);
}

static int pt1_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->voltage = voltage;
	pt1_update_power(adap->pt1);

	if (adap->orig_set_voltage)
		return adap->orig_set_voltage(fe, voltage);
	else
		return 0;
}

static int pt1_sleep(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 1;
	pt1_update_power(adap->pt1);

	if (adap->orig_sleep)
		return adap->orig_sleep(fe);
	else
		return 0;
}

static int pt1_wakeup(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 0;
	pt1_update_power(adap->pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	if (adap->orig_init)
		return adap->orig_init(fe);
	else
		return 0;
}

static void pt1_free_adapter(struct pt1_adapter *adap)
{
	dvb_net_release(&adap->net);
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
	dvb_dmx_release(&adap->demux);
	dvb_unregister_adapter(&adap->adap);
	free_page((unsigned long)adap->buf);
	kfree(adap);
}

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1)
{
	struct pt1_adapter *adap;
	void *buf;
	struct dvb_adapter *dvb_adap;
	struct dvb_demux *demux;
	struct dmxdev *dmxdev;
	int ret;

	adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
	if (!adap) {
		ret = -ENOMEM;
		goto err;
	}

	adap->pt1 = pt1;

	adap->voltage = SEC_VOLTAGE_OFF;
	adap->sleep = 1;

	buf = (u8 *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	adap->buf = buf;
	adap->upacket_count = 0;
	adap->packet_count = 0;

	dvb_adap = &adap->adap;
	dvb_adap->priv = adap;
	ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
				   &pt1->pdev->dev, adapter_nr);
	if (ret < 0)
		goto err_free_page;

	demux = &adap->demux;
	demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
	demux->priv = adap;
	demux->feednum = 256;
	demux->filternum = 256;
	demux->start_feed = pt1_start_feed;
	demux->stop_feed = pt1_stop_feed;
	demux->write_to_decoder = NULL;
	ret = dvb_dmx_init(demux);
	if (ret < 0)
		goto err_unregister_adapter;

	dmxdev = &adap->dmxdev;
	dmxdev->filternum = 256;
	dmxdev->demux = &demux->dmx;
	dmxdev->capabilities = 0;
	ret = dvb_dmxdev_init(dmxdev, dvb_adap);
	if (ret < 0)
		goto err_dmx_release;

	dvb_net_init(dvb_adap, &adap->net, &demux->dmx);

	return adap;

err_dmx_release:
	dvb_dmx_release(demux);
err_unregister_adapter:
	dvb_unregister_adapter(dvb_adap);
err_free_page:
	free_page((unsigned long)buf);
err_kfree:
	kfree(adap);
err:
	return ERR_PTR(ret);
}

static void pt1_cleanup_adapters(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_free_adapter(pt1->adaps[i]);
}

static int pt1_init_adapters(struct pt1 *pt1)
{
	int i;
	struct pt1_adapter *adap;
	int ret;

	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1_alloc_adapter(pt1);
		if (IS_ERR(adap)) {
			ret = PTR_ERR(adap);
			goto err;
		}

		adap->index = i;
		pt1->adaps[i] = adap;
	}
	return 0;

err:
	while (i--)
		pt1_free_adapter(pt1->adaps[i]);

	return ret;
}

static void pt1_cleanup_frontend(struct pt1_adapter *adap)
{
	dvb_unregister_frontend(adap->fe);
}

static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
{
	int ret;

	adap->orig_set_voltage = fe->ops.set_voltage;
	adap->orig_sleep = fe->ops.sleep;
	adap->orig_init = fe->ops.init;
	fe->ops.set_voltage = pt1_set_voltage;
	fe->ops.sleep = pt1_sleep;
	fe->ops.init = pt1_wakeup;

	ret = dvb_register_frontend(&adap->adap, fe);
	if (ret < 0)
		return ret;

	adap->fe = fe;
	return 0;
}

static void pt1_cleanup_frontends(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_cleanup_frontend(pt1->adaps[i]);
}

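/*
 * Demodulator configurations: each board carries two ISDB-S/ISDB-T
 * demodulator pairs on the shared I2C bus.  The PT1 (PCI device 0x211a)
 * selects the 20 MHz variants below, the PT2 the 25 MHz ones.
 */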
struct pt1_config {
	struct va1j5jf8007s_config va1j5jf8007s_config;
	struct va1j5jf8007t_config va1j5jf8007t_config;
};

static const struct pt1_config pt1_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	},
};

static const struct pt1_config pt2_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	},
};

static int pt1_init_frontends(struct pt1 *pt1)
{
	int i, j;
	struct i2c_adapter *i2c_adap;
	const struct pt1_config *configs, *config;
	struct dvb_frontend *fe[4];
	int ret;

	i = 0;
	j = 0;

	i2c_adap = &pt1->i2c_adap;
	configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
	do {
		config = &configs[i / 2];

		fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV; /* This does not sound nice... */
			goto err;
		}
		i++;

		fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV;
			goto err;
		}
		i++;

		ret = va1j5jf8007s_prepare(fe[i - 2]);
		if (ret < 0)
			goto err;

		ret = va1j5jf8007t_prepare(fe[i - 1]);
		if (ret < 0)
			goto err;

	} while (i < 4);

	do {
		ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
		if (ret < 0)
			goto err;
	} while (++j < 4);

	return 0;

err:
	while (i-- > j)
		fe[i]->ops.release(fe[i]);

	while (j--)
		dvb_unregister_frontend(fe[j]);

	return ret;
}

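/*
 * The I2C master is implemented as a small sequencer in the FPGA, as far
 * as can be told from the code below.  Each word written to register 4
 * stores one microinstruction: addr << 18 selects the instruction slot,
 * bit 13 is a busy flag, bit 12 a read strobe, bits 11 and 10 the inverted
 * SCL and SDA levels, and the low bits link to the next instruction.  The
 * program is kicked off by writing 0x00000004 to register 0, completion is
 * polled on bit 7, and read data shows up in register 2.
 */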
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
			 int clock, int data, int next_addr)
{
	pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
		      !clock << 11 | !data << 10 | next_addr);
}

static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, data, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
	*addrp = addr + 3;
}

static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
	pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
	*addrp = addr + 4;
}

static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
	pt1_i2c_write_bit(pt1, addr, &addr, 1);
	*addrp = addr;
}

static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_read_bit(pt1, addr, &addr);
	pt1_i2c_write_bit(pt1, addr, &addr, last);
	*addrp = addr;
}

static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 1, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
	*addrp = addr + 3;
}

static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
	*addrp = addr;
}

static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
	*addrp = addr;
}

static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, 0, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);

	pt1_write_reg(pt1, 0, 0x00000004);
	do {
		if (signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible((HZ + 999) / 1000);
	} while (pt1_read_reg(pt1, 0) & 0x00000080);
	return 0;
}

static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
	int addr;
	addr = 0;

	pt1_i2c_emit(pt1, addr,     0, 0, 1, 1, addr /* itself */);
	addr = addr + 1;

	if (!pt1->i2c_running) {
		pt1_i2c_emit(pt1, addr,     1, 0, 1, 1, addr + 1);
		pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
		addr = addr + 2;
		pt1->i2c_running = 1;
	}
	*addrp = addr;
}

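/*
 * Transfer restrictions imposed by the sequencer: plain reads are not
 * supported, only writes and write-then-read pairs, and a read may return
 * at most the four bytes that fit into register 2.
 */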
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct pt1 *pt1;
	int i;
	struct i2c_msg *msg, *next_msg;
	int addr, ret;
	u16 len;
	u32 word;

	pt1 = i2c_get_adapdata(adap);

	for (i = 0; i < num; i++) {
		msg = &msgs[i];
		if (msg->flags & I2C_M_RD)
			return -ENOTSUPP;

		if (i + 1 < num)
			next_msg = &msgs[i + 1];
		else
			next_msg = NULL;

		if (next_msg && next_msg->flags & I2C_M_RD) {
			i++;

			len = next_msg->len;
			if (len > 4)
				return -ENOTSUPP;

			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;

			word = pt1_read_reg(pt1, 2);
			while (len--) {
				next_msg->buf[len] = word;
				word >>= 8;
			}
		} else {
			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;
		}
	}

	return num;
}

static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm pt1_i2c_algo = {
	.master_xfer = pt1_i2c_xfer,
	.functionality = pt1_i2c_func,
};

static void pt1_i2c_wait(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 128; i++)
		pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}

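/*
 * Fill the 1024 instruction slots with idle instructions (both lines
 * released, busy flag clear) before the bus is first used.
 */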
static void pt1_i2c_init(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 1024; i++)
		pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}

static void __devexit pt1_remove(struct pci_dev *pdev)
{
	struct pt1 *pt1;
	void __iomem *regs;

	pt1 = pci_get_drvdata(pdev);
	regs = pt1->regs;

	kthread_stop(pt1->kthread);
	pt1_cleanup_tables(pt1);
	pt1_cleanup_frontends(pt1);
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
	pt1_cleanup_adapters(pt1);
	i2c_del_adapter(&pt1->i2c_adap);
	pci_set_drvdata(pdev, NULL);
	kfree(pt1);
	pci_iounmap(pdev, regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

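/*
 * Bring-up order: map the BAR, register the DVB adapters and the I2C bus,
 * synchronize with and unlock the FPGA, reset the PCI side and the
 * on-board RAM, power up the tuners, attach the frontends, and finally
 * build the DMA ring and start the polling thread.
 */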
static int __devinit
pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	void __iomem *regs;
	struct pt1 *pt1;
	struct i2c_adapter *i2c_adap;
	struct task_struct *kthread;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto err_pci_disable_device;

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret < 0)
		goto err_pci_disable_device;

	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
	if (!pt1) {
		ret = -ENOMEM;
		goto err_pci_iounmap;
	}

	mutex_init(&pt1->lock);
	pt1->pdev = pdev;
	pt1->regs = regs;
	pci_set_drvdata(pdev, pt1);

	ret = pt1_init_adapters(pt1);
	if (ret < 0)
		goto err_kfree;

	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);

	i2c_adap = &pt1->i2c_adap;
	i2c_adap->algo = &pt1_i2c_algo;
	i2c_adap->algo_data = NULL;
	i2c_adap->dev.parent = &pdev->dev;
	strcpy(i2c_adap->name, DRIVER_NAME);
	i2c_set_adapdata(i2c_adap, pt1);
	ret = i2c_add_adapter(i2c_adap);
	if (ret < 0)
		goto err_pt1_cleanup_adapters;

	pt1_i2c_init(pt1);
	pt1_i2c_wait(pt1);

	ret = pt1_sync(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_identify(pt1);

	ret = pt1_unlock(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_pci(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_enable_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_init_streams(pt1);

	pt1->power = 1;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 49) / 50);

	pt1->reset = 0;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	ret = pt1_init_frontends(pt1);
	if (ret < 0)
		goto err_pt1_disable_ram;

	ret = pt1_init_tables(pt1);
	if (ret < 0)
		goto err_pt1_cleanup_frontends;

	kthread = kthread_run(pt1_thread, pt1, "pt1");
	if (IS_ERR(kthread)) {
		ret = PTR_ERR(kthread);
		goto err_pt1_cleanup_tables;
	}

	pt1->kthread = kthread;
	return 0;

err_pt1_cleanup_tables:
	pt1_cleanup_tables(pt1);
err_pt1_cleanup_frontends:
	pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
err_i2c_del_adapter:
	i2c_del_adapter(i2c_adap);
err_pt1_cleanup_adapters:
	pt1_cleanup_adapters(pt1);
err_kfree:
	pci_set_drvdata(pdev, NULL);
	kfree(pt1);
err_pci_iounmap:
	pci_iounmap(pdev, regs);
err_pci_release_regions:
	pci_release_regions(pdev);
err_pci_disable_device:
	pci_disable_device(pdev);
err:
	return ret;
}

static struct pci_device_id pt1_id_table[] = {
	{ PCI_DEVICE(0x10ee, 0x211a) },
	{ PCI_DEVICE(0x10ee, 0x222a) },
	{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);

static struct pci_driver pt1_driver = {
	.name		= DRIVER_NAME,
	.probe		= pt1_probe,
	.remove		= __devexit_p(pt1_remove),
	.id_table	= pt1_id_table,
};

static int __init pt1_init(void)
{
	return pci_register_driver(&pt1_driver);
}

static void __exit pt1_cleanup(void)
{
	pci_unregister_driver(&pt1_driver);
}

module_init(pt1_init);
module_exit(pt1_cleanup);

MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");