2 * driver for Earthsoft PT1/PT2
4 * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
6 * based on pt1dvr - http://pt1dvr.sourceforge.jp/
7 * by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
20 #include <linux/kernel.h>
21 #include <linux/sched/signal.h>
22 #include <linux/module.h>
23 #include <linux/slab.h>
24 #include <linux/vmalloc.h>
25 #include <linux/pci.h>
26 #include <linux/kthread.h>
27 #include <linux/freezer.h>
28 #include <linux/ratelimit.h>
31 #include "dvb_demux.h"
34 #include "dvb_frontend.h"
36 #include "va1j5jf8007t.h"
37 #include "va1j5jf8007s.h"
39 #define DRIVER_NAME "earth-pt1"
/* DMA pages are 4 KiB: 1 << PT1_PAGE_SHIFT. */
41 #define PT1_PAGE_SHIFT 12
42 #define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
/* 32-bit "micro-packet" words held in one buffer page (4 KiB / 4). */
43 #define PT1_NR_UPACKETS 1024
/* Buffer pages referenced by one table page. */
44 #define PT1_NR_BUFS 511
/*
 * DMA data structures and per-device/per-adapter state.
 * NOTE(review): this view of the file is gapped — several struct headers and
 * fields are not visible here; confirm layout against the full source.
 */
/* One DMA buffer page: an array of little-endian micro-packet words. */
46 struct pt1_buffer_page {
47 __le32 upackets[PT1_NR_UPACKETS];
/* One DMA table page: page-frame numbers of the buffer pages it owns. */
50 struct pt1_table_page {
52 __le32 buf_pfns[PT1_NR_BUFS];
/* CPU-side handle on a buffer page (presumably paired with its dma_addr_t). */
56 struct pt1_buffer_page *page;
/* CPU-side handle on a table page plus its buffers. */
61 struct pt1_table_page *page;
63 struct pt1_buffer bufs[PT1_NR_BUFS];
/* The hardware carries four streams: 2x ISDB-S + 2x ISDB-T. */
66 #define PT1_NR_ADAPS 4
/* Per-device state (struct pt1): bit-banged I2C bus, adapters, DMA tables,
 * and the polling thread started on first feed. */
73 struct i2c_adapter i2c_adap;
75 struct pt1_adapter *adaps[PT1_NR_ADAPS];
76 struct pt1_table *tables;
77 struct task_struct *kthread;
/* Per-adapter state (struct pt1_adapter): one DVB adapter/demux pair and the
 * saved frontend ops that pt1_set_voltage/pt1_sleep/pt1_wakeup chain to. */
95 struct dvb_adapter adap;
96 struct dvb_demux demux;
99 struct dvb_frontend *fe;
100 int (*orig_set_voltage)(struct dvb_frontend *fe,
101 enum fe_sec_voltage voltage);
102 int (*orig_sleep)(struct dvb_frontend *fe);
103 int (*orig_init)(struct dvb_frontend *fe);
/* Last voltage requested by the frontend; feeds pt1_update_power(). */
105 enum fe_sec_voltage voltage;
/* Write a 32-bit value to device register 'reg' (registers are 4 bytes apart). */
109 static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
111 writel(data, pt1->regs + reg * 4);
/* Read a 32-bit value from device register 'reg'. */
114 static u32 pt1_read_reg(struct pt1 *pt1, int reg)
116 return readl(pt1->regs + reg * 4);
/* Number of DMA table pages to allocate; tunable via the nr_tables module
 * parameter (read-only after load: perm 0). */
119 static int pt1_nr_tables = 8;
120 module_param_named(nr_tables, pt1_nr_tables, int, 0);
/* Tell the hardware one more table page is available for DMA. */
122 static void pt1_increment_table_count(struct pt1 *pt1)
124 pt1_write_reg(pt1, 0, 0x00000020);
/* Reset the hardware's count of available table pages. */
127 static void pt1_init_table_count(struct pt1 *pt1)
129 pt1_write_reg(pt1, 0, 0x00000010);
/* Point the device at the first table page (by page-frame number, reg 5)
 * and start DMA (reg 0). */
132 static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
134 pt1_write_reg(pt1, 5, first_pfn);
135 pt1_write_reg(pt1, 0, 0x0c000040);
/* Stop DMA / detach the table chain from the device. */
138 static void pt1_unregister_tables(struct pt1 *pt1)
140 pt1_write_reg(pt1, 0, 0x08080000);
/* Synchronize with the device: poll bit 29 of reg 0 for up to 57 iterations,
 * clocking the device with a write of 0x8 each try; fail with an error log
 * if the bit never comes up. */
143 static int pt1_sync(struct pt1 *pt1)
146 for (i = 0; i < 57; i++) {
147 if (pt1_read_reg(pt1, 0) & 0x20000000)
149 pt1_write_reg(pt1, 0, 0x00000008);
151 dev_err(&pt1->pdev->dev, "could not sync\n");
/* Shift a 57-bit device ID out of the hardware, one bit (reg 0 bit 30)
 * per clocking write of 0x8. */
155 static u64 pt1_identify(struct pt1 *pt1)
160 for (i = 0; i < 57; i++) {
161 id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
162 pt1_write_reg(pt1, 0, 0x00000008);
/* Unlock the device: issue the unlock command, then poll bit 31 of reg 0
 * up to 3 times, sleeping ~1 ms ((HZ + 999) / 1000 jiffies) between polls. */
167 static int pt1_unlock(struct pt1 *pt1)
170 pt1_write_reg(pt1, 0, 0x00000008);
171 for (i = 0; i < 3; i++) {
172 if (pt1_read_reg(pt1, 0) & 0x80000000)
174 schedule_timeout_uninterruptible((HZ + 999) / 1000);
176 dev_err(&pt1->pdev->dev, "could not unlock\n");
/* Pulse the PCI-reset bit (assert 0x01010000, deassert 0x01000000) and wait
 * for completion (reg 0 bit 0), polling up to 10 times at ~1 ms intervals. */
180 static int pt1_reset_pci(struct pt1 *pt1)
183 pt1_write_reg(pt1, 0, 0x01010000);
184 pt1_write_reg(pt1, 0, 0x01000000);
185 for (i = 0; i < 10; i++) {
186 if (pt1_read_reg(pt1, 0) & 0x00000001)
188 schedule_timeout_uninterruptible((HZ + 999) / 1000);
190 dev_err(&pt1->pdev->dev, "could not reset PCI\n");
/* Pulse the RAM-reset bit (mirror of pt1_reset_pci with bit 1) and wait
 * for completion, polling up to 10 times at ~1 ms intervals. */
194 static int pt1_reset_ram(struct pt1 *pt1)
197 pt1_write_reg(pt1, 0, 0x02020000);
198 pt1_write_reg(pt1, 0, 0x02000000);
199 for (i = 0; i < 10; i++) {
200 if (pt1_read_reg(pt1, 0) & 0x00000002)
202 schedule_timeout_uninterruptible((HZ + 999) / 1000);
204 dev_err(&pt1->pdev->dev, "could not reset RAM\n");
/* One RAM-enable step: latch the current state of reg 0 bit 2, kick the
 * device (write 0x2), then wait for the bit to toggle — up to 10 rounds of
 * 1024 fast polls with a ~1 ms sleep between rounds. */
208 static int pt1_do_enable_ram(struct pt1 *pt1)
212 status = pt1_read_reg(pt1, 0) & 0x00000004;
213 pt1_write_reg(pt1, 0, 0x00000002);
214 for (i = 0; i < 10; i++) {
215 for (j = 0; j < 1024; j++) {
216 if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
219 schedule_timeout_uninterruptible((HZ + 999) / 1000);
221 dev_err(&pt1->pdev->dev, "could not enable RAM\n");
/* Enable RAM by repeating pt1_do_enable_ram() a device-specific number of
 * times: 128 phases for the PT1 (PCI device 0x211a), 166 for the PT2. */
225 static int pt1_enable_ram(struct pt1 *pt1)
229 schedule_timeout_uninterruptible((HZ + 999) / 1000);
230 phase = pt1->pdev->device == 0x211a ? 128 : 166;
231 for (i = 0; i < phase; i++) {
232 ret = pt1_do_enable_ram(pt1);
/* Power down the on-board RAM. */
239 static void pt1_disable_ram(struct pt1 *pt1)
241 pt1_write_reg(pt1, 0, 0x0b0b0000);
/* Enable/disable TS stream 'index': bit (index + 8) is the write mask,
 * bit 'index' carries the new enabled state. */
244 static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
246 pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
/* Disable all four TS streams. */
249 static void pt1_init_streams(struct pt1 *pt1)
252 for (i = 0; i < PT1_NR_ADAPS; i++)
253 pt1_set_stream(pt1, i, 0);
/*
 * Demultiplex one DMA buffer page of micro-packets into per-adapter TS
 * packets and feed them to the software demux.
 *
 * Each 32-bit micro-packet word encodes (as decoded below):
 *   bits 31:29  stream index + 1 (0 means "no data" — page not ready)
 *   bits 28:26  sync counter used for loss detection
 *   bit  25     first-micro-packet-of-TS-packet flag
 *   bit  24     device buffer overflow flag
 *   bits 23:8   two payload bytes; bits 7:0 a third byte, unused for the
 *               63rd micro-packet (63 * 3 - 1 = 188 bytes per TS packet)
 *
 * Returns nonzero when the page held data (caller advances), zero when the
 * page was not yet filled by the device.
 */
256 static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
261 struct pt1_adapter *adap;
/* The device fills upackets in order, so a zero last word means the page
 * is still incomplete. */
266 if (!page->upackets[PT1_NR_UPACKETS - 1])
269 for (i = 0; i < PT1_NR_UPACKETS; i++) {
270 upacket = le32_to_cpu(page->upackets[i]);
271 index = (upacket >> 29) - 1;
272 if (index < 0 || index >= PT1_NR_ADAPS)
275 adap = pt1->adaps[index];
276 if (upacket >> 25 & 1)
277 adap->upacket_count = 0;
/* Not at a packet start and not mid-packet: wait for alignment. */
278 else if (!adap->upacket_count)
281 if (upacket >> 24 & 1)
282 printk_ratelimited(KERN_INFO "earth-pt1: device buffer overflowing. table[%d] buf[%d]\n",
283 pt1->table_index, pt1->buf_index);
284 sc = upacket >> 26 & 0x7;
/* st_count == -1 means "no previous value"; otherwise the 3-bit counter
 * must advance by exactly one or data was lost. */
285 if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
286 printk_ratelimited(KERN_INFO "earth-pt1: data loss in streamID(adapter)[%d]\n",
/* Accumulate 3 bytes per micro-packet into the 188-byte TS packet. */
291 offset = adap->packet_count * 188 + adap->upacket_count * 3;
292 buf[offset] = upacket >> 16;
293 buf[offset + 1] = upacket >> 8;
294 if (adap->upacket_count != 62)
295 buf[offset + 2] = upacket;
/* After 63 micro-packets one TS packet is complete; after 21 packets
 * hand the whole buffer to the demux. */
297 if (++adap->upacket_count >= 63) {
298 adap->upacket_count = 0;
299 if (++adap->packet_count >= 21) {
300 dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
301 adap->packet_count = 0;
/* Mark the page consumed so the next pass sees it as "not yet filled". */
306 page->upackets[PT1_NR_UPACKETS - 1] = 0;
/*
 * Polling kthread body: walk the ring of table/buffer pages, filtering each
 * page as the device completes it. Sleeps ~1 ms when the current page is not
 * ready, and hands a table page back to the device once all of its buffers
 * have been consumed.
 */
310 static int pt1_thread(void *data)
313 struct pt1_buffer_page *page;
318 while (!kthread_should_stop()) {
321 page = pt1->tables[pt1->table_index].bufs[pt1->buf_index].page;
322 if (!pt1_filter(pt1, page)) {
323 schedule_timeout_interruptible((HZ + 999) / 1000);
327 if (++pt1->buf_index >= PT1_NR_BUFS) {
/* All buffers of this table drained: recycle it to the device. */
328 pt1_increment_table_count(pt1);
330 if (++pt1->table_index >= pt1_nr_tables)
331 pt1->table_index = 0;
/* Free one coherent-DMA page previously obtained from pt1_alloc_page(). */
338 static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
340 dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
/*
 * Allocate one coherent-DMA page; returns the CPU pointer and stores the
 * DMA address in *addrp and the 32-bit page-frame number in *pfnp.
 * The BUG_ONs assert the device's constraints: page-aligned and the PFN
 * fits in 32 bits.
 */
343 static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
348 page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
353 BUG_ON(addr & (PT1_PAGE_SIZE - 1));
354 BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);
357 *pfnp = addr >> PT1_PAGE_SHIFT;
/* Release the DMA page backing one buffer. */
361 static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
363 pt1_free_page(pt1, buf->page, buf->addr);
/* Allocate and initialize one buffer page; clearing the last micro-packet
 * word marks the page "empty" for pt1_filter(). Reports the PFN via *pfnp. */
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
369 struct pt1_buffer_page *page;
372 page = pt1_alloc_page(pt1, &addr, pfnp);
376 page->upackets[PT1_NR_UPACKETS - 1] = 0;
/* Tear down one table: free all of its buffer pages, then the table page. */
383 static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
387 for (i = 0; i < PT1_NR_BUFS; i++)
388 pt1_cleanup_buffer(pt1, &table->bufs[i]);
390 pt1_free_page(pt1, table->page, table->addr);
/*
 * Allocate one table page plus its PT1_NR_BUFS buffer pages, recording each
 * buffer's PFN (little-endian) in the table for the device. On success the
 * hardware's table count is incremented. On failure, unwinds the buffers
 * allocated so far and frees the table page.
 */
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
396 struct pt1_table_page *page;
401 page = pt1_alloc_page(pt1, &addr, pfnp);
405 for (i = 0; i < PT1_NR_BUFS; i++) {
406 ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
410 page->buf_pfns[i] = cpu_to_le32(buf_pfn);
413 pt1_increment_table_count(pt1);
/* Error path: free the buffers created before the failure. */
420 pt1_cleanup_buffer(pt1, &table->bufs[i]);
422 pt1_free_page(pt1, page, addr);
/* Detach the table chain from the device, then free every table. */
426 static void pt1_cleanup_tables(struct pt1 *pt1)
428 struct pt1_table *tables;
431 tables = pt1->tables;
432 pt1_unregister_tables(pt1);
434 for (i = 0; i < pt1_nr_tables; i++)
435 pt1_cleanup_table(pt1, &tables[i]);
/*
 * Allocate pt1_nr_tables tables and link them into a circular list via each
 * table page's next_pfn (the last points back to the first), then register
 * the ring with the device. Unwinds already-built tables on failure.
 */
440 static int pt1_init_tables(struct pt1 *pt1)
442 struct pt1_table *tables;
446 tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
450 pt1_init_table_count(pt1);
454 ret = pt1_init_table(pt1, &tables[0], &first_pfn);
460 while (i < pt1_nr_tables) {
461 ret = pt1_init_table(pt1, &tables[i], &pfn);
/* Chain the previous table to this one. */
464 tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
/* Close the ring. */
468 tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);
470 pt1_register_tables(pt1, first_pfn);
471 pt1->tables = tables;
/* Error path: free tables built so far. */
476 pt1_cleanup_table(pt1, &tables[i]);
/* Start the polling kthread (under pt1->lock); presumably a no-op if it is
 * already running — confirm against the full source, this view is gapped. */
482 static int pt1_start_polling(struct pt1 *pt1)
486 mutex_lock(&pt1->lock);
488 pt1->kthread = kthread_run(pt1_thread, pt1, "earth-pt1");
489 if (IS_ERR(pt1->kthread)) {
490 ret = PTR_ERR(pt1->kthread);
494 mutex_unlock(&pt1->lock);
/* dvb_demux start_feed hook: on the adapter's first user, start the polling
 * thread and enable this adapter's hardware TS stream. */
498 static int pt1_start_feed(struct dvb_demux_feed *feed)
500 struct pt1_adapter *adap;
501 adap = container_of(feed->demux, struct pt1_adapter, demux);
502 if (!adap->users++) {
505 ret = pt1_start_polling(adap->pt1);
508 pt1_set_stream(adap->pt1, adap->index, 1);
/* Stop the polling kthread once no adapter has active users (under lock). */
513 static void pt1_stop_polling(struct pt1 *pt1)
517 mutex_lock(&pt1->lock);
518 for (i = 0, count = 0; i < PT1_NR_ADAPS; i++)
519 count += pt1->adaps[i]->users;
521 if (count == 0 && pt1->kthread) {
522 kthread_stop(pt1->kthread);
525 mutex_unlock(&pt1->lock);
/* dvb_demux stop_feed hook: on the adapter's last user, disable its stream
 * and possibly stop the polling thread. */
528 static int pt1_stop_feed(struct dvb_demux_feed *feed)
530 struct pt1_adapter *adap;
531 adap = container_of(feed->demux, struct pt1_adapter, demux);
532 if (!--adap->users) {
533 pt1_set_stream(adap->pt1, adap->index, 0);
534 pt1_stop_polling(adap->pt1);
/*
 * Recompute and write the power-control register (reg 1) from global state
 * (pt1->power, pt1->reset) and each adapter's LNB voltage and sleep state.
 */
pt1_update_power(struct pt1 *pt1)
544 struct pt1_adapter *adap;
545 static const int sleep_bits[] = {
552 bits = pt1->power | !pt1->reset << 3;
553 mutex_lock(&pt1->lock);
554 for (i = 0; i < PT1_NR_ADAPS; i++) {
555 adap = pt1->adaps[i];
556 switch (adap->voltage) {
557 case SEC_VOLTAGE_13: /* actually 11V */
560 case SEC_VOLTAGE_18: /* actually 15V */
561 bits |= 1 << 1 | 1 << 2;
567 /* XXX: The bits should be changed depending on adap->sleep. */
568 bits |= sleep_bits[i];
570 pt1_write_reg(pt1, 1, bits);
571 mutex_unlock(&pt1->lock);
/* Frontend set_voltage wrapper: record the requested LNB voltage, update the
 * board power register, then chain to the demod's original handler if any. */
574 static int pt1_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage)
576 struct pt1_adapter *adap;
578 adap = container_of(fe->dvb, struct pt1_adapter, adap);
579 adap->voltage = voltage;
580 pt1_update_power(adap->pt1);
582 if (adap->orig_set_voltage)
583 return adap->orig_set_voltage(fe, voltage);
/* Frontend sleep wrapper: refresh board power, then chain to the demod's
 * original sleep handler if any. */
588 static int pt1_sleep(struct dvb_frontend *fe)
590 struct pt1_adapter *adap;
592 adap = container_of(fe->dvb, struct pt1_adapter, adap);
594 pt1_update_power(adap->pt1);
596 if (adap->orig_sleep)
597 return adap->orig_sleep(fe);
/* Frontend init wrapper: refresh board power, let it settle ~1 ms, then
 * chain to the demod's original init handler. */
602 static int pt1_wakeup(struct dvb_frontend *fe)
604 struct pt1_adapter *adap;
606 adap = container_of(fe->dvb, struct pt1_adapter, adap);
608 pt1_update_power(adap->pt1);
609 schedule_timeout_uninterruptible((HZ + 999) / 1000);
612 return adap->orig_init(fe);
/* Tear down one adapter in reverse order of pt1_alloc_adapter(): demux
 * frontend, dmxdev, demux, DVB adapter, then the TS staging page. */
617 static void pt1_free_adapter(struct pt1_adapter *adap)
619 adap->demux.dmx.close(&adap->demux.dmx);
620 dvb_dmxdev_release(&adap->dmxdev);
621 dvb_dmx_release(&adap->demux);
622 dvb_unregister_adapter(&adap->adap);
623 free_page((unsigned long)adap->buf);
/* Standard module option exposing DVB adapter number assignment. */
627 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
/*
 * Allocate and register one adapter: private struct, one page as the TS
 * staging buffer (21 packets * 188 bytes fit in 4 KiB), DVB adapter, demux
 * (256 feeds/filters), and dmxdev. Unwinds via goto labels on failure.
 * Returns the adapter; error return convention not fully visible here.
 */
static struct pt1_adapter *
630 pt1_alloc_adapter(struct pt1 *pt1)
632 struct pt1_adapter *adap;
634 struct dvb_adapter *dvb_adap;
635 struct dvb_demux *demux;
636 struct dmxdev *dmxdev;
639 adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
647 adap->voltage = SEC_VOLTAGE_OFF;
650 buf = (u8 *)__get_free_page(GFP_KERNEL);
657 adap->upacket_count = 0;
658 adap->packet_count = 0;
661 dvb_adap = &adap->adap;
662 dvb_adap->priv = adap;
663 ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
664 &pt1->pdev->dev, adapter_nr);
668 demux = &adap->demux;
669 demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
671 demux->feednum = 256;
672 demux->filternum = 256;
673 demux->start_feed = pt1_start_feed;
674 demux->stop_feed = pt1_stop_feed;
675 demux->write_to_decoder = NULL;
676 ret = dvb_dmx_init(demux);
678 goto err_unregister_adapter;
680 dmxdev = &adap->dmxdev;
681 dmxdev->filternum = 256;
682 dmxdev->demux = &demux->dmx;
683 dmxdev->capabilities = 0;
684 ret = dvb_dmxdev_init(dmxdev, dvb_adap);
686 goto err_dmx_release;
/* Error unwinding in reverse order of construction. */
691 dvb_dmx_release(demux);
692 err_unregister_adapter:
693 dvb_unregister_adapter(dvb_adap);
695 free_page((unsigned long)buf);
/* Free all four adapters. */
702 static void pt1_cleanup_adapters(struct pt1 *pt1)
705 for (i = 0; i < PT1_NR_ADAPS; i++)
706 pt1_free_adapter(pt1->adaps[i]);
/* Allocate all four adapters; on failure free the ones built so far. */
709 static int pt1_init_adapters(struct pt1 *pt1)
712 struct pt1_adapter *adap;
715 for (i = 0; i < PT1_NR_ADAPS; i++) {
716 adap = pt1_alloc_adapter(pt1);
723 pt1->adaps[i] = adap;
/* Error path: unwind adapters already created. */
729 pt1_free_adapter(pt1->adaps[i]);
/* Unregister one adapter's frontend. */
734 static void pt1_cleanup_frontend(struct pt1_adapter *adap)
736 dvb_unregister_frontend(adap->fe);
/* Hook the driver's power-management wrappers into the frontend ops (saving
 * the originals so the wrappers can chain), then register the frontend. */
739 static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
743 adap->orig_set_voltage = fe->ops.set_voltage;
744 adap->orig_sleep = fe->ops.sleep;
745 adap->orig_init = fe->ops.init;
746 fe->ops.set_voltage = pt1_set_voltage;
747 fe->ops.sleep = pt1_sleep;
748 fe->ops.init = pt1_wakeup;
750 ret = dvb_register_frontend(&adap->adap, fe);
/* Unregister every adapter's frontend. */
758 static void pt1_cleanup_frontends(struct pt1 *pt1)
761 for (i = 0; i < PT1_NR_ADAPS; i++)
762 pt1_cleanup_frontend(pt1->adaps[i]);
/* Demodulator config pair (satellite + terrestrial) per frontend slot. */
766 struct va1j5jf8007s_config va1j5jf8007s_config;
767 struct va1j5jf8007t_config va1j5jf8007t_config;
/* PT1 boards run the demods with a 20 MHz reference clock; I2C addresses
 * alternate S (0x1b/0x19) and T (0x1a/0x18). */
770 static const struct pt1_config pt1_configs[2] = {
773 .demod_address = 0x1b,
774 .frequency = VA1J5JF8007S_20MHZ,
777 .demod_address = 0x1a,
778 .frequency = VA1J5JF8007T_20MHZ,
782 .demod_address = 0x19,
783 .frequency = VA1J5JF8007S_20MHZ,
786 .demod_address = 0x18,
787 .frequency = VA1J5JF8007T_20MHZ,
/* PT2 boards are identical except for the 25 MHz reference clock. */
792 static const struct pt1_config pt2_configs[2] = {
795 .demod_address = 0x1b,
796 .frequency = VA1J5JF8007S_25MHZ,
799 .demod_address = 0x1a,
800 .frequency = VA1J5JF8007T_25MHZ,
804 .demod_address = 0x19,
805 .frequency = VA1J5JF8007S_25MHZ,
808 .demod_address = 0x18,
809 .frequency = VA1J5JF8007T_25MHZ,
/*
 * Attach all four demod frontends (even index = ISDB-S, odd = ISDB-T, one
 * config pair per two slots), run the S/T prepare steps, then register each
 * frontend with its adapter. Config table is chosen by PCI device ID
 * (0x211a = PT1, 20 MHz; otherwise PT2, 25 MHz). Unwinds attached/registered
 * frontends on failure.
 */
814 static int pt1_init_frontends(struct pt1 *pt1)
817 struct i2c_adapter *i2c_adap;
818 const struct pt1_config *configs, *config;
819 struct dvb_frontend *fe[4];
825 i2c_adap = &pt1->i2c_adap;
826 configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
828 config = &configs[i / 2];
830 fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
833 ret = -ENODEV; /* This does not sound nice... */
838 fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
846 ret = va1j5jf8007s_prepare(fe[i - 2]);
850 ret = va1j5jf8007t_prepare(fe[i - 1]);
857 ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
/* Error unwinding: release attached frontends, unregister registered ones. */
866 fe[i]->ops.release(fe[i]);
869 dvb_unregister_frontend(fe[j]);
/* Store one bit-bang I2C micro-instruction at sequencer address 'addr'
 * (reg 4). Clock and data are active-low in the register encoding. */
874 static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
875 int clock, int data, int next_addr)
877 pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
878 !clock << 11 | !data << 10 | next_addr);
/* Emit the 3-instruction sequence writing one data bit: data valid with SCL
 * low, high, low again. Advances the caller's sequencer address via *addrp. */
881 static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
883 pt1_i2c_emit(pt1, addr, 1, 0, 0, data, addr + 1);
884 pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
885 pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
/* Emit the 4-instruction sequence reading one bit: SDA released, SCL pulsed
 * high with read_enable set to sample the bit, SCL low again. */
889 static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
891 pt1_i2c_emit(pt1, addr, 1, 0, 0, 1, addr + 1);
892 pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
893 pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
894 pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
/* Write one byte MSB-first, then release SDA (write 1) for the slave's ACK
 * clock. */
898 static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
901 for (i = 0; i < 8; i++)
902 pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
903 pt1_i2c_write_bit(pt1, addr, &addr, 1);
/* Read one byte (8 bit reads), then send the ACK/NAK bit: 'last' is 1 (NAK)
 * for the final byte of a transfer. */
907 static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
910 for (i = 0; i < 8; i++)
911 pt1_i2c_read_bit(pt1, addr, &addr);
912 pt1_i2c_write_bit(pt1, addr, &addr, last);
/* Emit an I2C (repeated-)START condition: SDA falls while SCL is high,
 * then SCL is pulled low. */
916 static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
918 pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
919 pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
920 pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
/* Emit a full write transaction: START, slave address with R/W=0, then the
 * message payload bytes. */
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
928 pt1_i2c_prepare(pt1, addr, &addr);
929 pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
930 for (i = 0; i < msg->len; i++)
931 pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
/* Emit a full read transaction: START, slave address with R/W=1, then read
 * msg->len bytes, NAKing the last one. */
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
939 pt1_i2c_prepare(pt1, addr, &addr);
940 pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
941 for (i = 0; i < msg->len; i++)
942 pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
/*
 * Emit the STOP condition (SDA rises while SCL is high, next_addr 0 ends the
 * program), kick the sequencer (reg 0 = 0x4), then wait for the busy bit
 * (reg 0 bit 7) to clear, sleeping ~1 ms per poll; abortable by a pending
 * signal.
 */
946 static int pt1_i2c_end(struct pt1 *pt1, int addr)
948 pt1_i2c_emit(pt1, addr, 1, 0, 0, 0, addr + 1);
949 pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
950 pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);
952 pt1_write_reg(pt1, 0, 0x00000004);
954 if (signal_pending(current))
956 schedule_timeout_interruptible((HZ + 999) / 1000);
957 } while (pt1_read_reg(pt1, 0) & 0x00000080);
/*
 * Begin building an I2C program. Instruction 0 is an idle self-loop; on the
 * very first transfer after load (!i2c_running) an extra bus-settling
 * preamble is emitted and i2c_running is latched.
 */
961 static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
966 pt1_i2c_emit(pt1, addr, 0, 0, 1, 1, addr /* itself */);
969 if (!pt1->i2c_running) {
970 pt1_i2c_emit(pt1, addr, 1, 0, 1, 1, addr + 1);
971 pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
973 pt1->i2c_running = 1;
/*
 * i2c_algorithm master_xfer: supports a plain write, and the common
 * write-then-read pair (a write message followed by an I2C_M_RD message),
 * compiled into one sequencer program. Read-back data is fetched from reg 2
 * after the program completes. The full message-pattern validation is not
 * visible in this gapped view.
 */
978 static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
982 struct i2c_msg *msg, *next_msg;
987 pt1 = i2c_get_adapdata(adap);
989 for (i = 0; i < num; i++) {
/* Bare reads are not supported by this engine. */
991 if (msg->flags & I2C_M_RD)
995 next_msg = &msgs[i + 1];
999 if (next_msg && next_msg->flags & I2C_M_RD) {
1002 len = next_msg->len;
/* Combined write + repeated-start read. */
1006 pt1_i2c_begin(pt1, &addr);
1007 pt1_i2c_write_msg(pt1, addr, &addr, msg);
1008 pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
1009 ret = pt1_i2c_end(pt1, addr);
/* Retrieve bytes the sequencer captured. */
1013 word = pt1_read_reg(pt1, 2);
1015 next_msg->buf[len] = word;
/* Plain write. */
1019 pt1_i2c_begin(pt1, &addr);
1020 pt1_i2c_write_msg(pt1, addr, &addr, msg);
1021 ret = pt1_i2c_end(pt1, addr);
/* Plain I2C functionality only (no SMBus emulation). */
1030 static u32 pt1_i2c_func(struct i2c_adapter *adap)
1032 return I2C_FUNC_I2C;
1035 static const struct i2c_algorithm pt1_i2c_algo = {
1036 .master_xfer = pt1_i2c_xfer,
1037 .functionality = pt1_i2c_func,
/* Emit 128 idle instructions at address 0 — presumably a bus settle delay;
 * confirm intent against the full source. */
1040 static void pt1_i2c_wait(struct pt1 *pt1)
1043 for (i = 0; i < 128; i++)
1044 pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
/* Fill the entire 1024-entry sequencer program memory with idle
 * instructions. */
1047 static void pt1_i2c_init(struct pt1 *pt1)
1050 for (i = 0; i < 1024; i++)
1051 pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
/*
 * PCI remove: stop the polling thread, free DMA tables, unregister
 * frontends, power the RAM down, drop board power, free adapters, delete
 * the I2C adapter, then release PCI resources — the reverse of pt1_probe().
 */
1054 static void pt1_remove(struct pci_dev *pdev)
1059 pt1 = pci_get_drvdata(pdev);
1063 kthread_stop(pt1->kthread);
1064 pt1_cleanup_tables(pt1);
1065 pt1_cleanup_frontends(pt1);
1066 pt1_disable_ram(pt1);
1069 pt1_update_power(pt1);
1070 pt1_cleanup_adapters(pt1);
1071 i2c_del_adapter(&pt1->i2c_adap);
1073 pci_iounmap(pdev, regs);
1074 pci_release_regions(pdev);
1075 pci_disable_device(pdev);
/*
 * PCI probe: enable the device (32-bit DMA only), map BAR 0, allocate the
 * driver state, create the four adapters, register the bit-banged I2C bus,
 * bring the hardware up (sync, unlock, PCI/RAM reset, RAM enable), attach
 * frontends, and finally build the DMA table ring. Unwinds via goto labels
 * in reverse order on any failure.
 */
1078 static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1083 struct i2c_adapter *i2c_adap;
1085 ret = pci_enable_device(pdev);
1089 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1091 goto err_pci_disable_device;
1093 pci_set_master(pdev);
1095 ret = pci_request_regions(pdev, DRIVER_NAME);
1097 goto err_pci_disable_device;
1099 regs = pci_iomap(pdev, 0, 0);
1102 goto err_pci_release_regions;
1105 pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
1108 goto err_pci_iounmap;
1111 mutex_init(&pt1->lock);
1114 pci_set_drvdata(pdev, pt1);
1116 ret = pt1_init_adapters(pt1);
/* NOTE(review): mutex_init(&pt1->lock) appears both above and here in this
 * view of the source — redundant (and re-initializing a held/used mutex
 * would be a bug); confirm against the full file. */
1120 mutex_init(&pt1->lock);
1124 pt1_update_power(pt1);
1126 i2c_adap = &pt1->i2c_adap;
1127 i2c_adap->algo = &pt1_i2c_algo;
1128 i2c_adap->algo_data = NULL;
1129 i2c_adap->dev.parent = &pdev->dev;
1130 strcpy(i2c_adap->name, DRIVER_NAME);
1131 i2c_set_adapdata(i2c_adap, pt1);
1132 ret = i2c_add_adapter(i2c_adap);
1134 goto err_pt1_cleanup_adapters;
1139 ret = pt1_sync(pt1);
1141 goto err_i2c_del_adapter;
1145 ret = pt1_unlock(pt1);
1147 goto err_i2c_del_adapter;
1149 ret = pt1_reset_pci(pt1);
1151 goto err_i2c_del_adapter;
1153 ret = pt1_reset_ram(pt1);
1155 goto err_i2c_del_adapter;
1157 ret = pt1_enable_ram(pt1);
1159 goto err_i2c_del_adapter;
1161 pt1_init_streams(pt1);
/* Power sequencing: update, settle ~20 ms, update again, settle ~1 ms. */
1164 pt1_update_power(pt1);
1165 schedule_timeout_uninterruptible((HZ + 49) / 50);
1168 pt1_update_power(pt1);
1169 schedule_timeout_uninterruptible((HZ + 999) / 1000);
1171 ret = pt1_init_frontends(pt1);
1173 goto err_pt1_disable_ram;
1175 ret = pt1_init_tables(pt1);
1177 goto err_pt1_cleanup_frontends;
/* Error unwinding, reverse order of construction. */
1181 err_pt1_cleanup_frontends:
1182 pt1_cleanup_frontends(pt1);
1183 err_pt1_disable_ram:
1184 pt1_disable_ram(pt1);
1187 pt1_update_power(pt1);
1188 err_i2c_del_adapter:
1189 i2c_del_adapter(i2c_adap);
1190 err_pt1_cleanup_adapters:
1191 pt1_cleanup_adapters(pt1);
1195 pci_iounmap(pdev, regs);
1196 err_pci_release_regions:
1197 pci_release_regions(pdev);
1198 err_pci_disable_device:
1199 pci_disable_device(pdev);
/* Supported boards: Xilinx vendor ID with PT1 (0x211a) and PT2 (0x222a)
 * device IDs. */
1205 static const struct pci_device_id pt1_id_table[] = {
1206 { PCI_DEVICE(0x10ee, 0x211a) },
1207 { PCI_DEVICE(0x10ee, 0x222a) },
1210 MODULE_DEVICE_TABLE(pci, pt1_id_table);
1212 static struct pci_driver pt1_driver = {
1213 .name = DRIVER_NAME,
1215 .remove = pt1_remove,
1216 .id_table = pt1_id_table,
/* Boilerplate module init/exit via the PCI core. */
1219 module_pci_driver(pt1_driver);
1221 MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
1222 MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
1223 MODULE_LICENSE("GPL");