/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 */
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kmod.h>
23 #include <linux/kernel.h>
24 #include <linux/slab.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <asm/div64.h>
28 #include <linux/firmware.h>
32 #include "altera-ci.h"
33 #include "cx23888-ir.h"
34 #include "cx23885-ir.h"
35 #include "cx23885-av.h"
36 #include "cx23885-input.h"
38 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
39 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION(CX23885_VERSION);
43 static unsigned int debug;
44 module_param(debug, int, 0644);
45 MODULE_PARM_DESC(debug, "enable debug messages");
47 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
48 module_param_array(card, int, NULL, 0444);
49 MODULE_PARM_DESC(card, "card type");
51 #define dprintk(level, fmt, arg...)\
52 do { if (debug >= level)\
53 printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
56 static unsigned int cx23885_devcount;
58 #define NO_SYNC_LINE (-1U)
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 *
 * 1 line = 16 bytes of CDT
 * cdt size = 16 * linesize
 *
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */
/* Per-DMA-channel SRAM layout for the cx23885 bridge: command block
 * start, control/IQ area start, and the PCI DMA pointer/count registers
 * that serve each channel (DMA1..DMA8 register banks).
 * NOTE(review): this extraction is missing interior lines (channel
 * names, .cdt/.fifo_start/.fifo_size fields, braces) — verify every
 * offset against the canonical cx23885-core.c before editing.
 */
80 static struct sram_channel cx23885_sram_channels[] = {
83 .cmds_start = 0x10000,
84 .ctrl_start = 0x10380,
/* first channel uses the DMA1 register bank */
88 .ptr1_reg = DMA1_PTR1,
89 .ptr2_reg = DMA1_PTR2,
90 .cnt1_reg = DMA1_CNT1,
91 .cnt2_reg = DMA1_CNT2,
/* second channel: DMA2 bank; cmds/ctrl/fifo fields missing here */
100 .ptr1_reg = DMA2_PTR1,
101 .ptr2_reg = DMA2_PTR2,
102 .cnt1_reg = DMA2_CNT1,
103 .cnt2_reg = DMA2_CNT2,
/* third channel: DMA3 bank, FIFO window at 0x5000 */
107 .cmds_start = 0x100A0,
108 .ctrl_start = 0x10400,
110 .fifo_start = 0x5000,
112 .ptr1_reg = DMA3_PTR1,
113 .ptr2_reg = DMA3_PTR2,
114 .cnt1_reg = DMA3_CNT1,
115 .cnt2_reg = DMA3_CNT2,
/* fourth channel: DMA4 bank */
124 .ptr1_reg = DMA4_PTR1,
125 .ptr2_reg = DMA4_PTR2,
126 .cnt1_reg = DMA4_CNT1,
127 .cnt2_reg = DMA4_CNT2,
/* fifth channel: DMA5 bank */
136 .ptr1_reg = DMA5_PTR1,
137 .ptr2_reg = DMA5_PTR2,
138 .cnt1_reg = DMA5_CNT1,
139 .cnt2_reg = DMA5_CNT2,
/* sixth channel: FIFO at 0x6000; note it also names the DMA5 bank —
 * presumably intentional sharing, but confirm against the datasheet */
143 .cmds_start = 0x10140,
144 .ctrl_start = 0x10440,
146 .fifo_start = 0x6000,
148 .ptr1_reg = DMA5_PTR1,
149 .ptr2_reg = DMA5_PTR2,
150 .cnt1_reg = DMA5_CNT1,
151 .cnt2_reg = DMA5_CNT2,
/* seventh channel: DMA6 bank, FIFO window at 0x7000 */
155 .cmds_start = 0x10190,
156 .ctrl_start = 0x10480,
158 .fifo_start = 0x7000,
160 .ptr1_reg = DMA6_PTR1,
161 .ptr2_reg = DMA6_PTR2,
162 .cnt1_reg = DMA6_CNT1,
163 .cnt2_reg = DMA6_CNT2,
/* eighth channel: DMA7 bank */
172 .ptr1_reg = DMA7_PTR1,
173 .ptr2_reg = DMA7_PTR2,
174 .cnt1_reg = DMA7_CNT1,
175 .cnt2_reg = DMA7_CNT2,
/* ninth channel: DMA8 bank */
184 .ptr1_reg = DMA8_PTR1,
185 .ptr2_reg = DMA8_PTR2,
186 .cnt1_reg = DMA8_CNT1,
187 .cnt2_reg = DMA8_CNT2,
/* Per-DMA-channel SRAM layout for the cx23887/8 bridge variant.
 * Same structure as cx23885_sram_channels but with different
 * ctrl_start offsets (0x105b0 base rather than 0x10380).
 * NOTE(review): interior lines (names, .cdt/.fifo fields, braces)
 * are missing from this extraction — verify against canonical source.
 */
191 static struct sram_channel cx23887_sram_channels[] = {
194 .cmds_start = 0x10000,
195 .ctrl_start = 0x105b0,
/* first channel: DMA1 bank */
199 .ptr1_reg = DMA1_PTR1,
200 .ptr2_reg = DMA1_PTR2,
201 .cnt1_reg = DMA1_CNT1,
202 .cnt2_reg = DMA1_CNT2,
/* VBI capture channel for video port A, DMA2 bank, FIFO at 0x3000 */
205 .name = "VID A (VBI)",
206 .cmds_start = 0x10050,
207 .ctrl_start = 0x105F0,
209 .fifo_start = 0x3000,
211 .ptr1_reg = DMA2_PTR1,
212 .ptr2_reg = DMA2_PTR2,
213 .cnt1_reg = DMA2_CNT1,
214 .cnt2_reg = DMA2_CNT2,
/* third channel: DMA3 bank, FIFO at 0x5000 */
218 .cmds_start = 0x100A0,
219 .ctrl_start = 0x10630,
221 .fifo_start = 0x5000,
223 .ptr1_reg = DMA3_PTR1,
224 .ptr2_reg = DMA3_PTR2,
225 .cnt1_reg = DMA3_CNT1,
226 .cnt2_reg = DMA3_CNT2,
/* fourth channel: DMA4 bank */
235 .ptr1_reg = DMA4_PTR1,
236 .ptr2_reg = DMA4_PTR2,
237 .cnt1_reg = DMA4_CNT1,
238 .cnt2_reg = DMA4_CNT2,
/* fifth channel: DMA5 bank */
247 .ptr1_reg = DMA5_PTR1,
248 .ptr2_reg = DMA5_PTR2,
249 .cnt1_reg = DMA5_CNT1,
250 .cnt2_reg = DMA5_CNT2,
/* sixth channel: FIFO at 0x6000; also mapped onto the DMA5 bank —
 * mirrors the cx23885 table above; confirm against the datasheet */
254 .cmds_start = 0x10140,
255 .ctrl_start = 0x10670,
257 .fifo_start = 0x6000,
259 .ptr1_reg = DMA5_PTR1,
260 .ptr2_reg = DMA5_PTR2,
261 .cnt1_reg = DMA5_CNT1,
262 .cnt2_reg = DMA5_CNT2,
/* seventh channel: DMA6 bank, FIFO at 0x7000 */
266 .cmds_start = 0x10190,
267 .ctrl_start = 0x106B0,
269 .fifo_start = 0x7000,
271 .ptr1_reg = DMA6_PTR1,
272 .ptr2_reg = DMA6_PTR2,
273 .cnt1_reg = DMA6_CNT1,
274 .cnt2_reg = DMA6_CNT2,
/* eighth channel: DMA7 bank */
283 .ptr1_reg = DMA7_PTR1,
284 .ptr2_reg = DMA7_PTR2,
285 .cnt1_reg = DMA7_CNT1,
286 .cnt2_reg = DMA7_CNT2,
/* ninth channel: DMA8 bank */
295 .ptr1_reg = DMA8_PTR1,
296 .ptr2_reg = DMA8_PTR2,
297 .cnt1_reg = DMA8_CNT1,
298 .cnt2_reg = DMA8_CNT2,
302 static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
305 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
307 dev->pci_irqmask |= mask;
309 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
312 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
315 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
317 dev->pci_irqmask |= mask;
318 cx_set(PCI_INT_MSK, mask);
320 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
323 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
327 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
329 v = mask & dev->pci_irqmask;
331 cx_set(PCI_INT_MSK, v);
333 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Enable every interrupt source previously registered in pci_irqmask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
341 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
344 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
346 cx_clear(PCI_INT_MSK, mask);
348 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Disable all PCI interrupt sources in hardware (mask copy preserved). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
356 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
359 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
361 dev->pci_irqmask &= ~mask;
362 cx_clear(PCI_INT_MSK, mask);
364 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
367 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
371 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
373 v = cx_read(PCI_INT_MSK);
375 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
379 static int cx23885_risc_decode(u32 risc)
381 static char *instr[16] = {
382 [RISC_SYNC >> 28] = "sync",
383 [RISC_WRITE >> 28] = "write",
384 [RISC_WRITEC >> 28] = "writec",
385 [RISC_READ >> 28] = "read",
386 [RISC_READC >> 28] = "readc",
387 [RISC_JUMP >> 28] = "jump",
388 [RISC_SKIP >> 28] = "skip",
389 [RISC_WRITERM >> 28] = "writerm",
390 [RISC_WRITECM >> 28] = "writecm",
391 [RISC_WRITECR >> 28] = "writecr",
393 static int incr[16] = {
394 [RISC_WRITE >> 28] = 3,
395 [RISC_JUMP >> 28] = 3,
396 [RISC_SKIP >> 28] = 1,
397 [RISC_SYNC >> 28] = 1,
398 [RISC_WRITERM >> 28] = 3,
399 [RISC_WRITECM >> 28] = 3,
400 [RISC_WRITECR >> 28] = 4,
402 static char *bits[] = {
403 "12", "13", "14", "resync",
404 "cnt0", "cnt1", "18", "19",
405 "20", "21", "22", "23",
406 "irq1", "irq2", "eol", "sol",
410 printk("0x%08x [ %s", risc,
411 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
412 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
413 if (risc & (1 << (i + 12)))
414 printk(" %s", bits[i]);
415 printk(" count=%d ]\n", risc & 0xfff);
416 return incr[risc >> 28] ? incr[risc >> 28] : 1;
419 static void cx23885_wakeup(struct cx23885_tsport *port,
420 struct cx23885_dmaqueue *q, u32 count)
422 struct cx23885_dev *dev = port->dev;
423 struct cx23885_buffer *buf;
425 if (list_empty(&q->active))
427 buf = list_entry(q->active.next,
428 struct cx23885_buffer, queue);
430 buf->vb.vb2_buf.timestamp = ktime_get_ns();
431 buf->vb.sequence = q->count++;
432 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
433 buf->vb.vb2_buf.index,
435 list_del(&buf->queue);
436 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
439 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
440 struct sram_channel *ch,
441 unsigned int bpl, u32 risc)
443 unsigned int i, lines;
446 if (ch->cmds_start == 0) {
447 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
449 cx_write(ch->ptr1_reg, 0);
450 cx_write(ch->ptr2_reg, 0);
451 cx_write(ch->cnt2_reg, 0);
452 cx_write(ch->cnt1_reg, 0);
455 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
459 bpl = (bpl + 7) & ~7; /* alignment */
461 lines = ch->fifo_size / bpl;
466 cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
471 for (i = 0; i < lines; i++) {
472 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
473 ch->fifo_start + bpl*i);
474 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
475 cx_write(cdt + 16*i + 4, 0);
476 cx_write(cdt + 16*i + 8, 0);
477 cx_write(cdt + 16*i + 12, 0);
482 cx_write(ch->cmds_start + 0, 8);
484 cx_write(ch->cmds_start + 0, risc);
485 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
486 cx_write(ch->cmds_start + 8, cdt);
487 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
488 cx_write(ch->cmds_start + 16, ch->ctrl_start);
490 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
492 cx_write(ch->cmds_start + 20, 64 >> 2);
493 for (i = 24; i < 80; i += 4)
494 cx_write(ch->cmds_start + i, 0);
497 cx_write(ch->ptr1_reg, ch->fifo_start);
498 cx_write(ch->ptr2_reg, cdt);
499 cx_write(ch->cnt2_reg, (lines*16) >> 3);
500 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
502 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
/* Dump a DMA channel's command block, the first four RISC PC entries,
 * the instruction queue (decoded via cx23885_risc_decode) and the DMA
 * pointer/count registers to the kernel log at KERN_WARNING.  Used for
 * diagnosing a misbehaving channel.
 * NOTE(review): this extraction is missing interior lines — the name[]
 * initializer contents, the 'u32 risc;' declaration and several closing
 * braces; compare against canonical cx23885-core.c before relying on it.
 */
511 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
512 struct sram_channel *ch)
514 static char *name[] = {
531 unsigned int i, j, n;
533 printk(KERN_WARNING "%s: %s - dma channel status dump\n",
534 dev->name, ch->name);
/* one line per named CMDS field */
535 for (i = 0; i < ARRAY_SIZE(name); i++)
536 printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
538 cx_read(ch->cmds_start + 4*i));
/* decode the four RISC program-counter slots after the named fields */
540 for (i = 0; i < 4; i++) {
541 risc = cx_read(ch->cmds_start + 4 * (i + 14));
542 printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
543 cx23885_risc_decode(risc);
/* walk the 64-byte instruction queue, advancing by decoded length */
545 for (i = 0; i < (64 >> 2); i += n) {
546 risc = cx_read(ch->ctrl_start + 4 * i);
547 /* No consideration for bits 63-32 */
549 printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
550 ch->ctrl_start + 4 * i, i);
551 n = cx23885_risc_decode(risc);
552 for (j = 1; j < n; j++) {
553 risc = cx_read(ch->ctrl_start + 4 * (i + j));
554 printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
555 dev->name, i+j, risc, j);
/* summary of the channel's SRAM windows and live register values */
559 printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
560 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
561 printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
562 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
563 printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
564 dev->name, cx_read(ch->ptr1_reg));
565 printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
566 dev->name, cx_read(ch->ptr2_reg));
567 printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
568 dev->name, cx_read(ch->cnt1_reg));
569 printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
570 dev->name, cx_read(ch->cnt2_reg));
573 static void cx23885_risc_disasm(struct cx23885_tsport *port,
574 struct cx23885_riscmem *risc)
576 struct cx23885_dev *dev = port->dev;
577 unsigned int i, j, n;
579 printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
580 dev->name, risc->cpu, (unsigned long)risc->dma);
581 for (i = 0; i < (risc->size >> 2); i += n) {
582 printk(KERN_INFO "%s: %04d: ", dev->name, i);
583 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
584 for (j = 1; j < n; j++)
585 printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
586 dev->name, i + j, risc->cpu[i + j], j);
587 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
592 static void cx23885_shutdown(struct cx23885_dev *dev)
594 /* disable RISC controller */
595 cx_write(DEV_CNTRL2, 0);
597 /* Disable all IR activity */
598 cx_write(IR_CNTRL_REG, 0);
600 /* Disable Video A/B activity */
601 cx_write(VID_A_DMA_CTL, 0);
602 cx_write(VID_B_DMA_CTL, 0);
603 cx_write(VID_C_DMA_CTL, 0);
605 /* Disable Audio activity */
606 cx_write(AUD_INT_DMA_CTL, 0);
607 cx_write(AUD_EXT_DMA_CTL, 0);
609 /* Disable Serial port */
610 cx_write(UART_CTL, 0);
612 /* Disable Interrupts */
613 cx23885_irq_disable_all(dev);
614 cx_write(VID_A_INT_MSK, 0);
615 cx_write(VID_B_INT_MSK, 0);
616 cx_write(VID_C_INT_MSK, 0);
617 cx_write(AUDIO_INT_INT_MSK, 0);
618 cx_write(AUDIO_EXT_INT_MSK, 0);
622 static void cx23885_reset(struct cx23885_dev *dev)
624 dprintk(1, "%s()\n", __func__);
626 cx23885_shutdown(dev);
628 cx_write(PCI_INT_STAT, 0xffffffff);
629 cx_write(VID_A_INT_STAT, 0xffffffff);
630 cx_write(VID_B_INT_STAT, 0xffffffff);
631 cx_write(VID_C_INT_STAT, 0xffffffff);
632 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
633 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
634 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
635 cx_write(PAD_CTRL, 0x00500300);
639 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
641 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
642 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
644 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
645 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
646 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
648 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
649 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
650 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
652 cx23885_gpio_setup(dev);
656 static int cx23885_pci_quirks(struct cx23885_dev *dev)
658 dprintk(1, "%s()\n", __func__);
660 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
661 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
662 * occur on the cx23887 bridge.
664 if (dev->bridge == CX23885_BRIDGE_885)
665 cx_clear(RDR_TLCTL0, 1 << 4);
670 static int get_resources(struct cx23885_dev *dev)
672 if (request_mem_region(pci_resource_start(dev->pci, 0),
673 pci_resource_len(dev->pci, 0),
677 printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
678 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
683 static int cx23885_init_tsport(struct cx23885_dev *dev,
684 struct cx23885_tsport *port, int portno)
686 dprintk(1, "%s(portno=%d)\n", __func__, portno);
688 /* Transport bus init dma queue - Common settings */
689 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
690 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
691 port->vld_misc_val = 0x0;
692 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
694 spin_lock_init(&port->slock);
698 INIT_LIST_HEAD(&port->mpegq.active);
699 mutex_init(&port->frontends.lock);
700 INIT_LIST_HEAD(&port->frontends.felist);
701 port->frontends.active_fe_id = 0;
703 /* This should be hardcoded allow a single frontend
704 * attachment to this tsport, keeping the -dvb.c
705 * code clean and safe.
707 if (!port->num_frontends)
708 port->num_frontends = 1;
712 port->reg_gpcnt = VID_B_GPCNT;
713 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
714 port->reg_dma_ctl = VID_B_DMA_CTL;
715 port->reg_lngth = VID_B_LNGTH;
716 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
717 port->reg_gen_ctrl = VID_B_GEN_CTL;
718 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
719 port->reg_sop_status = VID_B_SOP_STATUS;
720 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
721 port->reg_vld_misc = VID_B_VLD_MISC;
722 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
723 port->reg_src_sel = VID_B_SRC_SEL;
724 port->reg_ts_int_msk = VID_B_INT_MSK;
725 port->reg_ts_int_stat = VID_B_INT_STAT;
726 port->sram_chno = SRAM_CH03; /* VID_B */
727 port->pci_irqmask = 0x02; /* VID_B bit1 */
730 port->reg_gpcnt = VID_C_GPCNT;
731 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
732 port->reg_dma_ctl = VID_C_DMA_CTL;
733 port->reg_lngth = VID_C_LNGTH;
734 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
735 port->reg_gen_ctrl = VID_C_GEN_CTL;
736 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
737 port->reg_sop_status = VID_C_SOP_STATUS;
738 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
739 port->reg_vld_misc = VID_C_VLD_MISC;
740 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
741 port->reg_src_sel = 0;
742 port->reg_ts_int_msk = VID_C_INT_MSK;
743 port->reg_ts_int_stat = VID_C_INT_STAT;
744 port->sram_chno = SRAM_CH06; /* VID_C */
745 port->pci_irqmask = 0x04; /* VID_C bit2 */
754 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
756 switch (cx_read(RDR_CFG2) & 0xff) {
759 dev->hwrevision = 0xa0;
763 dev->hwrevision = 0xa1;
766 /* CX23885-13Z/14Z */
767 dev->hwrevision = 0xb0;
770 if (dev->pci->device == 0x8880) {
771 /* CX23888-21Z/22Z */
772 dev->hwrevision = 0xc0;
775 dev->hwrevision = 0xa4;
779 if (dev->pci->device == 0x8880) {
781 dev->hwrevision = 0xd0;
783 /* CX23885-15Z, CX23888-31Z */
784 dev->hwrevision = 0xa5;
789 dev->hwrevision = 0xc0;
793 dev->hwrevision = 0xb1;
796 printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
797 __func__, dev->hwrevision);
800 printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
801 __func__, dev->hwrevision);
803 printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
804 __func__, dev->hwrevision);
807 /* Find the first v4l2_subdev member of the group id in hw */
808 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
810 struct v4l2_subdev *result = NULL;
811 struct v4l2_subdev *sd;
813 spin_lock(&dev->v4l2_dev.lock);
814 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
815 if (sd->grp_id == hw) {
820 spin_unlock(&dev->v4l2_dev.lock);
/* One-time setup of a newly probed bridge: init locks, identify the
 * board (insmod override, then PCI subsystem-id table), choose bridge
 * type and clock frequency, describe the three i2c masters, init the
 * TS ports, claim and map MMIO, then register the i2c/video/dvb/417/IR
 * sub-drivers as the board configuration requires.
 * NOTE(review): this extraction is missing interior lines (gaps in the
 * embedded numbering — closing braces, error paths, variable decls
 * such as 'int i'); compare against canonical cx23885-core.c before
 * relying on control flow here.
 */
824 static int cx23885_dev_setup(struct cx23885_dev *dev)
828 spin_lock_init(&dev->pci_irqmask_lock);
829 spin_lock_init(&dev->slock);
831 mutex_init(&dev->lock);
832 mutex_init(&dev->gpio_lock);
834 atomic_inc(&dev->refcount);
/* device index and printable name, e.g. "cx23885[0]" */
836 dev->nr = cx23885_devcount++;
837 sprintf(dev->name, "cx23885[%d]", dev->nr);
839 /* Configure the internal memory */
840 if (dev->pci->device == 0x8880) {
841 /* Could be 887 or 888, assume a default */
842 dev->bridge = CX23885_BRIDGE_887;
843 /* Apply a sensible clock frequency for the PCIe bridge */
844 dev->clk_freq = 25000000;
845 dev->sram_channels = cx23887_sram_channels;
847 if (dev->pci->device == 0x8852) {
848 dev->bridge = CX23885_BRIDGE_885;
849 /* Apply a sensible clock frequency for the PCIe bridge */
850 dev->clk_freq = 28000000;
851 dev->sram_channels = cx23885_sram_channels;
855 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
856 __func__, dev->bridge);
/* board selection: insmod card= parameter wins, else match the PCI
 * subsystem vendor/device against the known-boards table */
860 if (card[dev->nr] < cx23885_bcount)
861 dev->board = card[dev->nr];
862 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
863 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
864 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
865 dev->board = cx23885_subids[i].card;
866 if (UNSET == dev->board) {
867 dev->board = CX23885_BOARD_UNKNOWN;
868 cx23885_card_list(dev);
871 /* If the user specific a clk freq override, apply it */
872 if (cx23885_boards[dev->board].clk_freq > 0)
873 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
875 if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
876 dev->pci->subsystem_device == 0x7137) {
877 /* Hauppauge ImpactVCBe device ID 0x7137 is populated
878 * with an 888, and a 25Mhz crystal, instead of the
879 * usual third overtone 50Mhz. The default clock rate must
880 * be overridden so the cx25840 is properly configured
882 dev->clk_freq = 25000000;
885 dev->pci_bus = dev->pci->bus->number;
886 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
/* register the PCI-level interrupt sources this core handles */
887 cx23885_irq_add(dev, 0x001f00);
889 /* External Master 1 Bus */
890 dev->i2c_bus[0].nr = 0;
891 dev->i2c_bus[0].dev = dev;
892 dev->i2c_bus[0].reg_stat = I2C1_STAT;
893 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
894 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
895 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
896 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
897 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
899 /* External Master 2 Bus */
900 dev->i2c_bus[1].nr = 1;
901 dev->i2c_bus[1].dev = dev;
902 dev->i2c_bus[1].reg_stat = I2C2_STAT;
903 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
904 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
905 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
906 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
907 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
909 /* Internal Master 3 Bus */
910 dev->i2c_bus[2].nr = 2;
911 dev->i2c_bus[2].dev = dev;
912 dev->i2c_bus[2].reg_stat = I2C3_STAT;
913 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
914 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
915 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
916 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
917 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
/* init each TS port the board actually uses (DVB or 417 encoder) */
919 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
920 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
921 cx23885_init_tsport(dev, &dev->ts1, 1);
923 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
924 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
925 cx23885_init_tsport(dev, &dev->ts2, 2);
927 if (get_resources(dev) < 0) {
928 printk(KERN_ERR "CORE %s No more PCIe resources for "
929 "subsystem: %04x:%04x\n",
930 dev->name, dev->pci->subsystem_vendor,
931 dev->pci->subsystem_device);
/* map BAR 0; bmmio is the byte-wise alias of the same mapping */
938 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
939 pci_resource_len(dev->pci, 0));
941 dev->bmmio = (u8 __iomem *)dev->lmmio;
943 printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
944 dev->name, dev->pci->subsystem_vendor,
945 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
946 dev->board, card[dev->nr] == dev->board ?
947 "insmod option" : "autodetected");
949 cx23885_pci_quirks(dev);
951 /* Assume some sensible defaults */
952 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
953 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
954 dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
955 dev->radio_type = cx23885_boards[dev->board].radio_type;
956 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
958 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
959 __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
960 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
961 __func__, dev->radio_type, dev->radio_addr);
963 /* The cx23417 encoder has GPIO's that need to be initialised
964 * before DVB, so that demodulators and tuners are out of
965 * reset before DVB uses them.
967 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
968 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
969 cx23885_mc417_init(dev);
/* bring up i2c, then board-specific wiring, subdevs and IR */
974 cx23885_i2c_register(&dev->i2c_bus[0]);
975 cx23885_i2c_register(&dev->i2c_bus[1]);
976 cx23885_i2c_register(&dev->i2c_bus[2]);
977 cx23885_card_setup(dev);
978 call_all(dev, core, s_power, 0);
979 cx23885_ir_init(dev);
981 if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
983 * GPIOs 9/8 are input detection bits for the breakout video
984 * (gpio 8) and audio (gpio 9) cables. When they're attached,
985 * this gpios are pulled high. Make sure these GPIOs are marked
988 cx23885_gpio_enable(dev, 0x300, 0);
/* register the analog/digital sub-drivers per the board layout */
991 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
992 if (cx23885_video_register(dev) < 0) {
993 printk(KERN_ERR "%s() Failed to register analog "
994 "video adapters on VID_A\n", __func__);
998 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
999 if (cx23885_boards[dev->board].num_fds_portb)
1000 dev->ts1.num_frontends =
1001 cx23885_boards[dev->board].num_fds_portb;
1002 if (cx23885_dvb_register(&dev->ts1) < 0) {
1003 printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
1007 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1008 if (cx23885_417_register(dev) < 0) {
1010 "%s() Failed to register 417 on VID_B\n",
1015 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1016 if (cx23885_boards[dev->board].num_fds_portc)
1017 dev->ts2.num_frontends =
1018 cx23885_boards[dev->board].num_fds_portc;
1019 if (cx23885_dvb_register(&dev->ts2) < 0) {
1021 "%s() Failed to register dvb on VID_C\n",
1025 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1026 if (cx23885_417_register(dev) < 0) {
1028 "%s() Failed to register 417 on VID_C\n",
1033 cx23885_dev_checkrevision(dev);
1035 /* disable MSI for NetUP cards, otherwise CI is not working */
1036 if (cx23885_boards[dev->board].ci_type > 0)
1037 cx_clear(RDR_RDRCTL1, 1 << 8);
1039 switch (dev->board) {
1040 case CX23885_BOARD_TEVII_S470:
1041 case CX23885_BOARD_TEVII_S471:
1042 cx_clear(RDR_RDRCTL1, 1 << 8);
1049 static void cx23885_dev_unregister(struct cx23885_dev *dev)
1051 release_mem_region(pci_resource_start(dev->pci, 0),
1052 pci_resource_len(dev->pci, 0));
1054 if (!atomic_dec_and_test(&dev->refcount))
1057 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1058 cx23885_video_unregister(dev);
1060 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1061 cx23885_dvb_unregister(&dev->ts1);
1063 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1064 cx23885_417_unregister(dev);
1066 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1067 cx23885_dvb_unregister(&dev->ts2);
1069 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1070 cx23885_417_unregister(dev);
1072 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1073 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1074 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1076 iounmap(dev->lmmio);
1079 static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1080 unsigned int offset, u32 sync_line,
1081 unsigned int bpl, unsigned int padding,
1082 unsigned int lines, unsigned int lpi, bool jump)
1084 struct scatterlist *sg;
1085 unsigned int line, todo, sol;
1089 *(rp++) = cpu_to_le32(RISC_JUMP);
1090 *(rp++) = cpu_to_le32(0);
1091 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1094 /* sync instruction */
1095 if (sync_line != NO_SYNC_LINE)
1096 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1100 for (line = 0; line < lines; line++) {
1101 while (offset && offset >= sg_dma_len(sg)) {
1102 offset -= sg_dma_len(sg);
1106 if (lpi && line > 0 && !(line % lpi))
1107 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
1111 if (bpl <= sg_dma_len(sg)-offset) {
1112 /* fits into current chunk */
1113 *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
1114 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1115 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1118 /* scanline needs to be split */
1120 *(rp++) = cpu_to_le32(RISC_WRITE|sol|
1121 (sg_dma_len(sg)-offset));
1122 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1123 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1124 todo -= (sg_dma_len(sg)-offset);
1127 while (todo > sg_dma_len(sg)) {
1128 *(rp++) = cpu_to_le32(RISC_WRITE|
1130 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1131 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1132 todo -= sg_dma_len(sg);
1135 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1136 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1137 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1146 int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1147 struct scatterlist *sglist, unsigned int top_offset,
1148 unsigned int bottom_offset, unsigned int bpl,
1149 unsigned int padding, unsigned int lines)
1151 u32 instructions, fields;
1155 if (UNSET != top_offset)
1157 if (UNSET != bottom_offset)
1160 /* estimate risc mem: worst case is one write per page border +
1161 one write per scan line + syncs + jump (all 2 dwords). Padding
1162 can cause next bpl to start close to a page border. First DMA
1163 region may be smaller than PAGE_SIZE */
1164 /* write and jump need and extra dword */
1165 instructions = fields * (1 + ((bpl + padding) * lines)
1166 / PAGE_SIZE + lines);
1168 risc->size = instructions * 12;
1169 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1170 if (risc->cpu == NULL)
1173 /* write risc instructions */
1175 if (UNSET != top_offset)
1176 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1177 bpl, padding, lines, 0, true);
1178 if (UNSET != bottom_offset)
1179 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1180 bpl, padding, lines, 0, UNSET == top_offset);
1182 /* save pointer to jmp instruction address */
1184 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1188 int cx23885_risc_databuffer(struct pci_dev *pci,
1189 struct cx23885_riscmem *risc,
1190 struct scatterlist *sglist,
1192 unsigned int lines, unsigned int lpi)
1197 /* estimate risc mem: worst case is one write per page border +
1198 one write per scan line + syncs + jump (all 2 dwords). Here
1199 there is no padding and no sync. First DMA region may be smaller
1201 /* Jump and write need an extra dword */
1202 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1205 risc->size = instructions * 12;
1206 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1207 if (risc->cpu == NULL)
1210 /* write risc instructions */
1212 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1213 bpl, 0, lines, lpi, lpi == 0);
1215 /* save pointer to jmp instruction address */
1217 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1221 int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1222 struct scatterlist *sglist, unsigned int top_offset,
1223 unsigned int bottom_offset, unsigned int bpl,
1224 unsigned int padding, unsigned int lines)
1226 u32 instructions, fields;
1230 if (UNSET != top_offset)
1232 if (UNSET != bottom_offset)
1235 /* estimate risc mem: worst case is one write per page border +
1236 one write per scan line + syncs + jump (all 2 dwords). Padding
1237 can cause next bpl to start close to a page border. First DMA
1238 region may be smaller than PAGE_SIZE */
1239 /* write and jump need and extra dword */
1240 instructions = fields * (1 + ((bpl + padding) * lines)
1241 / PAGE_SIZE + lines);
1243 risc->size = instructions * 12;
1244 risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
1245 if (risc->cpu == NULL)
1247 /* write risc instructions */
1250 /* Sync to line 6, so US CC line 21 will appear in line '12'
1251 * in the userland vbi payload */
1252 if (UNSET != top_offset)
1253 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1254 bpl, padding, lines, 0, true);
1256 if (UNSET != bottom_offset)
1257 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1258 bpl, padding, lines, 0, UNSET == top_offset);
1262 /* save pointer to jmp instruction address */
1264 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
1269 void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
1271 struct cx23885_riscmem *risc = &buf->risc;
1273 BUG_ON(in_interrupt());
1274 pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
/*
 * cx23885_tsport_reg_dump() - dump bridge-level and per-port transport
 * stream registers at debug level 1.
 *
 * Purely diagnostic: reads each register of interest and prints both the
 * register address (for the per-port ones held in the tsport struct) and
 * its current value. src_sel is only dumped when the port has one.
 * NOTE(review): the cx_read() argument lines for PAD_CTRL (orig 1295) and
 * GPIO2 (orig 1299) are missing from this extraction.
 */
1277 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1279 struct cx23885_dev *dev = port->dev;
1281 dprintk(1, "%s() Register Dump\n", __func__);
/* Bridge-wide registers first ... */
1282 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
1283 cx_read(DEV_CNTRL2));
1284 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
1285 cx23885_irq_get_mask(dev));
1286 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
1287 cx_read(AUDIO_INT_INT_MSK));
1288 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
1289 cx_read(AUD_INT_DMA_CTL));
1290 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
1291 cx_read(AUDIO_EXT_INT_MSK));
1292 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
1293 cx_read(AUD_EXT_DMA_CTL));
1294 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
1296 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
1297 cx_read(ALT_PIN_OUT_SEL));
1298 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
/* ... then the per-port register set (addresses stored in *port). */
1300 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
1301 port->reg_gpcnt, cx_read(port->reg_gpcnt));
1302 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
1303 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
1304 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
1305 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
/* src_sel is optional; 0 means the port has no source-select register. */
1306 if (port->reg_src_sel)
1307 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1308 port->reg_src_sel, cx_read(port->reg_src_sel));
1309 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
1310 port->reg_lngth, cx_read(port->reg_lngth));
1311 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
1312 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
1313 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
1314 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
1315 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
1316 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
1317 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
1318 port->reg_sop_status, cx_read(port->reg_sop_status));
1319 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
1320 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
1321 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
1322 port->reg_vld_misc, cx_read(port->reg_vld_misc));
1323 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
1324 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
1325 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
1326 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
/*
 * cx23885_start_dma() - program and start transport-stream DMA on a port.
 *
 * Sequence (order matters on this hardware):
 *  1. stop the port's FIFO/RISC engine,
 *  2. program the SRAM channel with the packet size and the buffer's
 *     RISC program address,
 *  3. write the TS packet length and (if present) the source select,
 *  4. program SOP/clock/misc/general control from the per-port values,
 *  5. reset the GP counter, set PAD_CTRL pin directions per board config,
 *  6. enable TS interrupts + DMA and the RISC controller.
 * For boards with the cx23417 encoder on port B the AV clock is gated
 * off around the setup and re-enabled at the end.
 * NOTE(review): this extraction drops several lines (local declarations
 * such as 'u32 reg', dprintk/else branches, the return statement) — the
 * visible statements are a subset of the original function.
 */
1329 int cx23885_start_dma(struct cx23885_tsport *port,
1330 struct cx23885_dmaqueue *q,
1331 struct cx23885_buffer *buf)
1333 struct cx23885_dev *dev = port->dev;
1336 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1337 dev->width, dev->height, dev->field);
1339 /* Stop the fifo and risc engine for this port */
1340 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1342 /* setup fifo + format */
1343 cx23885_sram_channel_setup(dev,
1344 &dev->sram_channels[port->sram_chno],
1345 port->ts_packet_size, buf->risc.dma);
1347 cx23885_sram_channel_dump(dev,
1348 &dev->sram_channels[port->sram_chno]);
1349 cx23885_risc_disasm(port, &buf->risc);
1352 /* write TS length to chip */
1353 cx_write(port->reg_lngth, port->ts_packet_size);
/* Sanity check: at least one of port B/C must be configured for DVB. */
1355 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1356 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1357 printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1359 cx23885_boards[dev->board].portb,
1360 cx23885_boards[dev->board].portc);
/* Gate the AV clock off while reprogramming an encoder board. */
1364 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1365 cx23885_av_clk(dev, 0);
1369 /* If the port supports SRC SELECT, configure it */
1370 if (port->reg_src_sel)
1371 cx_write(port->reg_src_sel, port->src_sel_val);
/* Program the per-port control registers from board-specific values. */
1373 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1374 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1375 cx_write(port->reg_vld_misc, port->vld_misc_val);
1376 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1379 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1380 /* reset counter to zero */
1381 cx_write(port->reg_gpcnt_ctl, 3);
1384 /* Set VIDB pins to input */
1385 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1386 reg = cx_read(PAD_CTRL);
1387 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1388 cx_write(PAD_CTRL, reg);
1391 /* Set VIDC pins to input */
1392 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1393 reg = cx_read(PAD_CTRL);
1394 reg &= ~0x4; /* Clear TS2_SOP_OE */
1395 cx_write(PAD_CTRL, reg);
/* Encoder on port B drives the pins instead: different PAD_CTRL setup. */
1398 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1400 reg = cx_read(PAD_CTRL);
1401 reg = reg & ~0x1; /* Clear TS1_OE */
1403 /* FIXME, bit 2 writing here is questionable */
1404 /* set TS1_SOP_OE and TS1_OE_HI */
1406 cx_write(PAD_CTRL, reg);
1408 /* FIXME and these two registers should be documented. */
1409 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1410 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1413 switch (dev->bridge) {
1414 case CX23885_BRIDGE_885:
1415 case CX23885_BRIDGE_887:
1416 case CX23885_BRIDGE_888:
1418 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
/* Unmask TS interrupts, start DMA, then open the PCI-level IRQ path. */
1419 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1420 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1421 cx23885_irq_add(dev, port->pci_irqmask);
1422 cx23885_irq_enable_all(dev);
1428 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1430 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1431 cx23885_av_clk(dev, 1);
/* Only dump the register state when debugging is enabled. */
1434 cx23885_tsport_reg_dump(port);
/*
 * cx23885_stop_dma() - stop transport-stream DMA on a port.
 *
 * Masks the port's TS interrupts and clears its DMA control bits.  For
 * boards with the cx23417 encoder on port B it additionally restores the
 * PAD_CTRL pin state, clears source select, parks gen_ctrl and gates the
 * AV clock off.
 * NOTE(review): lines are missing from this extraction (the 'u32 reg'
 * declaration, the PAD_CTRL bit manipulation between orig 1452 and 1459,
 * and the return statement).
 */
1439 static int cx23885_stop_dma(struct cx23885_tsport *port)
1441 struct cx23885_dev *dev = port->dev;
1444 dprintk(1, "%s()\n", __func__);
1446 /* Stop interrupts and DMA */
1447 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1448 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1450 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1452 reg = cx_read(PAD_CTRL);
1457 /* clear TS1_SOP_OE and TS1_OE_HI */
1459 cx_write(PAD_CTRL, reg);
1460 cx_write(port->reg_src_sel, 0);
1461 cx_write(port->reg_gen_ctrl, 8);
1465 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1466 cx23885_av_clk(dev, 0);
1471 /* ------------------------------------------------------------------ */
/*
 * cx23885_buf_prepare() - validate a vb2 buffer and build its RISC
 * program for TS capture.
 *
 * The required payload is ts_packet_size * ts_packet_count; buffers whose
 * plane is smaller are rejected.  On success the plane payload is set and
 * a RISC data-buffer program is generated over the buffer's scatterlist.
 * NOTE(review): the error return (orig 1481) and the final return of the
 * cx23885_risc_databuffer() result are missing from this extraction.
 */
1473 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1475 struct cx23885_dev *dev = port->dev;
1476 int size = port->ts_packet_size * port->ts_packet_count;
1477 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1479 dprintk(1, "%s: %p\n", __func__, buf);
1480 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1482 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
/* One RISC write per TS packet; sgt supplies the DMA addresses. */
1484 cx23885_risc_databuffer(dev->pci, &buf->risc,
1486 port->ts_packet_size, port->ts_packet_count, 0);
1491 * The risc program for each buffer works as follows: it starts with a simple
1492 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1493 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1494 * the initial JUMP).
1496 * This is the risc program of the first buffer to be queued if the active list
1497 * is empty and it just keeps DMAing this buffer without generating any
1500 * If a new buffer is added then the initial JUMP in the code for that buffer
1501 * will generate an interrupt which signals that the previous buffer has been
1502 * DMAed successfully and that it can be returned to userspace.
1504 * It also sets the final jump of the previous buffer to the start of the new
1505 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1506 * atomic u32 write, so there is no race condition.
1508 * The end-result of all this is that you only get an interrupt when a buffer
1509 * is ready, so the control flow is very easy.
/*
 * cx23885_buf_queue() - chain a prepared buffer into the active DMA list.
 *
 * See the block comment above: the buffer's RISC program starts with a
 * JUMP-to-self+12 (a NOP when first, an IRQ-generating JUMP when chained)
 * and ends with a JUMP back to start+12.  Chaining is done by rewriting
 * the previous buffer's final jump target — a single u32 store, so no
 * race with the running DMA.
 * NOTE(review): the else branch of the list_empty() test (orig 1528) is
 * missing from this extraction; the RISC_IRQ1 lines below belong to it.
 */
1511 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1513 struct cx23885_buffer *prev;
1514 struct cx23885_dev *dev = port->dev;
1515 struct cx23885_dmaqueue *cx88q = &port->mpegq;
1516 unsigned long flags;
/* Initial jump skips itself; final jump loops back (64-bit addr, hi=0). */
1518 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
1519 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
1520 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
1521 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1523 spin_lock_irqsave(&dev->slock, flags);
1524 if (list_empty(&cx88q->active)) {
1525 list_add_tail(&buf->queue, &cx88q->active);
1526 dprintk(1, "[%p/%d] %s - first active\n",
1527 buf, buf->vb.vb2_buf.index, __func__);
/* Non-empty queue: make our initial jump raise IRQ1, then splice in. */
1529 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
1530 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1532 list_add_tail(&buf->queue, &cx88q->active);
1533 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1534 dprintk(1, "[%p/%d] %s - append to active\n",
1535 buf, buf->vb.vb2_buf.index, __func__);
1537 spin_unlock_irqrestore(&dev->slock, flags);
1540 /* ----------------------------------------------------------- */
/*
 * do_cancel_buffers() - return every queued buffer to vb2 with an error.
 *
 * Drains the port's active list under port->slock, completing each buffer
 * with VB2_BUF_STATE_ERROR.  @reason is only used in the debug message.
 */
1542 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1544 struct cx23885_dev *dev = port->dev;
1545 struct cx23885_dmaqueue *q = &port->mpegq;
1546 struct cx23885_buffer *buf;
1547 unsigned long flags;
1549 spin_lock_irqsave(&port->slock, flags);
1550 while (!list_empty(&q->active)) {
1551 buf = list_entry(q->active.next, struct cx23885_buffer,
1553 list_del(&buf->queue);
1554 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1555 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1556 buf, buf->vb.vb2_buf.index, reason,
1557 (unsigned long)buf->risc.dma);
1559 spin_unlock_irqrestore(&port->slock, flags);
/*
 * cx23885_cancel_buffers() - stop DMA on a port and error out all queued
 * buffers.  Called on stream stop/teardown; DMA must be stopped before
 * the buffers are handed back.
 */
1562 void cx23885_cancel_buffers(struct cx23885_tsport *port)
1564 struct cx23885_dev *dev = port->dev;
1566 dprintk(1, "%s()\n", __func__);
1567 cx23885_stop_dma(port);
1568 do_cancel_buffers(port, "cancel");
/*
 * cx23885_irq_417() - service a cx23417 (MPEG encoder) port interrupt.
 *
 * On any error condition (bad packet, opcode error, sync loss, overflow —
 * for both video and VBI paths) the port DMA is stopped, the SRAM channel
 * dumped and the encoder checked.  On a normal RISCI1 completion the
 * waiting buffer is completed via cx23885_wakeup().  The handled status
 * bits are acknowledged by writing them back to the TS int status reg.
 * NOTE(review): local declarations ('count', 'handled') and the return
 * statement are missing from this extraction (orig 1575-1580, 1621+).
 */
1571 int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1573 /* FIXME: port1 assumption here. */
1574 struct cx23885_tsport *port = &dev->ts1;
1581 count = cx_read(port->reg_gpcnt);
1582 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1583 status, cx_read(port->reg_ts_int_msk), count);
1585 if ((status & VID_B_MSK_BAD_PKT) ||
1586 (status & VID_B_MSK_OPC_ERR) ||
1587 (status & VID_B_MSK_VBI_OPC_ERR) ||
1588 (status & VID_B_MSK_SYNC) ||
1589 (status & VID_B_MSK_VBI_SYNC) ||
1590 (status & VID_B_MSK_OF) ||
1591 (status & VID_B_MSK_VBI_OF)) {
1592 printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
1593 "= 0x%x\n", dev->name, status);
/* Per-bit breakdown for debugging. */
1594 if (status & VID_B_MSK_BAD_PKT)
1595 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1596 if (status & VID_B_MSK_OPC_ERR)
1597 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1598 if (status & VID_B_MSK_VBI_OPC_ERR)
1599 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1600 if (status & VID_B_MSK_SYNC)
1601 dprintk(1, " VID_B_MSK_SYNC\n");
1602 if (status & VID_B_MSK_VBI_SYNC)
1603 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1604 if (status & VID_B_MSK_OF)
1605 dprintk(1, " VID_B_MSK_OF\n");
1606 if (status & VID_B_MSK_VBI_OF)
1607 dprintk(1, " VID_B_MSK_VBI_OF\n");
/* Error recovery: stop DMA, dump SRAM state, re-check the encoder. */
1609 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1610 cx23885_sram_channel_dump(dev,
1611 &dev->sram_channels[port->sram_chno]);
1612 cx23885_417_check_encoder(dev);
1613 } else if (status & VID_B_MSK_RISCI1) {
1614 dprintk(7, " VID_B_MSK_RISCI1\n");
/* Normal completion: wake the mpeg queue under the port lock. */
1615 spin_lock(&port->slock);
1616 cx23885_wakeup(port, &port->mpegq, count);
1617 spin_unlock(&port->slock);
/* Ack all serviced status bits. */
1620 cx_write(port->reg_ts_int_stat, status);
/*
 * cx23885_irq_ts() - service a DVB transport-stream port interrupt.
 *
 * Mirrors cx23885_irq_417() for the plain TS ports: error conditions stop
 * the port's DMA and dump the SRAM channel; a RISCI1 completion wakes the
 * mpeg queue with the current GP count.  Status bits are acknowledged by
 * writing them back.
 * NOTE(review): local declarations ('count', 'handled') and the return
 * statement are missing from this extraction.
 */
1627 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1629 struct cx23885_dev *dev = port->dev;
1633 if ((status & VID_BC_MSK_OPC_ERR) ||
1634 (status & VID_BC_MSK_BAD_PKT) ||
1635 (status & VID_BC_MSK_SYNC) ||
1636 (status & VID_BC_MSK_OF)) {
1638 if (status & VID_BC_MSK_OPC_ERR)
1639 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1640 VID_BC_MSK_OPC_ERR);
1642 if (status & VID_BC_MSK_BAD_PKT)
1643 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1644 VID_BC_MSK_BAD_PKT);
1646 if (status & VID_BC_MSK_SYNC)
1647 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1650 if (status & VID_BC_MSK_OF)
1651 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1654 printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);
/* Error recovery: stop this port's DMA and dump the SRAM channel. */
1656 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1657 cx23885_sram_channel_dump(dev,
1658 &dev->sram_channels[port->sram_chno]);
1660 } else if (status & VID_BC_MSK_RISCI1) {
1662 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
/* Normal completion: read GP count and wake the queue under the lock. */
1664 spin_lock(&port->slock);
1665 count = cx_read(port->reg_gpcnt);
1666 cx23885_wakeup(port, &port->mpegq, count);
1667 spin_unlock(&port->slock);
/* Ack all serviced status bits. */
1671 cx_write(port->reg_ts_int_stat, status);
/*
 * cx23885_irq() - top-level (shared) PCI interrupt handler.
 *
 * Reads every relevant status/mask pair (PCI, video A, audio, TS1/VID_B,
 * TS2/VID_C), bails out early when nothing is pending, then dispatches:
 *  - CI slot events (NetUp / Altera, selected by board ci_type),
 *  - TS port interrupts to cx23885_irq_ts() or cx23885_irq_417()
 *    depending on whether the port carries DVB or the 417 encoder,
 *  - video and audio interrupts to their handlers,
 *  - IR interrupts to the IR v4l2 subdevice ISR,
 *  - AV core interrupts to deferred work (the AV core cannot be serviced
 *    in hard-IRQ context, so its IRQ is masked and work is scheduled).
 * Finally the handled PCI status bits are acknowledged and
 * IRQ_RETVAL(handled) is returned.
 * NOTE(review): numerous lines are missing from this extraction (the
 * early 'goto out'/label, dprintk argument continuations, several
 * 'handled++' and guard lines) — the visible code is a subset.
 */
1678 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1680 struct cx23885_dev *dev = dev_id;
1681 struct cx23885_tsport *ts1 = &dev->ts1;
1682 struct cx23885_tsport *ts2 = &dev->ts2;
1683 u32 pci_status, pci_mask;
1684 u32 vida_status, vida_mask;
1685 u32 audint_status, audint_mask;
1686 u32 ts1_status, ts1_mask;
1687 u32 ts2_status, ts2_mask;
1688 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1689 int audint_count = 0;
1690 bool subdev_handled;
/* Snapshot all status/mask registers once at entry. */
1692 pci_status = cx_read(PCI_INT_STAT);
1693 pci_mask = cx23885_irq_get_mask(dev);
1694 vida_status = cx_read(VID_A_INT_STAT);
1695 vida_mask = cx_read(VID_A_INT_MSK);
1696 audint_status = cx_read(AUDIO_INT_INT_STAT);
1697 audint_mask = cx_read(AUDIO_INT_INT_MSK);
1698 ts1_status = cx_read(VID_B_INT_STAT);
1699 ts1_mask = cx_read(VID_B_INT_MSK);
1700 ts2_status = cx_read(VID_C_INT_STAT);
1701 ts2_mask = cx_read(VID_C_INT_MSK);
/* Shared IRQ line: nothing pending for us -> not our interrupt. */
1703 if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
1706 vida_count = cx_read(VID_A_GPCNT);
1707 audint_count = cx_read(AUD_INT_A_GPCNT);
1708 ts1_count = cx_read(ts1->reg_gpcnt);
1709 ts2_count = cx_read(ts2->reg_gpcnt);
1710 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1711 pci_status, pci_mask);
1712 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1713 vida_status, vida_mask, vida_count);
1714 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1715 audint_status, audint_mask, audint_count);
1716 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1717 ts1_status, ts1_mask, ts1_count);
1718 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1719 ts2_status, ts2_mask, ts2_count);
/* Verbose per-bit decode of the PCI-level status (debug level 7). */
1721 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1722 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1723 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1724 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1725 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
1726 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1728 if (pci_status & PCI_MSK_RISC_RD)
1729 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1732 if (pci_status & PCI_MSK_RISC_WR)
1733 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1736 if (pci_status & PCI_MSK_AL_RD)
1737 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1740 if (pci_status & PCI_MSK_AL_WR)
1741 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1744 if (pci_status & PCI_MSK_APB_DMA)
1745 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1748 if (pci_status & PCI_MSK_VID_C)
1749 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1752 if (pci_status & PCI_MSK_VID_B)
1753 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1756 if (pci_status & PCI_MSK_VID_A)
1757 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1760 if (pci_status & PCI_MSK_AUD_INT)
1761 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1764 if (pci_status & PCI_MSK_AUD_EXT)
1765 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1768 if (pci_status & PCI_MSK_GPIO0)
1769 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1772 if (pci_status & PCI_MSK_GPIO1)
1773 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1776 if (pci_status & PCI_MSK_AV_CORE)
1777 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1780 if (pci_status & PCI_MSK_IR)
1781 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
/* CI slot interrupts, board-type specific. */
1785 if (cx23885_boards[dev->board].ci_type == 1 &&
1786 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1787 handled += netup_ci_slot_status(dev, pci_status);
1789 if (cx23885_boards[dev->board].ci_type == 2 &&
1790 (pci_status & PCI_MSK_GPIO0))
1791 handled += altera_ci_irq(dev);
/* Port B: DVB TS or 417 encoder, per board config. */
1794 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1795 handled += cx23885_irq_ts(ts1, ts1_status);
1797 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1798 handled += cx23885_irq_417(dev, ts1_status);
/* Port C likewise. */
1802 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1803 handled += cx23885_irq_ts(ts2, ts2_status);
1805 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1806 handled += cx23885_irq_417(dev, ts2_status);
1810 handled += cx23885_video_irq(dev, vida_status);
1813 handled += cx23885_audio_irq(dev, audint_status, audint_mask);
/* IR: delegate to the IR subdevice's interrupt service routine. */
1815 if (pci_status & PCI_MSK_IR) {
1816 subdev_handled = false;
1817 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1818 pci_status, &subdev_handled);
/* AV core cannot be serviced here: mask it and defer to a workqueue. */
1823 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1824 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1825 schedule_work(&dev->cx25840_work);
/* Ack the PCI-level status bits we observed. */
1830 cx_write(PCI_INT_STAT, pci_status);
1832 return IRQ_RETVAL(handled);
/*
 * cx23885_v4l2_dev_notify() - v4l2_device notify callback.
 *
 * Routes IR RX/TX notifications coming from the IR subdevice to the
 * driver's IR handlers.  May run in IRQ context (see case comments), so
 * the handlers must be IRQ-safe.  Notifications from any subdev other
 * than dev->sd_ir are ignored.
 * NOTE(review): the braces, 'break' statements and any default handling
 * of the switch are missing from this extraction.
 */
1835 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1836 unsigned int notification, void *arg)
1838 struct cx23885_dev *dev;
1843 dev = to_cx23885(sd->v4l2_dev);
1845 switch (notification) {
1846 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1847 if (sd == dev->sd_ir)
1848 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1850 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1851 if (sd == dev->sd_ir)
1852 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1857 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1859 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1860 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1861 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1862 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1865 static inline int encoder_on_portb(struct cx23885_dev *dev)
1867 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1870 static inline int encoder_on_portc(struct cx23885_dev *dev)
1872 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1875 /* Mask represents 32 different GPIOs, GPIO's are split into multiple
1876 * registers depending on the board configuration (and whether the
1877 * 417 encoder (with its own GPIOs) is present. Each GPIO bit will
1878 * be pushed into the correct hardware register, regardless of the
1879 * physical location. Certain registers are shared so we sanity check
1880 * and report errors if we think we're tampering with a GPIO that might
1881 * be assigned to the encoder (and used for the host bus).
1883 * GPIO 2 thru 0 - On the cx23885 bridge
1884 * GPIO 18 thru 3 - On the cx23417 host bus interface
1885 * GPIO 23 thru 19 - On the cx25840 a/v core
/*
 * cx23885_gpio_set() - drive selected GPIO bits high.
 *
 * Per the mapping comment above: bits 0-2 live in the bridge's GP0_IO
 * register, bits 3-18 in the cx23417 host bus register MC417_RWD
 * (shifted down by 3), and bits 19-23 (a/v core) are unsupported here.
 * A warning is printed when touching MC417 GPIOs while an encoder owns
 * those ports.
 * NOTE(review): the 'if (mask & 0x00000007)' guard (cf. gpio_clear) and
 * the printk prefix/closing lines appear missing from this extraction.
 */
1888 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1890 cx_set(GP0_IO, mask & 0x7);
1892 if (mask & 0x0007fff8) {
1893 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1895 "%s: Setting GPIO on encoder ports\n",
1897 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
/* Bits 19-23 belong to the cx25840 a/v core - not handled here. */
1901 if (mask & 0x00f80000)
1902 printk(KERN_INFO "%s: Unsupported\n", dev->name);
/*
 * cx23885_gpio_clear() - drive selected GPIO bits low.
 *
 * Same bit mapping as cx23885_gpio_set(): bits 0-2 via GP0_IO, bits 3-18
 * via MC417_RWD (shifted down by 3), bits 19-23 unsupported.  Warns when
 * touching MC417 GPIOs on encoder-equipped ports.
 */
1905 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
1907 if (mask & 0x00000007)
1908 cx_clear(GP0_IO, mask & 0x7);
1910 if (mask & 0x0007fff8) {
1911 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1913 "%s: Clearing GPIO moving on encoder ports\n",
1915 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
/* Bits 19-23 belong to the cx25840 a/v core - not handled here. */
1919 if (mask & 0x00f80000)
1920 printk(KERN_INFO "%s: Unsupported\n", dev->name);
/*
 * cx23885_gpio_get() - read selected GPIO bits.
 *
 * Bits 0-2 are read from GP0_IO (input levels live at bits 8-10, hence
 * the >> 8); bits 3-18 from MC417_RWD, shifted back up by 3 so the
 * result uses the same bit positions as @mask.  Bits 19-23 unsupported.
 * NOTE(review): the two ranges return independently — the early return
 * on the low bits means a mask spanning both groups only reports the
 * low group; the final return line is missing from this extraction.
 */
1923 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
1925 if (mask & 0x00000007)
1926 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
1928 if (mask & 0x0007fff8) {
1929 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1931 "%s: Reading GPIO moving on encoder ports\n",
1933 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
/* Bits 19-23 belong to the cx25840 a/v core - not handled here. */
1937 if (mask & 0x00f80000)
1938 printk(KERN_INFO "%s: Unsupported\n", dev->name);
/*
 * cx23885_gpio_enable() - configure selected GPIO bits as input/output.
 *
 * @asoutput: nonzero to make the bits outputs, zero for inputs.
 * Bits 0-2: GP0_IO direction bits live 16 positions up from the data
 * bits, hence the << 16.  Bits 3-18: MC417_OEN, which is active-low for
 * output (clear = output, set = input — see comment below).  Bits 19-23
 * are not handled.
 */
1943 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
1945 if ((mask & 0x00000007) && asoutput)
1946 cx_set(GP0_IO, (mask & 0x7) << 16);
1947 else if ((mask & 0x00000007) && !asoutput)
1948 cx_clear(GP0_IO, (mask & 0x7) << 16);
1950 if (mask & 0x0007fff8) {
1951 if (encoder_on_portb(dev) || encoder_on_portc(dev))
1953 "%s: Enabling GPIO on encoder ports\n",
1957 /* MC417_OEN is active low for output, write 1 for an input */
1958 if ((mask & 0x0007fff8) && asoutput)
1959 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
1961 else if ((mask & 0x0007fff8) && !asoutput)
1962 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
/*
 * cx23885_initdev() - PCI probe entry point.
 *
 * Allocates the device structure, registers the v4l2_device and its
 * control handler, hooks up the subdev notify machinery, enables the PCI
 * device, performs full device setup, configures busmastering and the
 * 32-bit DMA mask, requests the (shared) IRQ, enables board-specific CI
 * GPIO interrupts, and finally brings up the IR hardware — which must
 * happen only after request_irq() since it can fire immediately.
 * NOTE(review): error-handling labels and several cleanup lines (orig
 * 1975-1977, 1985-1987, 1996-2004, 2018-2020, 2026-2028, 2045-2055) are
 * missing from this extraction; the trailing statements below are the
 * unwind path.
 */
1967 static int cx23885_initdev(struct pci_dev *pci_dev,
1968 const struct pci_device_id *pci_id)
1970 struct cx23885_dev *dev;
1971 struct v4l2_ctrl_handler *hdl;
1974 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1978 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1982 hdl = &dev->ctrl_handler;
1983 v4l2_ctrl_handler_init(hdl, 6);
1988 dev->v4l2_dev.ctrl_handler = hdl;
1990 /* Prepare to handle notifications from subdevices */
1991 cx23885_v4l2_dev_notify_init(dev);
1995 if (pci_enable_device(pci_dev)) {
2000 if (cx23885_dev_setup(dev) < 0) {
2005 /* print pci info */
2006 dev->pci_rev = pci_dev->revision;
2007 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2008 printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
2009 "latency: %d, mmio: 0x%llx\n", dev->name,
2010 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2012 (unsigned long long)pci_resource_start(pci_dev, 0));
2014 pci_set_master(pci_dev);
/* The bridge can only address 32-bit DMA. */
2015 err = pci_set_dma_mask(pci_dev, 0xffffffff);
2017 printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2021 err = request_irq(pci_dev->irq, cx23885_irq,
2022 IRQF_SHARED, dev->name, dev);
2024 printk(KERN_ERR "%s: can't get IRQ %d\n",
2025 dev->name, pci_dev->irq);
/* CI boards need GPIO interrupts for slot status. */
2029 switch (dev->board) {
2030 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2031 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2033 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2034 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2039 * The CX2388[58] IR controller can start firing interrupts when
2040 * enabled, so these have to take place after the cx23885_irq() handler
2041 * is hooked up by the call to request_irq() above.
2043 cx23885_ir_pci_int_enable(dev);
2044 cx23885_input_init(dev);
/* Error unwind: undo setup, free the control handler, unregister v4l2. */
2049 cx23885_dev_unregister(dev);
2051 v4l2_ctrl_handler_free(hdl);
2052 v4l2_device_unregister(&dev->v4l2_dev);
/*
 * cx23885_finidev() - PCI remove entry point.
 *
 * Tears down in reverse order of probe: input/IR first (they can raise
 * interrupts), then device shutdown, IRQ release, PCI disable, and
 * finally the v4l2 control handler and v4l2_device unregistration.
 */
2058 static void cx23885_finidev(struct pci_dev *pci_dev)
2060 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2061 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2063 cx23885_input_fini(dev);
2064 cx23885_ir_fini(dev);
2066 cx23885_shutdown(dev);
2068 /* unregister stuff */
2069 free_irq(pci_dev->irq, dev);
2071 pci_disable_device(pci_dev);
2073 cx23885_dev_unregister(dev);
2074 v4l2_ctrl_handler_free(&dev->ctrl_handler);
2075 v4l2_device_unregister(v4l2_dev);
/*
 * PCI device ID table: two entries matching any subsystem vendor/device,
 * terminated by the empty sentinel.  Exported for module autoloading via
 * MODULE_DEVICE_TABLE below.
 * NOTE(review): the .vendor/.device lines and the brace structure of the
 * initializers are missing from this extraction.
 */
2079 static struct pci_device_id cx23885_pci_tbl[] = {
2084 .subvendor = PCI_ANY_ID,
2085 .subdevice = PCI_ANY_ID,
2090 .subvendor = PCI_ANY_ID,
2091 .subdevice = PCI_ANY_ID,
2093 /* --- end of list --- */
2096 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
/*
 * PCI driver glue: binds the ID table to the probe/remove callbacks.
 * NOTE(review): the .name member (orig 2099) is missing from this
 * extraction.
 */
2098 static struct pci_driver cx23885_pci_driver = {
2100 .id_table = cx23885_pci_tbl,
2101 .probe = cx23885_initdev,
2102 .remove = cx23885_finidev,
/*
 * Module init: announce the driver version and register the PCI driver;
 * returns pci_register_driver()'s status.
 */
2108 static int __init cx23885_init(void)
2110 printk(KERN_INFO "cx23885 driver version %s loaded\n",
2112 return pci_register_driver(&cx23885_pci_driver);
/* Module exit: unregister the PCI driver (triggers finidev per device). */
2115 static void __exit cx23885_fini(void)
2117 pci_unregister_driver(&cx23885_pci_driver);
2120 module_init(cx23885_init);
2121 module_exit(cx23885_fini);