1 // SPDX-License-Identifier: GPL-2.0-or-later
3 /*
 * Driver for the Conexant CX23885 PCIe bridge
5 * Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 */
10 #include <linux/init.h>
11 #include <linux/list.h>
12 #include <linux/module.h>
13 #include <linux/moduleparam.h>
14 #include <linux/kmod.h>
15 #include <linux/kernel.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/interrupt.h>
19 #include <linux/delay.h>
20 #include <asm/div64.h>
21 #include <linux/firmware.h>
24 #include "altera-ci.h"
25 #include "cx23888-ir.h"
26 #include "cx23885-ir.h"
27 #include "cx23885-av.h"
28 #include "cx23885-input.h"
30 MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
31 MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
32 MODULE_LICENSE("GPL");
33 MODULE_VERSION(CX23885_VERSION);
36 /*
 * Some platforms have been found to require periodic resetting of the DMA
37 * engine. Ryzen and XEON platforms are known to be affected. The symptom
38 * encountered is "mpeg risc op code error". Only Ryzen platforms employ
39 * this workaround if the option equals 1. The workaround can be explicitly
40 * disabled for all platforms by setting to 0, the workaround can be forced
41 * on for any platform by setting to 2.
 */
/* DMA reset workaround knob: 0 = force off, 1 = driver detect (default), 2 = force on. */
43 static unsigned int dma_reset_workaround = 1;
44 module_param(dma_reset_workaround, int, 0644);
45 MODULE_PARM_DESC(dma_reset_workaround, "periodic RiSC dma engine reset; 0-force disable, 1-driver detect (default), 2-force enable");
/* Debug verbosity; consumed by the dprintk() macro below. */
47 static unsigned int debug;
48 module_param(debug, int, 0644);
49 MODULE_PARM_DESC(debug, "enable debug messages");
/* Per-device board override; UNSET entries fall back to PCI subsystem-ID autodetect. */
51 static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
52 module_param_array(card, int, NULL, 0444);
53 MODULE_PARM_DESC(card, "card type");
/* Emit a KERN_DEBUG message when the 'debug' module parameter is >= level. */
55 #define dprintk(level, fmt, arg...)\
56 do { if (debug >= level)\
57 printk(KERN_DEBUG pr_fmt("%s: " fmt), \
/* Running count of probed bridges; used to index the card[] option and name devices. */
61 static unsigned int cx23885_devcount;
/* Sentinel passed to the RISC field builder when no resync instruction is wanted. */
63 #define NO_SYNC_LINE (-1U)
65 /* FIXME, these allocations will change when
66 * analog arrives. To be reviewed.
68 * 1 line = 16 bytes of CDT
70 * cdt size = 16 * linesize
75 * 0x00000000 0x00008fff FIFO clusters
76 * 0x00010000 0x000104af Channel Management Data Structures
77 * 0x000104b0 0x000104ff Free
78 * 0x00010500 0x000108bf 15 channels * iqsize
79 * 0x000108c0 0x000108ff Free
80 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
81 * 15 channels * (iqsize + (maxlines * linesize))
82 * 0x00010ea0 0x00010xxx Free
 */
/* SRAM channel layout for the CX23885 bridge: per-channel CMDS/CTRL SRAM
 * offsets plus the DMA pointer/count register pairs for DMA engines 1-8. */
85 static struct sram_channel cx23885_sram_channels[] = {
88 .cmds_start = 0x10000,
89 .ctrl_start = 0x10380,
93 .ptr1_reg = DMA1_PTR1,
94 .ptr2_reg = DMA1_PTR2,
95 .cnt1_reg = DMA1_CNT1,
96 .cnt2_reg = DMA1_CNT2,
105 .ptr1_reg = DMA2_PTR1,
106 .ptr2_reg = DMA2_PTR2,
107 .cnt1_reg = DMA2_CNT1,
108 .cnt2_reg = DMA2_CNT2,
112 .cmds_start = 0x100A0,
113 .ctrl_start = 0x10400,
115 .fifo_start = 0x5000,
117 .ptr1_reg = DMA3_PTR1,
118 .ptr2_reg = DMA3_PTR2,
119 .cnt1_reg = DMA3_CNT1,
120 .cnt2_reg = DMA3_CNT2,
129 .ptr1_reg = DMA4_PTR1,
130 .ptr2_reg = DMA4_PTR2,
131 .cnt1_reg = DMA4_CNT1,
132 .cnt2_reg = DMA4_CNT2,
141 .ptr1_reg = DMA5_PTR1,
142 .ptr2_reg = DMA5_PTR2,
143 .cnt1_reg = DMA5_CNT1,
144 .cnt2_reg = DMA5_CNT2,
148 .cmds_start = 0x10140,
149 .ctrl_start = 0x10440,
151 .fifo_start = 0x6000,
153 .ptr1_reg = DMA5_PTR1,
154 .ptr2_reg = DMA5_PTR2,
155 .cnt1_reg = DMA5_CNT1,
156 .cnt2_reg = DMA5_CNT2,
160 .cmds_start = 0x10190,
161 .ctrl_start = 0x10480,
163 .fifo_start = 0x7000,
165 .ptr1_reg = DMA6_PTR1,
166 .ptr2_reg = DMA6_PTR2,
167 .cnt1_reg = DMA6_CNT1,
168 .cnt2_reg = DMA6_CNT2,
177 .ptr1_reg = DMA7_PTR1,
178 .ptr2_reg = DMA7_PTR2,
179 .cnt1_reg = DMA7_CNT1,
180 .cnt2_reg = DMA7_CNT2,
189 .ptr1_reg = DMA8_PTR1,
190 .ptr2_reg = DMA8_PTR2,
191 .cnt1_reg = DMA8_CNT1,
192 .cnt2_reg = DMA8_CNT2,
/* SRAM channel layout for the CX23887/8 bridge. Same register pairs as the
 * 885 table but with different CTRL SRAM offsets (0x105b0 upward). */
196 static struct sram_channel cx23887_sram_channels[] = {
199 .cmds_start = 0x10000,
200 .ctrl_start = 0x105b0,
204 .ptr1_reg = DMA1_PTR1,
205 .ptr2_reg = DMA1_PTR2,
206 .cnt1_reg = DMA1_CNT1,
207 .cnt2_reg = DMA1_CNT2,
210 .name = "VID A (VBI)",
211 .cmds_start = 0x10050,
212 .ctrl_start = 0x105F0,
214 .fifo_start = 0x3000,
216 .ptr1_reg = DMA2_PTR1,
217 .ptr2_reg = DMA2_PTR2,
218 .cnt1_reg = DMA2_CNT1,
219 .cnt2_reg = DMA2_CNT2,
223 .cmds_start = 0x100A0,
224 .ctrl_start = 0x10630,
226 .fifo_start = 0x5000,
228 .ptr1_reg = DMA3_PTR1,
229 .ptr2_reg = DMA3_PTR2,
230 .cnt1_reg = DMA3_CNT1,
231 .cnt2_reg = DMA3_CNT2,
240 .ptr1_reg = DMA4_PTR1,
241 .ptr2_reg = DMA4_PTR2,
242 .cnt1_reg = DMA4_CNT1,
243 .cnt2_reg = DMA4_CNT2,
252 .ptr1_reg = DMA5_PTR1,
253 .ptr2_reg = DMA5_PTR2,
254 .cnt1_reg = DMA5_CNT1,
255 .cnt2_reg = DMA5_CNT2,
259 .cmds_start = 0x10140,
260 .ctrl_start = 0x10670,
262 .fifo_start = 0x6000,
264 .ptr1_reg = DMA5_PTR1,
265 .ptr2_reg = DMA5_PTR2,
266 .cnt1_reg = DMA5_CNT1,
267 .cnt2_reg = DMA5_CNT2,
271 .cmds_start = 0x10190,
272 .ctrl_start = 0x106B0,
274 .fifo_start = 0x7000,
276 .ptr1_reg = DMA6_PTR1,
277 .ptr2_reg = DMA6_PTR2,
278 .cnt1_reg = DMA6_CNT1,
279 .cnt2_reg = DMA6_CNT2,
288 .ptr1_reg = DMA7_PTR1,
289 .ptr2_reg = DMA7_PTR2,
290 .cnt1_reg = DMA7_CNT1,
291 .cnt2_reg = DMA7_CNT2,
300 .ptr1_reg = DMA8_PTR1,
301 .ptr2_reg = DMA8_PTR2,
302 .cnt1_reg = DMA8_CNT1,
303 .cnt2_reg = DMA8_CNT2,
/* Add bits to the cached PCI interrupt mask (dev->pci_irqmask) under the
 * irqmask lock. Does not touch the hardware PCI_INT_MSK register. */
307 static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
310 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
312 dev->pci_irqmask |= mask;
314 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Add bits to the cached mask and simultaneously enable them in the
 * hardware PCI_INT_MSK register, all under the irqmask lock. */
317 void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
320 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
322 dev->pci_irqmask |= mask;
323 cx_set(PCI_INT_MSK, mask);
325 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Enable in hardware only those requested bits that are also present in the
 * cached mask, so sources never registered via cx23885_irq_add* stay off. */
328 void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
332 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
334 v = mask & dev->pci_irqmask;
336 cx_set(PCI_INT_MSK, v);
338 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Enable every interrupt source currently registered in the cached mask. */
341 static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
343 cx23885_irq_enable(dev, 0xffffffff);
/* Disable the given bits in the hardware PCI_INT_MSK register; the cached
 * mask is left intact so the sources can be re-enabled later. */
346 void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
349 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
351 cx_clear(PCI_INT_MSK, mask);
353 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Disable every PCI interrupt source in hardware (cached mask untouched). */
356 static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
358 cx23885_irq_disable(dev, 0xffffffff);
/* Remove bits from both the cached mask and the hardware PCI_INT_MSK
 * register under the irqmask lock (inverse of cx23885_irq_add_enable). */
361 void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
364 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
366 dev->pci_irqmask &= ~mask;
367 cx_clear(PCI_INT_MSK, mask);
369 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Read the current hardware PCI_INT_MSK value under the irqmask lock. */
372 static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
376 spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
378 v = cx_read(PCI_INT_MSK);
380 spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
/* Decode and print one RISC instruction word for debugging. Returns the
 * number of dwords the instruction occupies so callers can advance past
 * its arguments (1 for unknown opcodes). */
384 static int cx23885_risc_decode(u32 risc)
386 static char *instr[16] = {
387 [RISC_SYNC >> 28] = "sync",
388 [RISC_WRITE >> 28] = "write",
389 [RISC_WRITEC >> 28] = "writec",
390 [RISC_READ >> 28] = "read",
391 [RISC_READC >> 28] = "readc",
392 [RISC_JUMP >> 28] = "jump",
393 [RISC_SKIP >> 28] = "skip",
394 [RISC_WRITERM >> 28] = "writerm",
395 [RISC_WRITECM >> 28] = "writecm",
396 [RISC_WRITECR >> 28] = "writecr",
/* Per-opcode instruction length in dwords (opcode word plus arguments). */
398 static int incr[16] = {
399 [RISC_WRITE >> 28] = 3,
400 [RISC_JUMP >> 28] = 3,
401 [RISC_SKIP >> 28] = 1,
402 [RISC_SYNC >> 28] = 1,
403 [RISC_WRITERM >> 28] = 3,
404 [RISC_WRITECM >> 28] = 3,
405 [RISC_WRITECR >> 28] = 4,
/* Names for flag bits 12-27 of the instruction word. */
407 static char *bits[] = {
408 "12", "13", "14", "resync",
409 "cnt0", "cnt1", "18", "19",
410 "20", "21", "22", "23",
411 "irq1", "irq2", "eol", "sol",
415 printk(KERN_DEBUG "0x%08x [ %s", risc,
416 instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
417 for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
418 if (risc & (1 << (i + 12)))
419 pr_cont(" %s", bits[i]);
420 pr_cont(" count=%d ]\n", risc & 0xfff);
421 return incr[risc >> 28] ? incr[risc >> 28] : 1;
/* Complete finished buffers on the port's active queue: timestamp each,
 * assign its sequence number, and hand it back to videobuf2 as DONE.
 * 'count' is the hardware general-purpose counter value; at most five
 * buffers are serviced per call to bound time spent in IRQ context. */
424 static void cx23885_wakeup(struct cx23885_tsport *port,
425 struct cx23885_dmaqueue *q, u32 count)
427 struct cx23885_buffer *buf;
429 int max_buf_done = 5; /* service maximum five buffers */
432 if (list_empty(&q->active))
434 buf = list_entry(q->active.next,
435 struct cx23885_buffer, queue);
437 buf->vb.vb2_buf.timestamp = ktime_get_ns();
438 buf->vb.sequence = q->count++;
/* Log at a higher level when the hardware count and driver count diverge. */
439 if (count != (q->count % 65536)) {
440 dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
441 buf->vb.vb2_buf.index, count, q->count);
443 dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
444 buf->vb.vb2_buf.index, count, q->count);
446 list_del(&buf->queue);
447 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
449 /* count register is 16 bits so apply modulo appropriately */
450 count_delta = ((int)count - (int)(q->count % 65536));
451 } while ((count_delta > 0) && (max_buf_done > 0));
/* Program one SRAM channel: build the cluster descriptor table (CDT),
 * fill in the CMDS block and instruction queue, and load the DMA
 * pointer/count registers. 'bpl' (bytes per line) is rounded up to a
 * multiple of 8; a channel with cmds_start == 0 is erased instead. */
454 int cx23885_sram_channel_setup(struct cx23885_dev *dev,
455 struct sram_channel *ch,
456 unsigned int bpl, u32 risc)
458 unsigned int i, lines;
461 if (ch->cmds_start == 0) {
462 dprintk(1, "%s() Erasing channel [%s]\n", __func__,
464 cx_write(ch->ptr1_reg, 0);
465 cx_write(ch->ptr2_reg, 0);
466 cx_write(ch->cnt2_reg, 0);
467 cx_write(ch->cnt1_reg, 0);
470 dprintk(1, "%s() Configuring channel [%s]\n", __func__,
474 bpl = (bpl + 7) & ~7; /* alignment */
476 lines = ch->fifo_size / bpl;
481 cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
/* Write the CDT: one 16-byte entry per line pointing into the FIFO. */
486 for (i = 0; i < lines; i++) {
487 dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
488 ch->fifo_start + bpl*i);
489 cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
490 cx_write(cdt + 16*i + 4, 0);
491 cx_write(cdt + 16*i + 8, 0);
492 cx_write(cdt + 16*i + 12, 0);
/* Fill the CMDS block: initial RISC address, CDT base/size, IQ base/size. */
497 cx_write(ch->cmds_start + 0, 8);
499 cx_write(ch->cmds_start + 0, risc);
500 cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
501 cx_write(ch->cmds_start + 8, cdt);
502 cx_write(ch->cmds_start + 12, (lines*16) >> 3);
503 cx_write(ch->cmds_start + 16, ch->ctrl_start);
505 cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
507 cx_write(ch->cmds_start + 20, 64 >> 2);
508 for (i = 24; i < 80; i += 4)
509 cx_write(ch->cmds_start + i, 0);
/* Load the DMA engine's pointer and count registers. */
512 cx_write(ch->ptr1_reg, ch->fifo_start);
513 cx_write(ch->ptr2_reg, cdt);
514 cx_write(ch->cnt2_reg, (lines*16) >> 3);
515 cx_write(ch->cnt1_reg, (bpl >> 3) - 1);
517 dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
/* Dump one SRAM channel's state to the kernel log for debugging: the CMDS
 * block fields, decoded RISC instructions from the instruction queue, and
 * the DMA pointer/count registers. */
526 void cx23885_sram_channel_dump(struct cx23885_dev *dev,
527 struct sram_channel *ch)
529 static char *name[] = {
546 unsigned int i, j, n;
548 pr_warn("%s: %s - dma channel status dump\n",
549 dev->name, ch->name);
550 for (i = 0; i < ARRAY_SIZE(name); i++)
551 pr_warn("%s: cmds: %-15s: 0x%08x\n",
553 cx_read(ch->cmds_start + 4*i));
/* Decode the four RISC words stored after the named CMDS fields. */
555 for (i = 0; i < 4; i++) {
556 risc = cx_read(ch->cmds_start + 4 * (i + 14));
557 pr_warn("%s: risc%d: ", dev->name, i);
558 cx23885_risc_decode(risc);
/* Walk the instruction queue, advancing by each instruction's length. */
560 for (i = 0; i < (64 >> 2); i += n) {
561 risc = cx_read(ch->ctrl_start + 4 * i);
562 /* No consideration for bits 63-32 */
564 pr_warn("%s: (0x%08x) iq %x: ", dev->name,
565 ch->ctrl_start + 4 * i, i);
566 n = cx23885_risc_decode(risc);
567 for (j = 1; j < n; j++) {
568 risc = cx_read(ch->ctrl_start + 4 * (i + j));
569 pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
570 dev->name, i+j, risc, j);
574 pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
575 dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
576 pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
577 dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
578 pr_warn("%s: ptr1_reg: 0x%08x\n",
579 dev->name, cx_read(ch->ptr1_reg));
580 pr_warn("%s: ptr2_reg: 0x%08x\n",
581 dev->name, cx_read(ch->ptr2_reg));
582 pr_warn("%s: cnt1_reg: 0x%08x\n",
583 dev->name, cx_read(ch->cnt1_reg));
584 pr_warn("%s: cnt2_reg: 0x%08x\n",
585 dev->name, cx_read(ch->cnt2_reg));
/* Disassemble an in-memory RISC program to the kernel log, stopping when a
 * bare RISC_JUMP opcode is reached (or the buffer is exhausted). */
588 static void cx23885_risc_disasm(struct cx23885_tsport *port,
589 struct cx23885_riscmem *risc)
591 struct cx23885_dev *dev = port->dev;
592 unsigned int i, j, n;
594 pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
595 dev->name, risc->cpu, (unsigned long)risc->dma);
596 for (i = 0; i < (risc->size >> 2); i += n) {
597 pr_info("%s: %04d: ", dev->name, i);
598 n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
599 for (j = 1; j < n; j++)
600 pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
601 dev->name, i + j, risc->cpu[i + j], j);
602 if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
/* Part of the dma_reset_workaround: when dev->need_dma_reset is set, detect
 * an in-progress DMA via the TC_REQ/TC_REQ_SET registers and clear it by
 * writing the values back. No-op when the workaround is not needed. */
607 static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
609 uint32_t reg1_val, reg2_val;
611 if (!dev->need_dma_reset)
614 reg1_val = cx_read(TC_REQ); /* read-only */
615 reg2_val = cx_read(TC_REQ_SET);
617 if (reg1_val && reg2_val) {
618 cx_write(TC_REQ, reg1_val);
619 cx_write(TC_REQ_SET, reg2_val);
625 dev_info(&dev->pci->dev,
626 "dma in progress detected 0x%08x 0x%08x, clearing\n",
/* Quiesce the bridge: stop the RISC controller, all IR/video/audio DMA,
 * the UART, and mask every interrupt source. */
631 static void cx23885_shutdown(struct cx23885_dev *dev)
633 /* disable RISC controller */
634 cx_write(DEV_CNTRL2, 0);
636 /* Disable all IR activity */
637 cx_write(IR_CNTRL_REG, 0);
639 /* Disable Video A/B activity */
640 cx_write(VID_A_DMA_CTL, 0);
641 cx_write(VID_B_DMA_CTL, 0);
642 cx_write(VID_C_DMA_CTL, 0);
644 /* Disable Audio activity */
645 cx_write(AUD_INT_DMA_CTL, 0);
646 cx_write(AUD_EXT_DMA_CTL, 0);
648 /* Disable Serial port */
649 cx_write(UART_CTL, 0);
651 /* Disable Interrupts */
652 cx23885_irq_disable_all(dev);
653 cx_write(VID_A_INT_MSK, 0);
654 cx_write(VID_B_INT_MSK, 0);
655 cx_write(VID_C_INT_MSK, 0);
656 cx_write(AUDIO_INT_INT_MSK, 0);
657 cx_write(AUDIO_EXT_INT_MSK, 0);
/* Full soft reset: quiesce the chip, acknowledge all pending interrupt
 * status bits, reprogram every SRAM channel, then set up GPIOs and refresh
 * the interrupt mask. Bridge-error clearing brackets the SRAM setup. */
661 static void cx23885_reset(struct cx23885_dev *dev)
663 dprintk(1, "%s()\n", __func__);
665 cx23885_shutdown(dev);
/* Writing all-ones acknowledges any latched interrupt status bits. */
667 cx_write(PCI_INT_STAT, 0xffffffff);
668 cx_write(VID_A_INT_STAT, 0xffffffff);
669 cx_write(VID_B_INT_STAT, 0xffffffff);
670 cx_write(VID_C_INT_STAT, 0xffffffff);
671 cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
672 cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
673 cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
674 cx_write(PAD_CTRL, 0x00500300);
676 /* clear dma in progress */
677 cx23885_clear_bridge_error(dev);
680 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
682 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
683 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
685 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
686 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
687 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
689 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
690 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
691 cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);
693 cx23885_gpio_setup(dev);
695 cx23885_irq_get_mask(dev);
697 /* clear dma in progress */
698 cx23885_clear_bridge_error(dev);
/* Apply hardware-specific quirks after probe. */
702 static int cx23885_pci_quirks(struct cx23885_dev *dev)
704 dprintk(1, "%s()\n", __func__);
706 /* The cx23885 bridge has a weird bug which causes NMI to be asserted
707 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
708 * occur on the cx23887 bridge.
710 if (dev->bridge == CX23885_BRIDGE_885)
711 cx_clear(RDR_TLCTL0, 1 << 4);
713 /* clear dma in progress */
714 cx23885_clear_bridge_error(dev);
/* Claim the device's BAR 0 MMIO region; log an error if it is already taken. */
718 static int get_resources(struct cx23885_dev *dev)
720 if (request_mem_region(pci_resource_start(dev->pci, 0),
721 pci_resource_len(dev->pci, 0),
725 pr_err("%s: can't get MMIO memory @ 0x%llx\n",
726 dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
/* Initialise a transport-stream port structure: common DMA/interrupt
 * defaults, locks and queues, then the VID_B or VID_C register set
 * depending on 'portno'. */
731 static int cx23885_init_tsport(struct cx23885_dev *dev,
732 struct cx23885_tsport *port, int portno)
734 dprintk(1, "%s(portno=%d)\n", __func__, portno);
736 /* Transport bus init dma queue - Common settings */
737 port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
738 port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
739 port->vld_misc_val = 0x0;
740 port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);
742 spin_lock_init(&port->slock);
746 INIT_LIST_HEAD(&port->mpegq.active);
747 mutex_init(&port->frontends.lock);
748 INIT_LIST_HEAD(&port->frontends.felist);
749 port->frontends.active_fe_id = 0;
751 /* This should be hardcoded allow a single frontend
752 * attachment to this tsport, keeping the -dvb.c
753 * code clean and safe.
755 if (!port->num_frontends)
756 port->num_frontends = 1;
/* Port 1 -> VID_B register block. */
760 port->reg_gpcnt = VID_B_GPCNT;
761 port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
762 port->reg_dma_ctl = VID_B_DMA_CTL;
763 port->reg_lngth = VID_B_LNGTH;
764 port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
765 port->reg_gen_ctrl = VID_B_GEN_CTL;
766 port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
767 port->reg_sop_status = VID_B_SOP_STATUS;
768 port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
769 port->reg_vld_misc = VID_B_VLD_MISC;
770 port->reg_ts_clk_en = VID_B_TS_CLK_EN;
771 port->reg_src_sel = VID_B_SRC_SEL;
772 port->reg_ts_int_msk = VID_B_INT_MSK;
773 port->reg_ts_int_stat = VID_B_INT_STAT;
774 port->sram_chno = SRAM_CH03; /* VID_B */
775 port->pci_irqmask = 0x02; /* VID_B bit1 */
/* Port 2 -> VID_C register block (no SRC_SEL register for this port). */
778 port->reg_gpcnt = VID_C_GPCNT;
779 port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
780 port->reg_dma_ctl = VID_C_DMA_CTL;
781 port->reg_lngth = VID_C_LNGTH;
782 port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
783 port->reg_gen_ctrl = VID_C_GEN_CTL;
784 port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
785 port->reg_sop_status = VID_C_SOP_STATUS;
786 port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
787 port->reg_vld_misc = VID_C_VLD_MISC;
788 port->reg_ts_clk_en = VID_C_TS_CLK_EN;
789 port->reg_src_sel = 0;
790 port->reg_ts_int_msk = VID_C_INT_MSK;
791 port->reg_ts_int_stat = VID_C_INT_STAT;
792 port->sram_chno = SRAM_CH06; /* VID_C */
793 port->pci_irqmask = 0x04; /* VID_C bit2 */
/* Derive dev->hwrevision from the low byte of RDR_CFG2; the 0x8880 PCI
 * device ID distinguishes CX23888 variants sharing a config value. */
802 static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
804 switch (cx_read(RDR_CFG2) & 0xff) {
807 dev->hwrevision = 0xa0;
811 dev->hwrevision = 0xa1;
814 /* CX23885-13Z/14Z */
815 dev->hwrevision = 0xb0;
818 if (dev->pci->device == 0x8880) {
819 /* CX23888-21Z/22Z */
820 dev->hwrevision = 0xc0;
823 dev->hwrevision = 0xa4;
827 if (dev->pci->device == 0x8880) {
829 dev->hwrevision = 0xd0;
831 /* CX23885-15Z, CX23888-31Z */
832 dev->hwrevision = 0xa5;
837 dev->hwrevision = 0xc0;
841 dev->hwrevision = 0xb1;
844 pr_err("%s() New hardware revision found 0x%x\n",
845 __func__, dev->hwrevision);
848 pr_info("%s() Hardware revision = 0x%02x\n",
849 __func__, dev->hwrevision);
851 pr_err("%s() Hardware revision unknown 0x%x\n",
852 __func__, dev->hwrevision);
855 /* Find the first v4l2_subdev member of the group id in hw */
856 struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
858 struct v4l2_subdev *result = NULL;
859 struct v4l2_subdev *sd;
/* Hold the v4l2_device lock while iterating the subdev list. */
861 spin_lock(&dev->v4l2_dev.lock);
862 v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
863 if (sd->grp_id == hw) {
868 spin_unlock(&dev->v4l2_dev.lock);
/* One-time device initialisation: locks, bridge/board identification,
 * clock frequency, i2c bus descriptors, TS port init, MMIO mapping,
 * quirks, card setup, and registration of the analog/DVB/417 sub-drivers
 * selected by the board profile. */
872 static int cx23885_dev_setup(struct cx23885_dev *dev)
876 spin_lock_init(&dev->pci_irqmask_lock);
877 spin_lock_init(&dev->slock);
879 mutex_init(&dev->lock);
880 mutex_init(&dev->gpio_lock);
882 atomic_inc(&dev->refcount);
884 dev->nr = cx23885_devcount++;
885 sprintf(dev->name, "cx23885[%d]", dev->nr);
887 /* Configure the internal memory */
888 if (dev->pci->device == 0x8880) {
889 /* Could be 887 or 888, assume an 888 default */
890 dev->bridge = CX23885_BRIDGE_888;
891 /* Apply a sensible clock frequency for the PCIe bridge */
892 dev->clk_freq = 50000000;
893 dev->sram_channels = cx23887_sram_channels;
895 if (dev->pci->device == 0x8852) {
896 dev->bridge = CX23885_BRIDGE_885;
897 /* Apply a sensible clock frequency for the PCIe bridge */
898 dev->clk_freq = 28000000;
899 dev->sram_channels = cx23885_sram_channels;
903 dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
904 __func__, dev->bridge);
/* Board selection: module option first, then PCI subsystem-ID table. */
908 if (card[dev->nr] < cx23885_bcount)
909 dev->board = card[dev->nr];
910 for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
911 if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
912 dev->pci->subsystem_device == cx23885_subids[i].subdevice)
913 dev->board = cx23885_subids[i].card;
914 if (UNSET == dev->board) {
915 dev->board = CX23885_BOARD_UNKNOWN;
916 cx23885_card_list(dev);
919 if (dev->pci->device == 0x8852) {
920 /* no DIF on cx23885, so no analog tuner support possible */
921 if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
922 dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
923 else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
924 dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
927 /* If the user specific a clk freq override, apply it */
928 if (cx23885_boards[dev->board].clk_freq > 0)
929 dev->clk_freq = cx23885_boards[dev->board].clk_freq;
931 if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
932 dev->pci->subsystem_device == 0x7137) {
933 /* Hauppauge ImpactVCBe device ID 0x7137 is populated
934 * with an 888, and a 25Mhz crystal, instead of the
935 * usual third overtone 50Mhz. The default clock rate must
936 * be overridden so the cx25840 is properly configured
938 dev->clk_freq = 25000000;
941 dev->pci_bus = dev->pci->bus->number;
942 dev->pci_slot = PCI_SLOT(dev->pci->devfn);
943 cx23885_irq_add(dev, 0x001f00);
945 /* External Master 1 Bus */
946 dev->i2c_bus[0].nr = 0;
947 dev->i2c_bus[0].dev = dev;
948 dev->i2c_bus[0].reg_stat = I2C1_STAT;
949 dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
950 dev->i2c_bus[0].reg_addr = I2C1_ADDR;
951 dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
952 dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
953 dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
955 /* External Master 2 Bus */
956 dev->i2c_bus[1].nr = 1;
957 dev->i2c_bus[1].dev = dev;
958 dev->i2c_bus[1].reg_stat = I2C2_STAT;
959 dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
960 dev->i2c_bus[1].reg_addr = I2C2_ADDR;
961 dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
962 dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
963 dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
965 /* Internal Master 3 Bus */
966 dev->i2c_bus[2].nr = 2;
967 dev->i2c_bus[2].dev = dev;
968 dev->i2c_bus[2].reg_stat = I2C3_STAT;
969 dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
970 dev->i2c_bus[2].reg_addr = I2C3_ADDR;
971 dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
972 dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
973 dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
/* Initialise TS ports only for board profiles that use them. */
975 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
976 (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
977 cx23885_init_tsport(dev, &dev->ts1, 1);
979 if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
980 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
981 cx23885_init_tsport(dev, &dev->ts2, 2);
983 if (get_resources(dev) < 0) {
984 pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
985 dev->name, dev->pci->subsystem_vendor,
986 dev->pci->subsystem_device);
993 dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
994 pci_resource_len(dev->pci, 0));
996 dev->bmmio = (u8 __iomem *)dev->lmmio;
998 pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
999 dev->name, dev->pci->subsystem_vendor,
1000 dev->pci->subsystem_device, cx23885_boards[dev->board].name,
1001 dev->board, card[dev->nr] == dev->board ?
1002 "insmod option" : "autodetected");
1004 cx23885_pci_quirks(dev);
1006 /* Assume some sensible defaults */
1007 dev->tuner_type = cx23885_boards[dev->board].tuner_type;
1008 dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
1009 dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
1010 dev->radio_type = cx23885_boards[dev->board].radio_type;
1011 dev->radio_addr = cx23885_boards[dev->board].radio_addr;
1013 dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
1014 __func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
1015 dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
1016 __func__, dev->radio_type, dev->radio_addr);
1018 /* The cx23417 encoder has GPIO's that need to be initialised
1019 * before DVB, so that demodulators and tuners are out of
1020 * reset before DVB uses them.
1022 if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
1023 (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
1024 cx23885_mc417_init(dev);
1029 cx23885_i2c_register(&dev->i2c_bus[0]);
1030 cx23885_i2c_register(&dev->i2c_bus[1]);
1031 cx23885_i2c_register(&dev->i2c_bus[2]);
1032 cx23885_card_setup(dev);
1033 call_all(dev, tuner, standby);
1034 cx23885_ir_init(dev);
1036 if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
1038 * GPIOs 9/8 are input detection bits for the breakout video
1039 * (gpio 8) and audio (gpio 9) cables. When they're attached,
1040 * this gpios are pulled high. Make sure these GPIOs are marked
1043 cx23885_gpio_enable(dev, 0x300, 0);
1046 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
1047 if (cx23885_video_register(dev) < 0) {
1048 pr_err("%s() Failed to register analog video adapters on VID_A\n",
1053 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1054 if (cx23885_boards[dev->board].num_fds_portb)
1055 dev->ts1.num_frontends =
1056 cx23885_boards[dev->board].num_fds_portb;
1057 if (cx23885_dvb_register(&dev->ts1) < 0) {
1058 pr_err("%s() Failed to register dvb adapters on VID_B\n",
1062 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1063 if (cx23885_417_register(dev) < 0) {
1064 pr_err("%s() Failed to register 417 on VID_B\n",
1069 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1070 if (cx23885_boards[dev->board].num_fds_portc)
1071 dev->ts2.num_frontends =
1072 cx23885_boards[dev->board].num_fds_portc;
1073 if (cx23885_dvb_register(&dev->ts2) < 0) {
1074 pr_err("%s() Failed to register dvb on VID_C\n",
1078 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1079 if (cx23885_417_register(dev) < 0) {
1080 pr_err("%s() Failed to register 417 on VID_C\n",
1085 cx23885_dev_checkrevision(dev);
1087 /* disable MSI for NetUP cards, otherwise CI is not working */
1088 if (cx23885_boards[dev->board].ci_type > 0)
1089 cx_clear(RDR_RDRCTL1, 1 << 8);
1091 switch (dev->board) {
1092 case CX23885_BOARD_TEVII_S470:
1093 case CX23885_BOARD_TEVII_S471:
1094 cx_clear(RDR_RDRCTL1, 1 << 8);
/* Teardown counterpart of cx23885_dev_setup: release the MMIO region and,
 * once the refcount drops to zero, unregister the sub-drivers the board
 * profile enabled, the i2c buses, and unmap the registers. */
1101 static void cx23885_dev_unregister(struct cx23885_dev *dev)
1103 release_mem_region(pci_resource_start(dev->pci, 0),
1104 pci_resource_len(dev->pci, 0));
1106 if (!atomic_dec_and_test(&dev->refcount))
1109 if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
1110 cx23885_video_unregister(dev);
1112 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1113 cx23885_dvb_unregister(&dev->ts1);
1115 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1116 cx23885_417_unregister(dev);
1118 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1119 cx23885_dvb_unregister(&dev->ts2);
1121 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1122 cx23885_417_unregister(dev);
/* Unregister i2c buses in reverse order of registration. */
1124 cx23885_i2c_unregister(&dev->i2c_bus[2]);
1125 cx23885_i2c_unregister(&dev->i2c_bus[1]);
1126 cx23885_i2c_unregister(&dev->i2c_bus[0]);
1128 iounmap(dev->lmmio);
/* Emit RISC write instructions for one video field from a scatterlist,
 * splitting scanlines across scatter-gather chunk boundaries as needed.
 * 'sync_line' adds a RISC_RESYNC unless NO_SYNC_LINE; 'lpi' raises IRQ1
 * every lpi lines; 'jump' prefixes a RISC_JUMP. Returns the advanced
 * program pointer. */
1131 static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1132 unsigned int offset, u32 sync_line,
1133 unsigned int bpl, unsigned int padding,
1134 unsigned int lines, unsigned int lpi, bool jump)
1136 struct scatterlist *sg;
1137 unsigned int line, todo, sol;
1141 *(rp++) = cpu_to_le32(RISC_JUMP);
1142 *(rp++) = cpu_to_le32(0);
1143 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1146 /* sync instruction */
1147 if (sync_line != NO_SYNC_LINE)
1148 *(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);
1152 for (line = 0; line < lines; line++) {
/* Skip whole SG chunks consumed by the starting offset. */
1153 while (offset && offset >= sg_dma_len(sg)) {
1154 offset -= sg_dma_len(sg);
1158 if (lpi && line > 0 && !(line % lpi))
1159 sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
1163 if (bpl <= sg_dma_len(sg)-offset) {
1164 /* fits into current chunk */
1165 *(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
1166 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1167 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1170 /* scanline needs to be split */
1172 *(rp++) = cpu_to_le32(RISC_WRITE|sol|
1173 (sg_dma_len(sg)-offset));
1174 *(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
1175 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1176 todo -= (sg_dma_len(sg)-offset);
1179 while (todo > sg_dma_len(sg)) {
1180 *(rp++) = cpu_to_le32(RISC_WRITE|
1182 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1183 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1184 todo -= sg_dma_len(sg);
1187 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1188 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1189 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
/* Allocate coherent DMA memory and build a RISC program covering the top
 * and/or bottom fields (UNSET offset skips a field). The instruction count
 * is a worst-case estimate at 12 bytes per instruction. */
1198 int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1199 struct scatterlist *sglist, unsigned int top_offset,
1200 unsigned int bottom_offset, unsigned int bpl,
1201 unsigned int padding, unsigned int lines)
1203 u32 instructions, fields;
1207 if (UNSET != top_offset)
1209 if (UNSET != bottom_offset)
1212 /* estimate risc mem: worst case is one write per page border +
1213 one write per scan line + syncs + jump (all 2 dwords). Padding
1214 can cause next bpl to start close to a page border. First DMA
1215 region may be smaller than PAGE_SIZE */
1216 /* write and jump need and extra dword */
1217 instructions = fields * (1 + ((bpl + padding) * lines)
1218 / PAGE_SIZE + lines);
1220 risc->size = instructions * 12;
1221 risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
1223 if (risc->cpu == NULL)
1226 /* write risc instructions */
1228 if (UNSET != top_offset)
1229 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1230 bpl, padding, lines, 0, true);
1231 if (UNSET != bottom_offset)
1232 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1233 bpl, padding, lines, 0, UNSET == top_offset);
1235 /* save pointer to jmp instruction address */
1237 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
/* Allocate and build a RISC program for a raw data buffer (no field sync,
 * no padding); 'lpi' selects periodic IRQ1 generation inside the field. */
1241 int cx23885_risc_databuffer(struct pci_dev *pci,
1242 struct cx23885_riscmem *risc,
1243 struct scatterlist *sglist,
1245 unsigned int lines, unsigned int lpi)
1250 /* estimate risc mem: worst case is one write per page border +
1251 one write per scan line + syncs + jump (all 2 dwords). Here
1252 there is no padding and no sync. First DMA region may be smaller
1254 /* Jump and write need an extra dword */
1255 instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
1258 risc->size = instructions * 12;
1259 risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
1261 if (risc->cpu == NULL)
1264 /* write risc instructions */
1266 rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
1267 bpl, 0, lines, lpi, lpi == 0);
1269 /* save pointer to jmp instruction address */
1271 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
/* Allocate and build a RISC program for a VBI buffer; mirrors
 * cx23885_risc_buffer but syncs fields for VBI line alignment. */
1275 int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
1276 struct scatterlist *sglist, unsigned int top_offset,
1277 unsigned int bottom_offset, unsigned int bpl,
1278 unsigned int padding, unsigned int lines)
1280 u32 instructions, fields;
1284 if (UNSET != top_offset)
1286 if (UNSET != bottom_offset)
1289 /* estimate risc mem: worst case is one write per page border +
1290 one write per scan line + syncs + jump (all 2 dwords). Padding
1291 can cause next bpl to start close to a page border. First DMA
1292 region may be smaller than PAGE_SIZE */
1293 /* write and jump need and extra dword */
1294 instructions = fields * (1 + ((bpl + padding) * lines)
1295 / PAGE_SIZE + lines);
1297 risc->size = instructions * 12;
1298 risc->cpu = dma_alloc_coherent(&pci->dev, risc->size, &risc->dma,
1300 if (risc->cpu == NULL)
1302 /* write risc instructions */
1305 /* Sync to line 6, so US CC line 21 will appear in line '12'
1306 * in the userland vbi payload */
1307 if (UNSET != top_offset)
1308 rp = cx23885_risc_field(rp, sglist, top_offset, 0,
1309 bpl, padding, lines, 0, true);
1311 if (UNSET != bottom_offset)
1312 rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
1313 bpl, padding, lines, 0, UNSET == top_offset);
1317 /* save pointer to jmp instruction address */
1319 BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
/* Free the coherent DMA memory backing a buffer's RISC program. */
1324 void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
1326 struct cx23885_riscmem *risc = &buf->risc;
1328 dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu, risc->dma);
/*
 * Dump the bridge-wide and per-port transport-stream registers at debug
 * level 1.  Read-only diagnostic helper: nothing is written to hardware
 * (every access is cx_read() or cx23885_irq_get_mask()).
 */
1331 static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
1333 struct cx23885_dev *dev = port->dev;
1335 dprintk(1, "%s() Register Dump\n", __func__);
/* Bridge-global registers first ... */
1336 dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
1337 cx_read(DEV_CNTRL2));
1338 dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
1339 cx23885_irq_get_mask(dev));
1340 dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
1341 cx_read(AUDIO_INT_INT_MSK));
1342 dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
1343 cx_read(AUD_INT_DMA_CTL));
1344 dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
1345 cx_read(AUDIO_EXT_INT_MSK));
1346 dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
1347 cx_read(AUD_EXT_DMA_CTL));
1348 dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
1350 dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
1351 cx_read(ALT_PIN_OUT_SEL));
1352 dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
/* ... then the per-port registers, whose addresses live in @port */
1354 dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
1355 port->reg_gpcnt, cx_read(port->reg_gpcnt));
1356 dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
1357 port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
1358 dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
1359 port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
/* SRC SELECT only exists on some ports; 0 means "not present" */
1360 if (port->reg_src_sel)
1361 dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
1362 port->reg_src_sel, cx_read(port->reg_src_sel));
1363 dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
1364 port->reg_lngth, cx_read(port->reg_lngth));
1365 dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
1366 port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
1367 dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
1368 port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
1369 dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
1370 port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
1371 dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
1372 port->reg_sop_status, cx_read(port->reg_sop_status));
1373 dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
1374 port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
1375 dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
1376 port->reg_vld_misc, cx_read(port->reg_vld_misc));
1377 dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
1378 port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
1379 dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
1380 port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
1381 dprintk(1, "%s() ts_int_status(0x%08X) 0x%08x\n", __func__,
1382 port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat));
1383 dprintk(1, "%s() PCI_INT_STAT 0x%08X\n", __func__,
1384 cx_read(PCI_INT_STAT));
1385 dprintk(1, "%s() VID_B_INT_MSTAT 0x%08X\n", __func__,
1386 cx_read(VID_B_INT_MSTAT));
1387 dprintk(1, "%s() VID_B_INT_SSTAT 0x%08X\n", __func__,
1388 cx_read(VID_B_INT_SSTAT));
1389 dprintk(1, "%s() VID_C_INT_MSTAT 0x%08X\n", __func__,
1390 cx_read(VID_C_INT_MSTAT));
1391 dprintk(1, "%s() VID_C_INT_SSTAT 0x%08X\n", __func__,
1392 cx_read(VID_C_INT_SSTAT));
/*
 * Program the SRAM channel and port registers, then start transport
 * stream DMA on @port using @buf's RISC program as the starting point.
 *
 * The sequence (stop port, set up SRAM/FIFO, program port registers,
 * flip pad directions, enable interrupts + DMA) is order-sensitive;
 * cx23885_clear_bridge_error() is sprinkled throughout as part of the
 * Ryzen/Xeon DMA-stall workaround (see dma_reset_workaround above).
 */
1395 int cx23885_start_dma(struct cx23885_tsport *port,
1396 struct cx23885_dmaqueue *q,
1397 struct cx23885_buffer *buf)
1399 struct cx23885_dev *dev = port->dev;
1402 dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1403 dev->width, dev->height, dev->field);
1405 /* clear dma in progress */
1406 cx23885_clear_bridge_error(dev);
1408 /* Stop the fifo and risc engine for this port */
1409 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1411 /* setup fifo + format */
1412 cx23885_sram_channel_setup(dev,
1413 &dev->sram_channels[port->sram_chno],
1414 port->ts_packet_size, buf->risc.dma);
1416 cx23885_sram_channel_dump(dev,
1417 &dev->sram_channels[port->sram_chno]);
1418 cx23885_risc_disasm(port, &buf->risc);
1421 /* write TS length to chip */
1422 cx_write(port->reg_lngth, port->ts_packet_size);
/* Reject boards with neither TS port configured for DVB. */
1424 if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1425 (!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1426 pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1428 cx23885_boards[dev->board].portb,
1429 cx23885_boards[dev->board].portc);
/* Encoder boards: gate the A/V core clock off while reprogramming. */
1433 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1434 cx23885_av_clk(dev, 0);
1438 /* If the port supports SRC SELECT, configure it */
1439 if (port->reg_src_sel)
1440 cx_write(port->reg_src_sel, port->src_sel_val);
/* Per-port operating values come from the port descriptor. */
1442 cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1443 cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1444 cx_write(port->reg_vld_misc, port->vld_misc_val);
1445 cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1448 /* NOTE: this is 2 (reserved) for portb, does it matter? */
1449 /* reset counter to zero */
1450 cx_write(port->reg_gpcnt_ctl, 3);
1453 /* Set VIDB pins to input */
1454 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1455 reg = cx_read(PAD_CTRL);
1456 reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1457 cx_write(PAD_CTRL, reg);
1460 /* Set VIDC pins to input */
1461 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1462 reg = cx_read(PAD_CTRL);
1463 reg &= ~0x4; /* Clear TS2_SOP_OE */
1464 cx_write(PAD_CTRL, reg);
1467 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1469 reg = cx_read(PAD_CTRL);
1470 reg = reg & ~0x1; /* Clear TS1_OE */
1472 /* FIXME, bit 2 writing here is questionable */
1473 /* set TS1_SOP_OE and TS1_OE_HI */
1475 cx_write(PAD_CTRL, reg);
1477 /* Sets MOE_CLK_DIS to disable MoE clock */
1478 /* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */
1479 cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1481 /* ALT_GPIO_ALT_SET: GPIO[0]
1482 * IR_ALT_TX_SEL: GPIO[1]
1483 * GPIO1_ALT_SEL: VIP_656_DATA[0]
1484 * GPIO0_ALT_SEL: VIP_656_CLK
1486 cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1489 switch (dev->bridge) {
1490 case CX23885_BRIDGE_885:
1491 case CX23885_BRIDGE_887:
1492 case CX23885_BRIDGE_888:
1494 dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1495 /* clear dma in progress */
1496 cx23885_clear_bridge_error(dev);
1497 cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
1498 cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1500 /* clear dma in progress */
1501 cx23885_clear_bridge_error(dev);
/* Unmask this port's interrupt at the PCI level. */
1502 cx23885_irq_add(dev, port->pci_irqmask);
1503 cx23885_irq_enable_all(dev);
1505 /* clear dma in progress */
1506 cx23885_clear_bridge_error(dev);
1512 cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1513 /* clear dma in progress */
1514 cx23885_clear_bridge_error(dev);
/* Re-enable the A/V core clock now that the port is running. */
1516 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1517 cx23885_av_clk(dev, 1);
1520 cx23885_tsport_reg_dump(port);
1522 cx23885_irq_get_mask(dev);
1524 /* clear dma in progress */
1525 cx23885_clear_bridge_error(dev);
/*
 * Stop transport-stream DMA and interrupts on @port and wait for any
 * in-flight DMA to drain before the caller deallocates buffers.
 *
 * Fix: the drain loop previously used
 *     if (reg1_val == 0 || reg2_val == 0)
 * which exits as soon as EITHER of TC_REQ/TC_REQ_SET reads zero — i.e.
 * it can stop waiting while one request register is still active,
 * defeating the "wait for any dma to complete" intent stated in the
 * comment below.  Use && so we only stop polling once BOTH registers
 * are quiescent (or the 100-iteration bound expires).
 */
1530 static int cx23885_stop_dma(struct cx23885_tsport *port)
1532 struct cx23885_dev *dev = port->dev;
1538 dprintk(1, "%s()\n", __func__);
1540 /* Stop interrupts and DMA */
1541 cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1542 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1543 /* just in case wait for any dma to complete before allowing dealloc */
1545 for (delay = 0; delay < 100; delay++) {
1546 reg1_val = cx_read(TC_REQ);
1547 reg2_val = cx_read(TC_REQ_SET);
/* Quiescent only when BOTH request registers have cleared. */
1548 if (reg1_val == 0 && reg2_val == 0)
1552 dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
1553 delay, reg1_val, reg2_val);
/* Encoder boards: hand the TS1 pads back and park the port. */
1555 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1556 reg = cx_read(PAD_CTRL);
1561 /* clear TS1_SOP_OE and TS1_OE_HI */
1563 cx_write(PAD_CTRL, reg);
1564 cx_write(port->reg_src_sel, 0);
1565 cx_write(port->reg_gen_ctrl, 8);
1568 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1569 cx23885_av_clk(dev, 0);
1574 /* ------------------------------------------------------------------ */
/*
 * vb2 buffer-prepare hook for a TS port: validate the plane is large
 * enough for ts_packet_size * ts_packet_count bytes, set the payload
 * size, and build the buffer's RISC DMA program from its sg table.
 */
1576 int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
1578 struct cx23885_dev *dev = port->dev;
1579 int size = port->ts_packet_size * port->ts_packet_count;
1580 struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);
1582 dprintk(1, "%s: %p\n", __func__, buf);
/* Reject undersized planes before touching the payload. */
1583 if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
1585 vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
1587 cx23885_risc_databuffer(dev->pci, &buf->risc,
1589 port->ts_packet_size, port->ts_packet_count, 0);
1594 * The risc program for each buffer works as follows: it starts with a simple
1595 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1596 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1597 * the initial JUMP).
1599 * This is the risc program of the first buffer to be queued if the active list
1600 * is empty and it just keeps DMAing this buffer without generating any
1603 * If a new buffer is added then the initial JUMP in the code for that buffer
1604 * will generate an interrupt which signals that the previous buffer has been
1605 * DMAed successfully and that it can be returned to userspace.
1607 * It also sets the final jump of the previous buffer to the start of the new
1608 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1609 * atomic u32 write, so there is no race condition.
1611 * The end-result of all this is that you only get an interrupt when a buffer
1612 * is ready, so the control flow is very easy.
/*
 * vb2 buffer-queue hook: splice @buf's RISC program into the port's DMA
 * chain (see the long comment above for how the JUMP chaining works).
 * Must run under dev->slock because the IRQ path walks the active list.
 */
1614 void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
1616 struct cx23885_buffer *prev;
1617 struct cx23885_dev *dev = port->dev;
1618 struct cx23885_dmaqueue *cx88q = &port->mpegq;
1619 unsigned long flags;
/* Initial JUMP skips itself (+12 bytes); trailer jumps back there. */
1621 buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
1622 buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
1623 buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
1624 buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */
1626 spin_lock_irqsave(&dev->slock, flags);
1627 if (list_empty(&cx88q->active)) {
1628 list_add_tail(&buf->queue, &cx88q->active);
1629 dprintk(1, "[%p/%d] %s - first active\n",
1630 buf, buf->vb.vb2_buf.index, __func__);
/* Not first: arm our initial JUMP to raise IRQ1, then retarget the
 * previous buffer's trailer at us — a single atomic u32 store. */
1632 buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
1633 prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
1635 list_add_tail(&buf->queue, &cx88q->active);
1636 prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
1637 dprintk(1, "[%p/%d] %s - append to active\n",
1638 buf, buf->vb.vb2_buf.index, __func__);
1640 spin_unlock_irqrestore(&dev->slock, flags);
1643 /* ----------------------------------------------------------- */
/*
 * Drain the port's active queue, completing every pending vb2 buffer
 * with VB2_BUF_STATE_ERROR.  @reason is only used for the debug log.
 * Runs under port->slock to exclude the IRQ completion path.
 */
1645 static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1647 struct cx23885_dmaqueue *q = &port->mpegq;
1648 struct cx23885_buffer *buf;
1649 unsigned long flags;
1651 spin_lock_irqsave(&port->slock, flags);
1652 while (!list_empty(&q->active)) {
1653 buf = list_entry(q->active.next, struct cx23885_buffer,
1655 list_del(&buf->queue);
1656 vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
1657 dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1658 buf, buf->vb.vb2_buf.index, reason,
1659 (unsigned long)buf->risc.dma);
1661 spin_unlock_irqrestore(&port->slock, flags);
/*
 * Stop DMA on @port, then error-complete all queued buffers.
 * DMA must be stopped first so the hardware cannot touch a buffer
 * after it has been returned to vb2.
 */
1664 void cx23885_cancel_buffers(struct cx23885_tsport *port)
1666 dprintk(1, "%s()\n", __func__);
1667 cx23885_stop_dma(port);
1668 do_cancel_buffers(port, "cancel");
/*
 * Interrupt handler for a TS port driven by the cx23417 MPEG encoder.
 * Error conditions stop this port's DMA and dump its SRAM channel;
 * RISCI1 is the normal "buffer complete" path.  The handled status
 * bits are acked back to the port's interrupt status register.
 */
1671 int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
1673 /* FIXME: port1 assumption here. */
1674 struct cx23885_tsport *port = &dev->ts1;
1681 count = cx_read(port->reg_gpcnt);
1682 dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
1683 status, cx_read(port->reg_ts_int_msk), count);
/* Any of these bits indicates a transport/RISC error condition. */
1685 if ((status & VID_B_MSK_BAD_PKT) ||
1686 (status & VID_B_MSK_OPC_ERR) ||
1687 (status & VID_B_MSK_VBI_OPC_ERR) ||
1688 (status & VID_B_MSK_SYNC) ||
1689 (status & VID_B_MSK_VBI_SYNC) ||
1690 (status & VID_B_MSK_OF) ||
1691 (status & VID_B_MSK_VBI_OF)) {
1692 pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
1694 if (status & VID_B_MSK_BAD_PKT)
1695 dprintk(1, " VID_B_MSK_BAD_PKT\n");
1696 if (status & VID_B_MSK_OPC_ERR)
1697 dprintk(1, " VID_B_MSK_OPC_ERR\n");
1698 if (status & VID_B_MSK_VBI_OPC_ERR)
1699 dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
1700 if (status & VID_B_MSK_SYNC)
1701 dprintk(1, " VID_B_MSK_SYNC\n");
1702 if (status & VID_B_MSK_VBI_SYNC)
1703 dprintk(1, " VID_B_MSK_VBI_SYNC\n");
1704 if (status & VID_B_MSK_OF)
1705 dprintk(1, " VID_B_MSK_OF\n");
1706 if (status & VID_B_MSK_VBI_OF)
1707 dprintk(1, " VID_B_MSK_VBI_OF\n");
/* Stop this port's DMA and dump state for post-mortem. */
1709 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1710 cx23885_sram_channel_dump(dev,
1711 &dev->sram_channels[port->sram_chno]);
1712 cx23885_417_check_encoder(dev);
1713 } else if (status & VID_B_MSK_RISCI1) {
1714 dprintk(7, " VID_B_MSK_RISCI1\n");
/* Normal completion: wake the buffer that just finished DMA. */
1715 spin_lock(&port->slock);
1716 cx23885_wakeup(port, &port->mpegq, count);
1717 spin_unlock(&port->slock);
/* Ack the bits we saw. */
1720 cx_write(port->reg_ts_int_stat, status);
/*
 * Interrupt handler for a DVB transport-stream port.  Mirrors
 * cx23885_irq_417(): error bits stop the port's DMA and dump its SRAM
 * channel; RISCI1 completes the finished buffer.  Handled bits are
 * acked to the port's interrupt status register.
 */
1727 static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
1729 struct cx23885_dev *dev = port->dev;
1733 if ((status & VID_BC_MSK_OPC_ERR) ||
1734 (status & VID_BC_MSK_BAD_PKT) ||
1735 (status & VID_BC_MSK_SYNC) ||
1736 (status & VID_BC_MSK_OF)) {
1738 if (status & VID_BC_MSK_OPC_ERR)
1739 dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
1740 VID_BC_MSK_OPC_ERR);
1742 if (status & VID_BC_MSK_BAD_PKT)
1743 dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
1744 VID_BC_MSK_BAD_PKT);
1746 if (status & VID_BC_MSK_SYNC)
1747 dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
1750 if (status & VID_BC_MSK_OF)
1751 dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
1754 pr_err("%s: mpeg risc op code error\n", dev->name);
/* Stop DMA on the failing port and dump state for diagnosis. */
1756 cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1757 cx23885_sram_channel_dump(dev,
1758 &dev->sram_channels[port->sram_chno]);
1760 } else if (status & VID_BC_MSK_RISCI1) {
1762 dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
1764 spin_lock(&port->slock);
1765 count = cx_read(port->reg_gpcnt);
1766 cx23885_wakeup(port, &port->mpegq, count);
1767 spin_unlock(&port->slock);
/* Ack the handled bits. */
1771 cx_write(port->reg_ts_int_stat, status);
/*
 * Top-level (shared) PCI interrupt handler.  Reads the bridge-wide and
 * per-block status/mask registers, then dispatches to the CI, TS,
 * video, audio, IR and AV-core sub-handlers based on the board config
 * and pending bits.  Returns IRQ_HANDLED iff any sub-handler consumed
 * something; handled PCI bits are acked at the end.
 */
1778 static irqreturn_t cx23885_irq(int irq, void *dev_id)
1780 struct cx23885_dev *dev = dev_id;
1781 struct cx23885_tsport *ts1 = &dev->ts1;
1782 struct cx23885_tsport *ts2 = &dev->ts2;
1783 u32 pci_status, pci_mask;
1784 u32 vida_status, vida_mask;
1785 u32 audint_status, audint_mask;
1786 u32 ts1_status, ts1_mask;
1787 u32 ts2_status, ts2_mask;
1788 int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
1789 int audint_count = 0;
1790 bool subdev_handled;
/* Fast path out: nothing pending for us on this shared line. */
1792 pci_status = cx_read(PCI_INT_STAT);
1793 pci_mask = cx23885_irq_get_mask(dev);
1794 if ((pci_status & pci_mask) == 0) {
1795 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1796 pci_status, pci_mask);
/* Snapshot every sub-block's status/mask pair. */
1800 vida_status = cx_read(VID_A_INT_STAT);
1801 vida_mask = cx_read(VID_A_INT_MSK);
1802 audint_status = cx_read(AUDIO_INT_INT_STAT);
1803 audint_mask = cx_read(AUDIO_INT_INT_MSK);
1804 ts1_status = cx_read(VID_B_INT_STAT);
1805 ts1_mask = cx_read(VID_B_INT_MSK);
1806 ts2_status = cx_read(VID_C_INT_STAT);
1807 ts2_mask = cx_read(VID_C_INT_MSK);
1809 if (((pci_status & pci_mask) == 0) &&
1810 ((ts2_status & ts2_mask) == 0) &&
1811 ((ts1_status & ts1_mask) == 0))
1814 vida_count = cx_read(VID_A_GPCNT);
1815 audint_count = cx_read(AUD_INT_A_GPCNT);
1816 ts1_count = cx_read(ts1->reg_gpcnt);
1817 ts2_count = cx_read(ts2->reg_gpcnt);
1818 dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
1819 pci_status, pci_mask);
1820 dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
1821 vida_status, vida_mask, vida_count);
1822 dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
1823 audint_status, audint_mask, audint_count);
1824 dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
1825 ts1_status, ts1_mask, ts1_count);
1826 dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
1827 ts2_status, ts2_mask, ts2_count);
/* Verbose per-bit decode of the bridge-level status. */
1829 if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
1830 PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
1831 PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
1832 PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
1833 PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
1834 PCI_MSK_AV_CORE | PCI_MSK_IR)) {
1836 if (pci_status & PCI_MSK_RISC_RD)
1837 dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
1840 if (pci_status & PCI_MSK_RISC_WR)
1841 dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
1844 if (pci_status & PCI_MSK_AL_RD)
1845 dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
1848 if (pci_status & PCI_MSK_AL_WR)
1849 dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
1852 if (pci_status & PCI_MSK_APB_DMA)
1853 dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
1856 if (pci_status & PCI_MSK_VID_C)
1857 dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
1860 if (pci_status & PCI_MSK_VID_B)
1861 dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
1864 if (pci_status & PCI_MSK_VID_A)
1865 dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
1868 if (pci_status & PCI_MSK_AUD_INT)
1869 dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
1872 if (pci_status & PCI_MSK_AUD_EXT)
1873 dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
1876 if (pci_status & PCI_MSK_GPIO0)
1877 dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
1880 if (pci_status & PCI_MSK_GPIO1)
1881 dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
1884 if (pci_status & PCI_MSK_AV_CORE)
1885 dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
1888 if (pci_status & PCI_MSK_IR)
1889 dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
/* CAM/CI interrupt routing depends on the board's ci_type. */
1893 if (cx23885_boards[dev->board].ci_type == 1 &&
1894 (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
1895 handled += netup_ci_slot_status(dev, pci_status);
1897 if (cx23885_boards[dev->board].ci_type == 2 &&
1898 (pci_status & PCI_MSK_GPIO0))
1899 handled += altera_ci_irq(dev);
/* TS ports dispatch to the DVB or 417-encoder handler per board. */
1902 if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
1903 handled += cx23885_irq_ts(ts1, ts1_status);
1905 if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1906 handled += cx23885_irq_417(dev, ts1_status);
1910 if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
1911 handled += cx23885_irq_ts(ts2, ts2_status);
1913 if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
1914 handled += cx23885_irq_417(dev, ts2_status);
1918 handled += cx23885_video_irq(dev, vida_status);
1921 handled += cx23885_audio_irq(dev, audint_status, audint_mask);
/* IR is serviced synchronously by the v4l2 subdev's ISR callback. */
1923 if (pci_status & PCI_MSK_IR) {
1924 subdev_handled = false;
1925 v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
1926 pci_status, &subdev_handled);
/* AV core work is deferred: mask it here, handle in a workqueue. */
1931 if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
1932 cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
1933 schedule_work(&dev->cx25840_work);
/* Ack only the bits we were unmasked for. */
1938 cx_write(PCI_INT_STAT, pci_status & pci_mask);
1940 return IRQ_RETVAL(handled);
/*
 * v4l2_device notify callback: routes IR RX/TX notifications from the
 * IR subdevice to the driver's IR handlers.  May run in IRQ context,
 * so the handlers it calls must be IRQ-safe.
 */
1943 static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1944 unsigned int notification, void *arg)
1946 struct cx23885_dev *dev;
1951 dev = to_cx23885(sd->v4l2_dev);
1953 switch (notification) {
1954 case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
/* Only react to notifications from our own IR subdev. */
1955 if (sd == dev->sd_ir)
1956 cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1958 case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1959 if (sd == dev->sd_ir)
1960 cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
/*
 * Prepare deferred-work items used by subdevice notifications and hook
 * up the v4l2_device notify callback.  Must run before any subdevice
 * can raise a notification.
 */
1965 static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
1967 INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
1968 INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
1969 INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
1970 dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
1973 static inline int encoder_on_portb(struct cx23885_dev *dev)
1975 return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
1978 static inline int encoder_on_portc(struct cx23885_dev *dev)
1980 return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
1983 /* Mask represents 32 different GPIOs, GPIO's are split into multiple
1984 * registers depending on the board configuration (and whether the
1985 * 417 encoder (with its own GPIOs) is present. Each GPIO bit will
1986 * be pushed into the correct hardware register, regardless of the
1987 * physical location. Certain registers are shared so we sanity check
1988 * and report errors if we think we're tampering with a GPIO that might
1989 * be assigned to the encoder (and used for the host bus).
1991 * GPIO 2 through 0 - On the cx23885 bridge
1992 * GPIO 18 through 3 - On the cx23417 host bus interface
1993 * GPIO 23 through 19 - On the cx25840 a/v core
/*
 * Drive high the GPIO lines selected by @mask.  Bits 2..0 live on the
 * bridge (GP0_IO), bits 18..3 on the cx23417 host bus (MC417_RWD), and
 * bits 23..19 (cx25840 core) are not supported here.
 */
1995 void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1998 cx_set(GP0_IO, mask & 0x7);
2000 if (mask & 0x0007fff8) {
/* MC417 pins are shared with the encoder host bus — warn loudly. */
2001 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2002 pr_err("%s: Setting GPIO on encoder ports\n",
2004 cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
2008 if (mask & 0x00f80000)
2009 pr_info("%s: Unsupported\n", dev->name);
2012 void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
2014 if (mask & 0x00000007)
2015 cx_clear(GP0_IO, mask & 0x7);
2017 if (mask & 0x0007fff8) {
2018 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2019 pr_err("%s: Clearing GPIO moving on encoder ports\n",
2021 cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
2025 if (mask & 0x00f80000)
2026 pr_info("%s: Unsupported\n", dev->name);
2029 u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
2031 if (mask & 0x00000007)
2032 return (cx_read(GP0_IO) >> 8) & mask & 0x7;
2034 if (mask & 0x0007fff8) {
2035 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2036 pr_err("%s: Reading GPIO moving on encoder ports\n",
2038 return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
2042 if (mask & 0x00f80000)
2043 pr_info("%s: Unsupported\n", dev->name);
2048 void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
2050 if ((mask & 0x00000007) && asoutput)
2051 cx_set(GP0_IO, (mask & 0x7) << 16);
2052 else if ((mask & 0x00000007) && !asoutput)
2053 cx_clear(GP0_IO, (mask & 0x7) << 16);
2055 if (mask & 0x0007fff8) {
2056 if (encoder_on_portb(dev) || encoder_on_portc(dev))
2057 pr_err("%s: Enabling GPIO on encoder ports\n",
2061 /* MC417_OEN is active low for output, write 1 for an input */
2062 if ((mask & 0x0007fff8) && asoutput)
2063 cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
2065 else if ((mask & 0x0007fff8) && !asoutput)
2066 cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);
/* PCI vendor/device IDs of host bridges/IOMMUs known to stall the
 * cx23885 RiSC DMA engine; scanned by cx23885_does_need_dma_reset()
 * to decide whether the periodic DMA reset workaround is required. */
2073 } const broken_dev_id[] = {
2075 * https://openbenchmarking.org/system/1703021-RI-AMDZEN08075/Ryzen%207%201800X/lspci,
2076 * 0x1451 is PCI ID for the IOMMU found on Ryzen
2078 { PCI_VENDOR_ID_AMD, 0x1451 },
2079 /* According to sudo lspci -nn,
2080 * 0x1423 is the PCI ID for the IOMMU found on Kaveri
2082 { PCI_VENDOR_ID_AMD, 0x1423 },
2083 /* 0x1481 is the PCI ID for the IOMMU found on Starship/Matisse
2085 { PCI_VENDOR_ID_AMD, 0x1481 },
2086 /* 0x1419 is the PCI ID for the IOMMU found on 15h (Models 10h-1fh) family
2088 { PCI_VENDOR_ID_AMD, 0x1419 },
2089 /* 0x5a23 is the PCI ID for the IOMMU found on RD890S/RD990
2091 { PCI_VENDOR_ID_ATI, 0x5a23 },
/*
 * Decide whether the periodic RiSC DMA reset workaround is needed:
 * honour the dma_reset_workaround module parameter (0 = never,
 * 2 = always), otherwise probe for any IOMMU from broken_dev_id[].
 */
2094 static bool cx23885_does_need_dma_reset(void)
2097 struct pci_dev *pdev = NULL;
2099 if (dma_reset_workaround == 0)
2101 else if (dma_reset_workaround == 2)
2104 for (i = 0; i < ARRAY_SIZE(broken_dev_id); i++) {
2105 pdev = pci_get_device(broken_dev_id[i].vendor,
2106 broken_dev_id[i].dev, NULL);
/*
 * PCI probe: allocate the device state, register the v4l2 device and
 * control handler, enable the PCI device, set up the hardware, request
 * the (shared) IRQ and finally enable board-specific CI/IR interrupts.
 * Error paths unwind in reverse order via the fail_* labels.
 */
2115 static int cx23885_initdev(struct pci_dev *pci_dev,
2116 const struct pci_device_id *pci_id)
2118 struct cx23885_dev *dev;
2119 struct v4l2_ctrl_handler *hdl;
2122 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
/* Detect platforms needing the periodic RiSC DMA reset workaround. */
2126 dev->need_dma_reset = cx23885_does_need_dma_reset();
2128 err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
2132 hdl = &dev->ctrl_handler;
2133 v4l2_ctrl_handler_init(hdl, 6);
2138 dev->v4l2_dev.ctrl_handler = hdl;
2140 /* Prepare to handle notifications from subdevices */
2141 cx23885_v4l2_dev_notify_init(dev);
2145 if (pci_enable_device(pci_dev)) {
2150 if (cx23885_dev_setup(dev) < 0) {
2155 /* print pci info */
2156 dev->pci_rev = pci_dev->revision;
2157 pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
2158 pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
2160 pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
2162 (unsigned long long)pci_resource_start(pci_dev, 0));
2164 pci_set_master(pci_dev);
/* The bridge can only address 32 bits of DMA. */
2165 err = dma_set_mask(&pci_dev->dev, 0xffffffff);
2167 pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
2168 goto fail_dma_set_mask;
2171 err = request_irq(pci_dev->irq, cx23885_irq,
2172 IRQF_SHARED, dev->name, dev);
2174 pr_err("%s: can't get IRQ %d\n",
2175 dev->name, pci_dev->irq);
2176 goto fail_dma_set_mask;
/* Board-specific CI slot interrupts ride on the GPIO lines. */
2179 switch (dev->board) {
2180 case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2181 cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2183 case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2184 cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2189 * The CX2388[58] IR controller can start firing interrupts when
2190 * enabled, so these have to take place after the cx23885_irq() handler
2191 * is hooked up by the call to request_irq() above.
2193 cx23885_ir_pci_int_enable(dev);
2194 cx23885_input_init(dev);
/* Error unwind labels (reverse order of acquisition). */
2199 cx23885_dev_unregister(dev);
2201 v4l2_ctrl_handler_free(hdl);
2202 v4l2_device_unregister(&dev->v4l2_dev);
/*
 * PCI remove: tear down in reverse order of cx23885_initdev() —
 * input/IR first (they can raise interrupts), then shut the hardware
 * down, release the IRQ, and unregister everything.
 */
2208 static void cx23885_finidev(struct pci_dev *pci_dev)
2210 struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
2211 struct cx23885_dev *dev = to_cx23885(v4l2_dev);
2213 cx23885_input_fini(dev);
2214 cx23885_ir_fini(dev);
2216 cx23885_shutdown(dev);
2218 /* unregister stuff */
2219 free_irq(pci_dev->irq, dev);
2221 pci_disable_device(pci_dev);
2223 cx23885_dev_unregister(dev);
2224 v4l2_ctrl_handler_free(&dev->ctrl_handler);
2225 v4l2_device_unregister(v4l2_dev);
/* PCI IDs this driver binds to; any subsystem vendor/device accepted. */
2229 static const struct pci_device_id cx23885_pci_tbl[] = {
2234 .subvendor = PCI_ANY_ID,
2235 .subdevice = PCI_ANY_ID,
2240 .subvendor = PCI_ANY_ID,
2241 .subdevice = PCI_ANY_ID,
2243 /* --- end of list --- */
2246 MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
/* PCI driver glue: binds cx23885_initdev/cx23885_finidev to the IDs above. */
2248 static struct pci_driver cx23885_pci_driver = {
2250 .id_table = cx23885_pci_tbl,
2251 .probe = cx23885_initdev,
2252 .remove = cx23885_finidev,
/* Module entry point: log the driver version and register with PCI core. */
2255 static int __init cx23885_init(void)
2257 pr_info("cx23885 driver version %s loaded\n",
2259 return pci_register_driver(&cx23885_pci_driver);
/* Module exit point: unregister the PCI driver (removes all devices). */
2262 static void __exit cx23885_fini(void)
2264 pci_unregister_driver(&cx23885_pci_driver);
2267 module_init(cx23885_init);
2268 module_exit(cx23885_fini);