1 // SPDX-License-Identifier: GPL-2.0-only
5 * Author Karsten Keil <keil@isdn4linux.de>
7 * Copyright 2009 by Karsten Keil <keil@isdn4linux.de>
10 #include <linux/interrupt.h>
11 #include <linux/module.h>
12 #include <linux/pci.h>
13 #include <linux/delay.h>
14 #include <linux/mISDNhw.h>
15 #include <linux/slab.h>
21 #define NETJET_REV "2.0"
49 struct isdnhdlc_vars hsend;
50 struct isdnhdlc_vars hrecv;
55 #define TX_INIT 0x0001
56 #define TX_IDLE 0x0002
58 #define TX_UNDERRUN 0x0100
59 #define RX_OVERRUN 0x0100
64 struct list_head list;
66 char name[MISDN_MAX_IDLEN];
74 spinlock_t lock; /* lock HW */
76 struct tiger_dma send;
77 struct tiger_dma recv;
78 struct tiger_ch bc[2];
87 static LIST_HEAD(Cards);
88 static DEFINE_RWLOCK(card_lock); /* protect Cards */
/* Propagate the module-wide 'debug' mask into the ISAC D-channel and
 * both B-channel debug fields of one card. (Listing excerpt; braces and
 * some lines elided.) */
93 _set_debug(struct tiger_hw *card)
95 card->isac.dch.debug = debug;
96 card->bc[0].bch.debug = debug;
97 card->bc[1].bch.debug = debug;
/* module_param_call() setter for 'debug': parse the new value with
 * param_set_uint(), then walk the global Cards list under the read
 * side of card_lock and push the mask into every registered card. */
101 set_debug(const char *val, const struct kernel_param *kp)
104 struct tiger_hw *card;
106 ret = param_set_uint(val, kp);
108 read_lock(&card_lock);
109 list_for_each_entry(card, &Cards, list)
111 read_unlock(&card_lock);
116 MODULE_AUTHOR("Karsten Keil");
117 MODULE_LICENSE("GPL v2");
118 MODULE_VERSION(NETJET_REV);
119 module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
120 MODULE_PARM_DESC(debug, "Netjet debug mask");
/* Mask all hardware interrupt sources by zeroing both IRQ mask
 * registers of the TigerJet chip. */
123 nj_disable_hwirq(struct tiger_hw *card)
125 outb(0, card->base + NJ_IRQMASK0);
126 outb(0, card->base + NJ_IRQMASK1);
/* Indirect ISAC register read. The upper bits of 'offset' select the
 * register page via the two low bits of AUXDATA; the low 4 bits,
 * shifted left by 2, form the I/O port offset from NJ_ISAC_OFF. */
131 ReadISAC_nj(void *p, u8 offset)
133 struct tiger_hw *card = p;
137 card->auxd |= (offset >> 4) & 3;
138 outb(card->auxd, card->base + NJ_AUXDATA);
139 ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
/* Indirect ISAC register write; same page-select-via-AUXDATA and
 * address-translation scheme as ReadISAC_nj(). */
144 WriteISAC_nj(void *p, u8 offset, u8 value)
146 struct tiger_hw *card = p;
149 card->auxd |= (offset >> 4) & 3;
150 outb(card->auxd, card->base + NJ_AUXDATA);
151 outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
/* Burst-read 'size' bytes from the ISAC FIFO (string I/O via insb)
 * after restoring the current AUXDATA page selection. */
155 ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
157 struct tiger_hw *card = p;
160 outb(card->auxd, card->base + NJ_AUXDATA);
161 insb(card->base + NJ_ISAC_OFF, data, size);
/* Burst-write 'size' bytes to the ISAC FIFO (string I/O via outsb)
 * after restoring the current AUXDATA page selection. */
165 WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
167 struct tiger_hw *card = p;
170 outb(card->auxd, card->base + NJ_AUXDATA);
171 outsb(card->base + NJ_ISAC_OFF, data, size);
/* Fill 'cnt' u32 slots of the send DMA ring starting at 'idx' with the
 * byte pattern 'fill', touching only this channel's byte lane inside
 * each 32-bit word ('mask' selects the lane; the nr & 2 branch handles
 * the second B-channel — lane details elided in this listing). The
 * index wraps at card->send.size. */
175 fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
177 struct tiger_hw *card = bc->bch.hw;
178 u32 mask = 0xff, val;
180 pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
181 bc->bch.nr, fill, cnt, idx, card->send.idx);
182 if (bc->bch.nr & 2) {
188 val = card->send.start[idx];
191 card->send.start[idx++] = val;
192 if (idx >= card->send.size)
/* Switch a B-channel to 'protocol'. ISDN_P_NONE tears the channel
 * down (ring filled with 0xff idle, DMA + IRQs stopped only once BOTH
 * channels are NONE, HDLC/TRANSPARENT flags cleared). The transparent
 * and HDLC cases set the matching flag, reset the tx state to
 * TX_INIT|TX_IDLE with half the ring free, and (HDLC only) initialize
 * the isdnhdlc encoder/decoder; DMA and IRQ mask 0x0f are enabled the
 * first time (!card->dmactrl). On exit, dmacur/idx are resynced from
 * the current hardware DMA addresses. (Listing excerpt; switch labels
 * and some statements elided.) */
198 mode_tiger(struct tiger_ch *bc, u32 protocol)
200 struct tiger_hw *card = bc->bch.hw;
202 pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
203 bc->bch.nr, bc->bch.state, protocol);
206 if (bc->bch.state == ISDN_P_NONE)
208 fill_mem(bc, 0, card->send.size, 0xff);
209 bc->bch.state = protocol;
210 /* only stop dma and interrupts if both channels NULL */
211 if ((card->bc[0].bch.state == ISDN_P_NONE) &&
212 (card->bc[1].bch.state == ISDN_P_NONE)) {
214 outb(card->dmactrl, card->base + NJ_DMACTRL);
215 outb(0, card->base + NJ_IRQMASK0);
217 test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
218 test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
/* transparent (raw) protocol setup */
224 test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
225 bc->bch.state = protocol;
227 bc->free = card->send.size / 2;
229 bc->txstate = TX_INIT | TX_IDLE;
231 if (!card->dmactrl) {
233 outb(card->dmactrl, card->base + NJ_DMACTRL);
234 outb(0x0f, card->base + NJ_IRQMASK0);
/* HDLC protocol setup */
238 test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
239 bc->bch.state = protocol;
241 bc->free = card->send.size / 2;
243 bc->txstate = TX_INIT | TX_IDLE;
244 isdnhdlc_rcv_init(&bc->hrecv, 0);
245 isdnhdlc_out_init(&bc->hsend, 0);
247 if (!card->dmactrl) {
249 outb(card->dmactrl, card->base + NJ_DMACTRL);
250 outb(0x0f, card->base + NJ_IRQMASK0);
254 pr_info("%s: %s protocol %x not handled\n", card->name,
/* resync software ring indices with the hardware DMA pointers */
258 card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
259 card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
260 card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
261 card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
262 pr_debug("%s: %s ctrl %x irq %02x/%02x idx %d/%d\n",
263 card->name, __func__,
264 inb(card->base + NJ_DMACTRL),
265 inb(card->base + NJ_IRQMASK0),
266 inb(card->base + NJ_IRQSTAT0),
/* Hardware reset: assert reset via NJ_CTRL (0xff), then release it.
 * The TJ320 variant needs ctrlreg 0x40 ("status read clear"), the
 * TJ300 uses 0x00 — the IRQ handling differs between the two (see the
 * edge-trigger comment below). Finally configure the AUX pins: all
 * outputs except the ISAC IRQ pin, and unmask only the ISAC IRQ in
 * IRQMASK1. */
273 nj_reset(struct tiger_hw *card)
275 outb(0xff, card->base + NJ_CTRL); /* Reset On */
278 /* now edge triggered for TJ320 GE 13/07/00 */
279 /* see comment in IRQ function */
280 if (card->typ == NETJET_S_TJ320) /* TJ320 */
281 card->ctrlreg = 0x40; /* Reset Off and status read clear */
283 card->ctrlreg = 0x00; /* Reset Off and status read clear */
284 outb(card->ctrlreg, card->base + NJ_CTRL);
287 /* configure AUX pins (all output except ISAC IRQ pin) */
290 outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
291 outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
292 outb(card->auxd, card->base + NJ_AUXDATA);
/* Allocate and program the TigerJet DMA machinery:
 *  - one coherent DMA region of NJ_DMA_SIZE, split in half for send
 *    (READ DMA channel) and receive (WRITE DMA channel); the chip only
 *    takes 32-bit bus addresses, so a region above 4 GiB is rejected;
 *  - per-B-channel HDLC staging buffers (hsbuf/hrbuf);
 *  - start/irq/end DMA registers, with the irq address placed at the
 *    ring midpoint so the chip interrupts every half buffer.
 * (Listing excerpt; error-path statements elided.) */
296 inittiger(struct tiger_hw *card)
300 card->dma_p = dma_alloc_coherent(&card->pdev->dev, NJ_DMA_SIZE,
301 &card->dma, GFP_ATOMIC);
303 pr_info("%s: No DMA memory\n", card->name);
306 if ((u64)card->dma > 0xffffffff) {
307 pr_info("%s: DMA outside 32 bit\n", card->name);
310 for (i = 0; i < 2; i++) {
311 card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
312 if (!card->bc[i].hsbuf) {
313 pr_info("%s: no B%d send buffer\n", card->name, i + 1);
316 card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
317 if (!card->bc[i].hrbuf) {
318 pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
/* fill the whole DMA area with the 0xff idle pattern */
322 memset(card->dma_p, 0xff, NJ_DMA_SIZE);
324 card->send.start = card->dma_p;
325 card->send.dmastart = (u32)card->dma;
326 card->send.dmaend = card->send.dmastart +
327 (4 * (NJ_DMA_TXSIZE - 1));
328 card->send.dmairq = card->send.dmastart +
329 (4 * ((NJ_DMA_TXSIZE / 2) - 1));
330 card->send.size = NJ_DMA_TXSIZE;
332 if (debug & DEBUG_HW)
333 pr_notice("%s: send buffer phy %#x - %#x - %#x virt %p"
334 " size %zu u32\n", card->name,
335 card->send.dmastart, card->send.dmairq,
336 card->send.dmaend, card->send.start, card->send.size);
338 outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
339 outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
340 outl(card->send.dmaend, card->base + NJ_DMA_READ_END);
/* second half of the DMA region is the receive ring */
342 card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
343 card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
344 card->recv.dmaend = card->recv.dmastart +
345 (4 * (NJ_DMA_RXSIZE - 1));
346 card->recv.dmairq = card->recv.dmastart +
347 (4 * ((NJ_DMA_RXSIZE / 2) - 1));
348 card->recv.size = NJ_DMA_RXSIZE;
350 if (debug & DEBUG_HW)
351 pr_notice("%s: recv buffer phy %#x - %#x - %#x virt %p"
352 " size %zu u32\n", card->name,
353 card->recv.dmastart, card->recv.dmairq,
354 card->recv.dmaend, card->recv.start, card->recv.size);
356 outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
357 outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
358 outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
/* Copy 'cnt' received samples for one B-channel out of the shared
 * receive DMA ring starting at 'idx'. Detects overruns (lastrx still
 * equals idx), honours FLG_RX_OFF by only counting dropped bytes, and
 * then either delivers raw data (FLG_TRANSPARENT) or feeds the bytes
 * through isdnhdlc_decode(), pushing each complete valid frame up with
 * recv_Bchannel() and reporting CRC/framing/length errors. (Listing
 * excerpt; byte-lane extraction and loop bodies elided.) */
363 read_dma(struct tiger_ch *bc, u32 idx, int cnt)
365 struct tiger_hw *card = bc->bch.hw;
370 if (bc->lastrx == idx) {
371 bc->rxstate |= RX_OVERRUN;
372 pr_info("%s: B%1d overrun at idx %d\n", card->name,
376 if (test_bit(FLG_RX_OFF, &bc->bch.Flags)) {
377 bc->bch.dropcnt += cnt;
380 stat = bchannel_get_rxbuf(&bc->bch, cnt);
381 /* only transparent use the count here, HDLC overun is detected later */
382 if (stat == -ENOMEM) {
383 pr_warn("%s.B%d: No memory for %d bytes\n",
384 card->name, bc->bch.nr, cnt);
387 if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
388 p = skb_put(bc->bch.rx_skb, cnt);
/* pull the channel's byte out of each 32-bit ring word, wrapping idx */
392 for (i = 0; i < cnt; i++) {
393 val = card->recv.start[idx++];
396 if (idx >= card->recv.size)
401 if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
402 recv_Bchannel(&bc->bch, 0, false);
/* HDLC path: decode the raw stream into frames */
408 stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
409 bc->bch.rx_skb->data, bc->bch.maxlen);
410 if (stat > 0) { /* valid frame received */
411 p = skb_put(bc->bch.rx_skb, stat);
412 if (debug & DEBUG_HW_BFIFO) {
413 snprintf(card->log, LOG_SIZE,
414 "B%1d-recv %s %d ", bc->bch.nr,
416 print_hex_dump_bytes(card->log,
417 DUMP_PREFIX_OFFSET, p,
420 recv_Bchannel(&bc->bch, 0, false);
421 stat = bchannel_get_rxbuf(&bc->bch, bc->bch.maxlen);
423 pr_warn("%s.B%d: No memory for %d bytes\n",
424 card->name, bc->bch.nr, cnt);
427 } else if (stat == -HDLC_CRC_ERROR) {
428 pr_info("%s: B%1d receive frame CRC error\n",
429 card->name, bc->bch.nr);
430 } else if (stat == -HDLC_FRAMING_ERROR) {
431 pr_info("%s: B%1d receive framing error\n",
432 card->name, bc->bch.nr);
433 } else if (stat == -HDLC_LENGTH_ERROR) {
434 pr_info("%s: B%1d receive frame too long (> %d)\n",
435 card->name, bc->bch.nr, bc->bch.maxlen);
/* Half-buffer receive interrupt: latch the WRITE-DMA bits of the irq
 * status into last_is0, pick which half of the ring just completed
 * (end-of-buffer irq -> second half), and hand that half to read_dma()
 * for every active B-channel. */
443 recv_tiger(struct tiger_hw *card, u8 irq_stat)
446 int cnt = card->recv.size / 2;
448 /* Note receive is via the WRITE DMA channel */
449 card->last_is0 &= ~NJ_IRQM0_WR_MASK;
450 card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);
452 if (irq_stat & NJ_IRQM0_WR_END)
455 idx = card->recv.size - 1;
457 if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
458 read_dma(&card->bc[0], idx, cnt);
459 if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
460 read_dma(&card->bc[1], idx, cnt);
463 /* sync with current DMA address at start or after exception */
/* Re-read the hardware's current send DMA address and resynchronize
 * the software write index: the channel restarts at the boundary of
 * the next completely free half-ring and leaves TX_RUN state set.
 * NOTE(review): the port is formed with '|' (base | NJ_DMA_READ_ADR),
 * as in nj_irq() — this relies on the I/O base being suitably aligned;
 * mode_tiger() uses '+' for the same register. Confirm intentional. */
465 resync(struct tiger_ch *bc, struct tiger_hw *card)
467 card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
468 card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
469 if (bc->free > card->send.size / 2)
470 bc->free = card->send.size / 2;
471 /* currently we simple sync to the next complete free area
472 * this hast the advantage that we have always maximum time to
475 if (card->send.idx < ((card->send.size / 2) - 1))
476 bc->idx = (card->recv.size / 2) - 1;
478 bc->idx = card->recv.size - 1;
479 bc->txstate = TX_RUN;
480 pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
481 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
484 static int bc_next_frame(struct tiger_ch *);
/* Emit HDLC interframe flags into the send ring when the channel has
 * no frame data: resync first if coming out of idle/init/underrun,
 * then let isdnhdlc_encode() (with no payload) produce flag bytes and
 * merge them into this channel's byte lane of each 32-bit ring word
 * ('m' masks the other channel's lane). */
487 fill_hdlc_flag(struct tiger_ch *bc)
489 struct tiger_hw *card = bc->bch.hw;
496 pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
497 __func__, bc->bch.nr, bc->free, bc->txstate,
498 bc->idx, card->send.idx);
499 if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
501 count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
502 bc->hsbuf, bc->free);
503 pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
/* B1 occupies the low byte lane, B2 the next one up */
507 m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
508 for (i = 0; i < count; i++) {
509 if (bc->idx >= card->send.size)
511 v = card->send.start[bc->idx];
513 v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
514 card->send.start[bc->idx++] = v;
516 if (debug & DEBUG_HW_BFIFO) {
517 snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
518 bc->bch.nr, card->name, count);
519 print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
/* Move outgoing data from the current tx_skb into the send DMA ring.
 * With no skb and FLG_TX_EMPTY set, half a ring of fill bytes is sent
 * instead (fillempty path). HDLC channels first run the payload
 * through isdnhdlc_encode() into hsbuf; transparent data is copied
 * directly. The transfer is clamped to bc->free, tx_idx advances by
 * the consumed count, and each byte is merged into this channel's
 * lane of the 32-bit ring words (mask 'm'), wrapping at send.size.
 * (Listing excerpt; several statements elided.) */
524 fill_dma(struct tiger_ch *bc)
526 struct tiger_hw *card = bc->bch.hw;
527 int count, i, fillempty = 0;
533 if (!bc->bch.tx_skb) {
534 if (!test_bit(FLG_TX_EMPTY, &bc->bch.Flags))
537 count = card->send.size >> 1;
540 count = bc->bch.tx_skb->len - bc->bch.tx_idx;
543 pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n",
544 card->name, __func__, bc->bch.nr, count, bc->free,
545 bc->bch.tx_idx, bc->bch.tx_skb->len, bc->txstate,
546 bc->idx, card->send.idx);
547 p = bc->bch.tx_skb->data + bc->bch.tx_idx;
549 if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
551 if (test_bit(FLG_HDLC, &bc->bch.Flags) && !fillempty) {
552 count = isdnhdlc_encode(&bc->hsend, p, count, &i,
553 bc->hsbuf, bc->free);
554 pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
555 bc->bch.nr, i, count);
560 if (count > bc->free)
563 bc->bch.tx_idx += count;
566 m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
/* fillempty: repeat one fill byte, pre-shifted for B2's lane */
569 if (!(bc->bch.nr & 1))
571 for (i = 0; i < count; i++) {
572 if (bc->idx >= card->send.size)
574 v = card->send.start[bc->idx];
577 card->send.start[bc->idx++] = v;
/* normal path: copy payload bytes into the channel's lane */
580 for (i = 0; i < count; i++) {
581 if (bc->idx >= card->send.size)
583 v = card->send.start[bc->idx];
586 v |= (bc->bch.nr & 1) ? n : n << 8;
587 card->send.start[bc->idx++] = v;
590 if (debug & DEBUG_HW_BFIFO) {
591 snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
592 bc->bch.nr, card->name, count);
593 print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
/* Advance transmission: if the current tx_skb still has unsent data,
 * keep it; otherwise free it and pull the next frame from the bchannel
 * queue via get_next_bframe(). With an empty queue, FLG_TX_EMPTY (or
 * FLG_FILLEMPTY promoting to FLG_TX_EMPTY) keeps the fill path going.
 * (Listing excerpt; fill_dma calls and return values elided.) */
601 bc_next_frame(struct tiger_ch *bc)
605 if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len) {
608 dev_kfree_skb(bc->bch.tx_skb);
609 if (get_next_bframe(&bc->bch)) {
611 test_and_clear_bit(FLG_TX_EMPTY, &bc->bch.Flags);
612 } else if (test_bit(FLG_TX_EMPTY, &bc->bch.Flags)) {
614 } else if (test_bit(FLG_FILLEMPTY, &bc->bch.Flags)) {
615 test_and_set_bit(FLG_TX_EMPTY, &bc->bch.Flags);
/* Per-channel half-buffer transmit interrupt: credit half a ring to
 * bc->free; if the whole ring became free while the channel was
 * supposed to be running, report and latch a TX underrun. Then try to
 * queue the next frame; with nothing to send, HDLC channels emit
 * flags, otherwise the remaining space is padded with 0xff and the
 * channel drops to TX_IDLE once the full ring is free again. */
625 send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
629 bc->free += card->send.size / 2;
630 if (bc->free >= card->send.size) {
631 if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
632 pr_info("%s: B%1d TX underrun state %x\n", card->name,
633 bc->bch.nr, bc->txstate);
634 bc->txstate |= TX_UNDERRUN;
636 bc->free = card->send.size;
638 ret = bc_next_frame(bc);
640 if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
644 pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
645 bc->bch.nr, bc->free, bc->idx, card->send.idx);
646 if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
647 fill_mem(bc, bc->idx, bc->free, 0xff);
648 if (bc->free == card->send.size)
649 bc->txstate |= TX_IDLE;
/* Transmit interrupt dispatch: warn if the same READ-DMA status bits
 * fired twice in a row (double dma int), latch the READ-DMA bits into
 * last_is0, then service both B-channels that are active. */
655 send_tiger(struct tiger_hw *card, u8 irq_stat)
659 /* Note send is via the READ DMA channel */
660 if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
661 pr_info("%s: tiger warn write double dma %x/%x\n",
662 card->name, irq_stat, card->last_is0);
665 card->last_is0 &= ~NJ_IRQM0_RD_MASK;
666 card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
668 for (i = 0; i < 2; i++) {
669 if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
670 send_tiger_bc(card, &card->bc[i]);
/* Shared interrupt handler. Under card->lock: read both status
 * registers; bail out (not ours) when only the ISAC line is raised
 * with no tiger status. A low NJ_ISACIRQ bit means an ISAC (D-channel)
 * interrupt, forwarded to mISDNisac_irq(). For DMA interrupts the
 * current READ/WRITE DMA addresses are sampled and s0val is rebuilt as
 * free-half-buffer bits (0x04/0x08 write, 0x01/0x02 read); a change
 * versus last_is0 dispatches to send_tiger()/recv_tiger().
 * NOTE(review): I/O ports here are formed with '|' (base | reg),
 * relying on base alignment — same pattern as resync(). */
675 nj_irq(int intno, void *dev_id)
677 struct tiger_hw *card = dev_id;
678 u8 val, s1val, s0val;
680 spin_lock(&card->lock);
681 s0val = inb(card->base | NJ_IRQSTAT0);
682 s1val = inb(card->base | NJ_IRQSTAT1);
683 if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
685 spin_unlock(&card->lock);
688 pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
690 if (!(s1val & NJ_ISACIRQ)) {
691 val = ReadISAC_nj(card, ISAC_ISTA);
693 mISDNisac_irq(&card->isac, val);
/* acknowledge the tiger status bits */
698 outb(s0val, card->base | NJ_IRQSTAT0);
702 /* set bits in sval to indicate which page is free */
703 card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
704 card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
705 if (card->recv.dmacur < card->recv.dmairq)
706 s0val = 0x08; /* the 2nd write area is free */
708 s0val = 0x04; /* the 1st write area is free */
710 card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
711 card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
712 if (card->send.dmacur < card->send.dmairq)
713 s0val |= 0x02; /* the 2nd read area is free */
715 s0val |= 0x01; /* the 1st read area is free */
717 pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
718 s1val, s0val, card->last_is0,
719 card->recv.idx, card->send.idx);
720 /* test if we have a DMA interrupt */
721 if (s0val != card->last_is0) {
722 if ((s0val & NJ_IRQM0_RD_MASK) !=
723 (card->last_is0 & NJ_IRQM0_RD_MASK))
724 /* got a write dma int */
725 send_tiger(card, s0val);
726 if ((s0val & NJ_IRQM0_WR_MASK) !=
727 (card->last_is0 & NJ_IRQM0_WR_MASK))
728 /* got a read dma int */
729 recv_tiger(card, s0val);
732 spin_unlock(&card->lock);
/* B-channel L2->L1 message entry point. PH_DATA is queued with
 * bchannel_senddata() (positive return = direct TX). PH_ACTIVATE_REQ
 * switches the channel mode via mode_tiger() on first activation and
 * confirms with PH_ACTIVATE_IND; PH_DEACTIVATE_REQ clears the channel,
 * returns it to ISDN_P_NONE and confirms with PH_DEACTIVATE_IND. All
 * hardware access happens under card->lock. (Listing excerpt; switch
 * header and some statements elided.) */
737 nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
740 struct bchannel *bch = container_of(ch, struct bchannel, ch);
741 struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
742 struct tiger_hw *card = bch->hw;
743 struct mISDNhead *hh = mISDN_HEAD_P(skb);
748 spin_lock_irqsave(&card->lock, flags);
749 ret = bchannel_senddata(bch, skb);
750 if (ret > 0) { /* direct TX */
754 spin_unlock_irqrestore(&card->lock, flags);
756 case PH_ACTIVATE_REQ:
757 spin_lock_irqsave(&card->lock, flags);
758 if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
759 ret = mode_tiger(bc, ch->protocol);
762 spin_unlock_irqrestore(&card->lock, flags);
764 _queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
767 case PH_DEACTIVATE_REQ:
768 spin_lock_irqsave(&card->lock, flags);
769 mISDN_clear_bchannel(bch);
770 mode_tiger(bc, ISDN_P_NONE);
771 spin_unlock_irqrestore(&card->lock, flags);
772 _queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
/* Thin wrapper: delegate B-channel control requests to the generic
 * mISDN_ctrl_bchannel() helper. */
783 channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
785 return mISDN_ctrl_bchannel(&bc->bch, cq);
/* B-channel ctrl callback. On channel close: clear FLG_OPEN, cancel
 * the workqueue item, reset the bchannel and hardware mode under the
 * card lock, and drop the module reference. CONTROL_CHANNEL requests
 * are forwarded to channel_bctrl(); anything else is logged as
 * unknown. (Listing excerpt; switch header elided.) */
789 nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
791 struct bchannel *bch = container_of(ch, struct bchannel, ch);
792 struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
793 struct tiger_hw *card = bch->hw;
797 pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
800 test_and_clear_bit(FLG_OPEN, &bch->Flags);
801 cancel_work_sync(&bch->workq);
802 spin_lock_irqsave(&card->lock, flags);
803 mISDN_clear_bchannel(bch);
804 mode_tiger(bc, ISDN_P_NONE);
805 spin_unlock_irqrestore(&card->lock, flags);
806 ch->protocol = ISDN_P_NONE;
808 module_put(THIS_MODULE);
811 case CONTROL_CHANNEL:
812 ret = channel_bctrl(bc, arg);
815 pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
/* Device-level control requests: GETOP advertises LOOP and L1_TIMER3
 * support; LOOP validates the channel selector (0=off, 1=B1, 2=B2,
 * 3=both) and forwards HW_TESTLOOP to the ISAC; L1_TIMER3 forwards the
 * timer value. Unknown ops are logged. */
821 channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
826 case MISDN_CTRL_GETOP:
827 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
829 case MISDN_CTRL_LOOP:
830 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
831 if (cq->channel < 0 || cq->channel > 3) {
835 ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
837 case MISDN_CTRL_L1_TIMER3:
838 ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
841 pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
/* Validate and claim a B-channel open request: channel must be 1 or 2
 * and a real protocol must be requested. FLG_OPEN acts as the
 * single-open guard (-EBUSY on second open); FLG_FILLEMPTY is reset
 * and the requested protocol stored on the channel. */
849 open_bchannel(struct tiger_hw *card, struct channel_req *rq)
851 struct bchannel *bch;
853 if (rq->adr.channel == 0 || rq->adr.channel > 2)
855 if (rq->protocol == ISDN_P_NONE)
857 bch = &card->bc[rq->adr.channel - 1].bch;
858 if (test_and_set_bit(FLG_OPEN, &bch->Flags))
859 return -EBUSY; /* b-channel can be only open once */
860 test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
861 bch->ch.protocol = rq->protocol;
867 * device control function
/* D-channel/device ctrl callback: open routes ISDN_P_TE_S0 to the
 * ISAC open and everything else to open_bchannel(), taking a module
 * reference on success; close drops the reference; CONTROL_CHANNEL
 * forwards to channel_ctrl(). (Listing excerpt; switch header and
 * case labels elided.) */
870 nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
872 struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
873 struct dchannel *dch = container_of(dev, struct dchannel, dev);
874 struct tiger_hw *card = dch->hw;
875 struct channel_req *rq;
878 pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
882 if (rq->protocol == ISDN_P_TE_S0)
883 err = card->isac.open(&card->isac, rq);
885 err = open_bchannel(card, rq);
888 if (!try_module_get(THIS_MODULE))
889 pr_info("%s: cannot get module\n", card->name);
892 pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
893 __builtin_return_address(0));
894 module_put(THIS_MODULE);
896 case CONTROL_CHANNEL:
897 err = channel_ctrl(card, arg);
900 pr_debug("%s: %s unknown command %x\n",
901 card->name, __func__, cmd);
/* Bring the card up: mask hardware interrupts, request the (shared)
 * PCI IRQ, then under the card lock initialize the ISAC, set up the
 * tiger DMA rings via inittiger(), and park both B-channels in
 * ISDN_P_NONE. (Listing excerpt; error-path statements elided.) */
908 nj_init_card(struct tiger_hw *card)
913 spin_lock_irqsave(&card->lock, flags);
914 nj_disable_hwirq(card);
915 spin_unlock_irqrestore(&card->lock, flags);
917 card->irq = card->pdev->irq;
918 if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
919 pr_info("%s: couldn't get interrupt %d\n",
920 card->name, card->irq);
925 spin_lock_irqsave(&card->lock, flags);
927 ret = card->isac.init(&card->isac);
930 ret = inittiger(card);
933 mode_tiger(&card->bc[0], ISDN_P_NONE);
934 mode_tiger(&card->bc[1], ISDN_P_NONE);
936 spin_unlock_irqrestore(&card->lock, flags);
/* Full teardown, reverse of setup: disable IRQs and both channels
 * under the lock, release the ISAC and the I/O region, free the IRQ,
 * unregister the mISDN device if registered, free per-channel
 * bchannels and buffers, release the coherent DMA region, unlink the
 * card from the global list (write side of card_lock), and disable
 * the PCI device. (Listing excerpt; guard statements elided.) */
942 nj_release(struct tiger_hw *card)
948 spin_lock_irqsave(&card->lock, flags);
949 nj_disable_hwirq(card);
950 mode_tiger(&card->bc[0], ISDN_P_NONE);
951 mode_tiger(&card->bc[1], ISDN_P_NONE);
952 spin_unlock_irqrestore(&card->lock, flags);
953 card->isac.release(&card->isac);
954 release_region(card->base, card->base_s);
958 free_irq(card->irq, card);
959 if (device_is_registered(&card->isac.dch.dev.dev))
960 mISDN_unregister_device(&card->isac.dch.dev);
962 for (i = 0; i < 2; i++) {
963 mISDN_freebchannel(&card->bc[i].bch);
964 kfree(card->bc[i].hsbuf);
965 kfree(card->bc[i].hrbuf);
968 dma_free_coherent(&card->pdev->dev, NJ_DMA_SIZE, card->dma_p,
970 write_lock_irqsave(&card_lock, flags);
971 list_del(&card->list);
972 write_unlock_irqrestore(&card_lock, flags);
973 pci_disable_device(card->pdev);
974 pci_set_drvdata(card->pdev, NULL);
/* Claim the card's PCI BAR 0 I/O region (start/length cached in
 * base/base_s) and hook up the ISAC register-access callbacks via
 * ASSIGN_FUNC. (Listing excerpt; return statements elided.) */
980 nj_setup(struct tiger_hw *card)
982 card->base = pci_resource_start(card->pdev, 0);
983 card->base_s = pci_resource_len(card->pdev, 0);
984 if (!request_region(card->base, card->base_s, card->name)) {
985 pr_info("%s: NETjet config port %#x-%#x already in use\n",
986 card->name, card->base,
987 (u32)(card->base + card->base_s - 1));
991 ASSIGN_FUNC(nj, ISAC, card->isac);
/* Build one driver instance: name it "netjet.N", add it to the global
 * Cards list, init the lock and ISAC state, advertise RAW+HDLC
 * B-protocols, wire up both B-channels (send/ctrl callbacks, channel
 * map, rx buffer sized NJ_DMA_RXSIZE/2), then run nj_setup(), register
 * the mISDN device and initialize the hardware. (Listing excerpt;
 * error-unwind statements elided.) */
997 setup_instance(struct tiger_hw *card)
1002 snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
1003 write_lock_irqsave(&card_lock, flags);
1004 list_add_tail(&card->list, &Cards);
1005 write_unlock_irqrestore(&card_lock, flags);
1008 card->isac.name = card->name;
1009 spin_lock_init(&card->lock);
1010 card->isac.hwlock = &card->lock;
1011 mISDNisac_init(&card->isac, card);
1013 card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
1014 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
1015 card->isac.dch.dev.D.ctrl = nj_dctrl;
1016 for (i = 0; i < 2; i++) {
1017 card->bc[i].bch.nr = i + 1;
1018 set_channelmap(i + 1, card->isac.dch.dev.channelmap);
1019 mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM,
1020 NJ_DMA_RXSIZE >> 1);
1021 card->bc[i].bch.hw = card;
1022 card->bc[i].bch.ch.send = nj_l2l1B;
1023 card->bc[i].bch.ch.ctrl = nj_bctrl;
1024 card->bc[i].bch.ch.nr = i + 1;
1025 list_add(&card->bc[i].bch.ch.list,
1026 &card->isac.dch.dev.bchannels);
/* NOTE(review): bch.hw already set above (line 1021) — redundant */
1027 card->bc[i].bch.hw = card;
1029 err = nj_setup(card);
1032 err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
1036 err = nj_init_card(card);
1039 pr_notice("Netjet %d cards installed\n", nj_cnt);
/* PCI probe. The TigerJet device ID is shared by several non-ISDN
 * boards, so known-incompatible subsystem IDs (Digium X100P/X101P,
 * Enter!Now, Digium TDM400P) are rejected up front. Then allocate the
 * card, enable the PCI device and bus mastering, and distinguish
 * TJ300 vs TJ320 via bit 20 of PCI config dword 0x04 (they share one
 * device ID but need different IRQ handling). setup_instance()
 * finishes initialization; drvdata is cleared again on failure.
 * (Listing excerpt; error-path statements elided.) */
1048 nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1052 struct tiger_hw *card;
1054 if (pdev->subsystem_vendor == 0x8086 &&
1055 pdev->subsystem_device == 0x0003) {
1056 pr_notice("Netjet: Digium X100P/X101P not handled\n");
1060 if (pdev->subsystem_vendor == 0x55 &&
1061 pdev->subsystem_device == 0x02) {
1062 pr_notice("Netjet: Enter!Now not handled yet\n");
1066 if (pdev->subsystem_vendor == 0xb100 &&
1067 pdev->subsystem_device == 0x0003) {
1068 pr_notice("Netjet: Digium TDM400P not handled yet\n");
1072 card = kzalloc(sizeof(struct tiger_hw), GFP_KERNEL);
1074 pr_info("No kmem for Netjet\n");
1080 err = pci_enable_device(pdev);
1086 printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
1089 pci_set_master(pdev);
1091 /* the TJ300 and TJ320 must be detected, the IRQ handling is different
1092 * unfortunately the chips use the same device ID, but the TJ320 has
1093 * the bit20 in status PCI cfg register set
1095 pci_read_config_dword(pdev, 0x04, &cfg);
1096 if (cfg & 0x00100000)
1097 card->typ = NETJET_S_TJ320;
1099 card->typ = NETJET_S_TJ300;
1101 card->base = pci_resource_start(pdev, 0);
1102 pci_set_drvdata(pdev, card);
1103 err = setup_instance(card);
1105 pci_set_drvdata(pdev, NULL);
/* PCI remove: fetch the instance from drvdata; a NULL here means the
 * card was already torn down (message below). Actual release happens
 * in lines elided from this listing (presumably nj_release — confirm
 * against the full source). */
1111 static void nj_remove(struct pci_dev *pdev)
1113 struct tiger_hw *card = pci_get_drvdata(pdev);
1118 pr_info("%s drvdata already removed\n", __func__);
1121 /* We cannot select cards with PCI_SUB... IDs, since here are cards with
1122 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject
1123 * known other cards which not work with this driver - see probe function */
1124 static const struct pci_device_id nj_pci_ids[] = {
1125 { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
1126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
1129 MODULE_DEVICE_TABLE(pci, nj_pci_ids);
1131 static struct pci_driver nj_driver = {
1134 .remove = nj_remove,
1135 .id_table = nj_pci_ids,
/* Module entry point: announce the driver revision and register the
 * PCI driver. */
1138 static int __init nj_init(void)
1142 pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
1143 err = pci_register_driver(&nj_driver);
/* Module exit point: unregister the PCI driver (per-card cleanup runs
 * via the driver's remove callback). */
1147 static void __exit nj_cleanup(void)
1149 pci_unregister_driver(&nj_driver);
1152 module_init(nj_init);
1153 module_exit(nj_cleanup);