2 * Thunderbolt Cactus Ridge driver - NHI driver
4 * The NHI (native host interface) is the PCI device that allows us to send and
5 * receive frames from the Thunderbolt bus.
7 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
10 #include <linux/pm_runtime.h>
11 #include <linux/slab.h>
12 #include <linux/errno.h>
13 #include <linux/pci.h>
14 #include <linux/interrupt.h>
15 #include <linux/module.h>
16 #include <linux/delay.h>
22 #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
25 * Minimum number of vectors when we use MSI-X. Two for control channel
26 * Rx/Tx and the remaining four are for cross domain DMA paths.
28 #define MSIX_MIN_VECS 6
29 #define MSIX_MAX_VECS 16
31 #define NHI_MAILBOX_TIMEOUT 500 /* ms */
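/*
 * A short note tying these bounds to their users below (no new behaviour):
 * nhi_init_msi() asks pci_alloc_irq_vectors() for anything between
 * MSIX_MIN_VECS and MSIX_MAX_VECS MSI-X vectors, and ring_request_msix()
 * later hands individual vectors out of that range to rings through the
 * msix_ida allocator.
 */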
33 static int ring_interrupt_index(const struct tb_ring *ring)
37 bit += ring->nhi->hop_count;
42 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
44 * ring->nhi->lock must be held.
46 static void ring_interrupt_active(struct tb_ring *ring, bool active)
48 int reg = REG_RING_INTERRUPT_BASE +
49 ring_interrupt_index(ring) / 32 * 4;
50 int bit = ring_interrupt_index(ring) & 31;
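/*
 * Worked example of the mapping above (assuming a controller that reports
 * hop_count == 12, as checked in nhi_probe()): RX rings are offset by
 * hop_count in ring_interrupt_index(), so the RX ring on hop 2 has
 * interrupt index 2 + 12 == 14, which lands in the first 32-bit interrupt
 * register (14 / 32 == 0) at bit 14 (14 & 31).
 */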
55 u32 step, shift, ivr, misc;
56 void __iomem *ivr_base;
62 index = ring->hop + ring->nhi->hop_count;
65 * Ask the hardware to clear interrupt status bits automatically
66 * since we already know which interrupt was triggered.
68 misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
69 if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
70 misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
71 iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
74 ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
75 step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
76 shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
77 ivr = ioread32(ivr_base + step);
78 ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
80 ivr |= ring->vector << shift;
81 iowrite32(ivr, ivr_base + step);
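/*
 * To make the arithmetic above concrete, this assumes the layout of 4-bit
 * vector entries packed eight per 32-bit register (REG_INT_VEC_ALLOC_BITS
 * == 4, REG_INT_VEC_ALLOC_REGS == 8; nhi_regs.h is authoritative): index 14
 * ends up at byte offset 14 / 8 * 4 == 4 and is shifted left by
 * 14 % 8 * 4 == 24 bits within that register.
 */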
84 old = ioread32(ring->nhi->iobase + reg);
90 dev_info(&ring->nhi->pdev->dev,
91 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
92 active ? "enabling" : "disabling", reg, bit, old, new);
95 dev_WARN(&ring->nhi->pdev->dev,
96 "interrupt for %s %d is already %s\n",
97 RING_TYPE(ring), ring->hop,
98 active ? "enabled" : "disabled");
99 iowrite32(new, ring->nhi->iobase + reg);
103 * nhi_disable_interrupts() - disable interrupts for all rings
105 * Use only during init and shutdown.
107 static void nhi_disable_interrupts(struct tb_nhi *nhi)
110 /* disable interrupts */
111 for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
112 iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
114 /* clear interrupt status bits */
115 for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
116 ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
119 /* ring helper methods */
121 static void __iomem *ring_desc_base(struct tb_ring *ring)
123 void __iomem *io = ring->nhi->iobase;
124 io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
125 io += ring->hop * 16;
129 static void __iomem *ring_options_base(struct tb_ring *ring)
131 void __iomem *io = ring->nhi->iobase;
132 io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
133 io += ring->hop * 32;
137 static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
140 * The other 16 bits in the register are read-only and writes to them
141 * are ignored by the hardware, so we can save one ioread32() by
142 * filling the read-only bits with zeroes.
144 iowrite32(cons, ring_desc_base(ring) + 8);
147 static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
149 /* See ring_iowrite_cons() above for explanation */
150 iowrite32(prod << 16, ring_desc_base(ring) + 8);
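/*
 * Sketch of the register these two helpers share, as implied by the code
 * above: the 32-bit word at ring_desc_base(ring) + 8 carries the consumer
 * index in bits 15:0 and the producer index in bits 31:16. Each helper
 * writes only its own half and relies on the hardware ignoring writes to
 * the read-only half.
 */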
153 static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
155 iowrite32(value, ring_desc_base(ring) + offset);
158 static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
160 iowrite32(value, ring_desc_base(ring) + offset);
161 iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
164 static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
166 iowrite32(value, ring_options_base(ring) + offset);
169 static bool ring_full(struct tb_ring *ring)
171 return ((ring->head + 1) % ring->size) == ring->tail;
174 static bool ring_empty(struct tb_ring *ring)
176 return ring->head == ring->tail;
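/*
 * These two helpers encode the usual circular-buffer convention used in the
 * rest of this file: head is the next descriptor to be produced, tail the
 * next one to be completed, and one slot is always left unused so that
 * "full" (head + 1 == tail modulo size) stays distinguishable from "empty"
 * (head == tail).
 */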
180 * ring_write_descriptors() - post frames from ring->queue to the controller
182 * ring->lock is held.
184 static void ring_write_descriptors(struct tb_ring *ring)
186 struct ring_frame *frame, *n;
187 struct ring_desc *descriptor;
188 list_for_each_entry_safe(frame, n, &ring->queue, list) {
191 list_move_tail(&frame->list, &ring->in_flight);
192 descriptor = &ring->descriptors[ring->head];
193 descriptor->phys = frame->buffer_phy;
194 descriptor->time = 0;
195 descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
197 descriptor->length = frame->size;
198 descriptor->eof = frame->eof;
199 descriptor->sof = frame->sof;
201 ring->head = (ring->head + 1) % ring->size;
203 ring_iowrite_prod(ring, ring->head);
205 ring_iowrite_cons(ring, ring->head);
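/*
 * Note on the two writes above: for a TX ring the driver is the producer,
 * so posting descriptors advances the producer index; for an RX ring the
 * driver produces empty buffers for the hardware to fill, which is what
 * advancing the consumer index expresses here.
 */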
210 * ring_work() - progress completed frames
212 * If the ring is shutting down then all frames are marked as canceled and
213 * their callbacks are invoked.
215 * Otherwise we collect all completed frames from the ring buffer, write new
216 * frames to the ring buffer and invoke the callbacks for the completed frames.
218 static void ring_work(struct work_struct *work)
220 struct tb_ring *ring = container_of(work, typeof(*ring), work);
221 struct ring_frame *frame;
222 bool canceled = false;
224 mutex_lock(&ring->lock);
226 if (!ring->running) {
227 /* Move all frames to done and mark them as canceled. */
228 list_splice_tail_init(&ring->in_flight, &done);
229 list_splice_tail_init(&ring->queue, &done);
231 goto invoke_callback;
234 while (!ring_empty(ring)) {
235 if (!(ring->descriptors[ring->tail].flags
236 & RING_DESC_COMPLETED))
238 frame = list_first_entry(&ring->in_flight, typeof(*frame),
240 list_move_tail(&frame->list, &done);
242 frame->size = ring->descriptors[ring->tail].length;
243 frame->eof = ring->descriptors[ring->tail].eof;
244 frame->sof = ring->descriptors[ring->tail].sof;
245 frame->flags = ring->descriptors[ring->tail].flags;
247 dev_WARN(&ring->nhi->pdev->dev,
248 "%s %d got unexpected SOF: %#x\n",
249 RING_TYPE(ring), ring->hop,
253 * raw not enabled, interrupt not set: 0x2=0010
254 * raw enabled: 0xa=1010
255 * raw not enabled: 0xb=1011
256 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
258 if (frame->flags != 0xa)
259 dev_WARN(&ring->nhi->pdev->dev,
260 "%s %d got unexpected flags: %#x\n",
261 RING_TYPE(ring), ring->hop,
264 ring->tail = (ring->tail + 1) % ring->size;
266 ring_write_descriptors(ring);
269 mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
270 while (!list_empty(&done)) {
271 frame = list_first_entry(&done, typeof(*frame), list);
273 * The callback may re-enqueue or delete the frame.
274 * Do not hold on to it.
276 list_del_init(&frame->list);
277 frame->callback(ring, frame, canceled);
281 int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
284 mutex_lock(&ring->lock);
286 list_add_tail(&frame->list, &ring->queue);
287 ring_write_descriptors(ring);
291 mutex_unlock(&ring->lock);
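/*
 * A minimal caller sketch for __ring_enqueue() (hypothetical code, not part
 * of this driver; the caller is expected to have DMA-mapped the buffer and
 * gets the frame back in its completion callback):
 *
 *	static void my_frame_done(struct tb_ring *ring,
 *				  struct ring_frame *frame, bool canceled)
 *	{
 *		// unmap or reuse frame->buffer_phy; frame may be re-enqueued
 *	}
 *
 *	frame->buffer_phy = dma_addr;
 *	frame->size = len;
 *	frame->sof = sof_pdf;
 *	frame->eof = eof_pdf;
 *	frame->callback = my_frame_done;
 *	ret = __ring_enqueue(ring, frame);
 */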
295 static irqreturn_t ring_msix(int irq, void *data)
297 struct tb_ring *ring = data;
299 schedule_work(&ring->work);
303 static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
305 struct tb_nhi *nhi = ring->nhi;
306 unsigned long irqflags;
309 if (!nhi->pdev->msix_enabled)
312 ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
318 ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
324 irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
325 ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
332 ida_simple_remove(&nhi->msix_ida, ring->vector);
337 static void ring_release_msix(struct tb_ring *ring)
342 free_irq(ring->irq, ring);
343 ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
348 static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
349 bool transmit, unsigned int flags)
351 struct tb_ring *ring = NULL;
352 dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
353 transmit ? "TX" : "RX", hop, size);
355 mutex_lock(&nhi->lock);
356 if (hop >= nhi->hop_count) {
357 dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
360 if (transmit && nhi->tx_rings[hop]) {
361 dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
363 } else if (!transmit && nhi->rx_rings[hop]) {
364 dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
367 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
371 mutex_init(&ring->lock);
372 INIT_LIST_HEAD(&ring->queue);
373 INIT_LIST_HEAD(&ring->in_flight);
374 INIT_WORK(&ring->work, ring_work);
378 ring->is_tx = transmit;
383 ring->running = false;
385 if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
388 ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
389 size * sizeof(*ring->descriptors),
390 &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
391 if (!ring->descriptors)
395 nhi->tx_rings[hop] = ring;
397 nhi->rx_rings[hop] = ring;
398 mutex_unlock(&nhi->lock);
403 mutex_destroy(&ring->lock);
405 mutex_unlock(&nhi->lock);
409 struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
412 return ring_alloc(nhi, hop, size, true, flags);
415 struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
418 return ring_alloc(nhi, hop, size, false, flags);
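/*
 * Typical ring lifecycle built from the functions above (sketch only, error
 * handling omitted; the ring size of 256 is an arbitrary example):
 *
 *	ring = ring_alloc_tx(nhi, hop, 256, RING_FLAG_NO_SUSPEND);
 *	if (!ring)
 *		return -ENOMEM;
 *	ring_start(ring);
 *	...				// enqueue frames, see __ring_enqueue()
 *	ring_stop(ring);		// cancels whatever is still pending
 *	ring_free(ring);
 */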
422 * ring_start() - enable a ring
424 * Must not be invoked in parallel with ring_stop().
426 void ring_start(struct tb_ring *ring)
428 mutex_lock(&ring->nhi->lock);
429 mutex_lock(&ring->lock);
430 if (ring->nhi->going_away)
433 dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
436 dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
437 RING_TYPE(ring), ring->hop);
439 ring_iowrite64desc(ring, ring->descriptors_dma, 0);
441 ring_iowrite32desc(ring, ring->size, 12);
442 ring_iowrite32options(ring, 0, 4); /* time related? */
443 ring_iowrite32options(ring,
444 RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
446 ring_iowrite32desc(ring,
447 (TB_FRAME_SIZE << 16) | ring->size, 12);
448 ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
449 ring_iowrite32options(ring,
450 RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
452 ring_interrupt_active(ring, true);
453 ring->running = true;
455 mutex_unlock(&ring->lock);
456 mutex_unlock(&ring->nhi->lock);
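/*
 * Summary of the register programming in ring_start() as written above:
 * both ring types get the descriptor ring's DMA address and size; RX rings
 * additionally program the maximum frame size into the upper 16 bits of the
 * size register and an SOF/EOF PDF bitmask at options offset 4, which the
 * TX path leaves at zero.
 */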
461 * ring_stop() - shutdown a ring
463 * Must not be invoked from a callback.
465 * This method will disable the ring. Further calls to ring_tx/ring_rx will
466 * return -ESHUTDOWN until the ring is enabled again with ring_start().
468 * All enqueued frames will be canceled and their callbacks will be executed
469 * with frame->canceled set to true (on the callback thread). This method
470 * returns only after all callback invocations have finished.
472 void ring_stop(struct tb_ring *ring)
474 mutex_lock(&ring->nhi->lock);
475 mutex_lock(&ring->lock);
476 dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
477 RING_TYPE(ring), ring->hop);
478 if (ring->nhi->going_away)
480 if (!ring->running) {
481 dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
482 RING_TYPE(ring), ring->hop);
485 ring_interrupt_active(ring, false);
487 ring_iowrite32options(ring, 0, 0);
488 ring_iowrite64desc(ring, 0, 0);
489 ring_iowrite32desc(ring, 0, 8);
490 ring_iowrite32desc(ring, 0, 12);
493 ring->running = false;
496 mutex_unlock(&ring->lock);
497 mutex_unlock(&ring->nhi->lock);
500 * schedule ring->work to invoke callbacks on all remaining frames.
502 schedule_work(&ring->work);
503 flush_work(&ring->work);
507 * ring_free() - free ring
509 * When this method returns, all invocations of ring->callback will have finished.
512 * Ring must be stopped.
514 * Must NOT be called from ring_frame->callback!
516 void ring_free(struct tb_ring *ring)
518 mutex_lock(&ring->nhi->lock);
520 * Dissociate the ring from the NHI. This also ensures that
521 * nhi_interrupt_work cannot reschedule ring->work.
524 ring->nhi->tx_rings[ring->hop] = NULL;
526 ring->nhi->rx_rings[ring->hop] = NULL;
529 dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
530 RING_TYPE(ring), ring->hop);
533 ring_release_msix(ring);
535 dma_free_coherent(&ring->nhi->pdev->dev,
536 ring->size * sizeof(*ring->descriptors),
537 ring->descriptors, ring->descriptors_dma);
539 ring->descriptors = NULL;
540 ring->descriptors_dma = 0;
543 dev_info(&ring->nhi->pdev->dev,
548 mutex_unlock(&ring->nhi->lock);
550 * ring->work can no longer be scheduled (it is scheduled only
551 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
552 * to finish before freeing the ring.
554 flush_work(&ring->work);
555 mutex_destroy(&ring->lock);
560 * nhi_mailbox_cmd() - Send a command through NHI mailbox
561 * @nhi: Pointer to the NHI structure
562 * @cmd: Command to send
563 * @data: Data to be sent with the command
565 * Sends a mailbox command to the firmware running on the NHI. Returns %0 in
566 * case of success and negative errno in case of failure.
568 int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
573 iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
575 val = ioread32(nhi->iobase + REG_INMAIL_CMD);
576 val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
577 val |= REG_INMAIL_OP_REQUEST | cmd;
578 iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
580 timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
582 val = ioread32(nhi->iobase + REG_INMAIL_CMD);
583 if (!(val & REG_INMAIL_OP_REQUEST))
585 usleep_range(10, 20);
586 } while (ktime_before(ktime_get(), timeout));
588 if (val & REG_INMAIL_OP_REQUEST)
590 if (val & REG_INMAIL_ERROR)
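/*
 * Usage sketch for nhi_mailbox_cmd() (the concrete command value here is an
 * assumption for illustration; the real values come from the
 * nhi_mailbox_cmd enum declared in the headers):
 *
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *	if (ret)
 *		dev_warn(&nhi->pdev->dev, "mailbox command failed: %d\n", ret);
 */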
597 * nhi_mailbox_mode() - Return current firmware operation mode
598 * @nhi: Pointer to the NHI structure
600 * The function reads the current firmware operation mode using the NHI
601 * mailbox registers and returns it to the caller.
603 enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
607 val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
608 val &= REG_OUTMAIL_CMD_OPMODE_MASK;
609 val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
611 return (enum nhi_fw_mode)val;
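/*
 * A caller would typically compare the returned value against the
 * nhi_fw_mode enumerators declared in the headers, e.g. (illustrative name,
 * assumed rather than quoted from this file):
 *
 *	if (nhi_mailbox_mode(nhi) == NHI_FW_SAFE_MODE)
 *		dev_info(&nhi->pdev->dev, "firmware is in safe mode\n");
 */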
614 static void nhi_interrupt_work(struct work_struct *work)
616 struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
617 int value = 0; /* Suppress uninitialized usage warning. */
620 int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
621 struct tb_ring *ring;
623 mutex_lock(&nhi->lock);
626 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
627 * (TX, RX, RX overflow). We iterate over the bits and read new
628 * dwords as required. The registers are cleared on read.
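/*
 * Concretely (assuming hop_count == 12, one of the values nhi_probe()
 * expects): bits 0..11 report TX completions for rings 0..11, bits 12..23
 * RX completions and bits 24..35 RX overflow, so the loop below reads a
 * second status dword once bit 32 is reached.
 */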
630 for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
632 value = ioread32(nhi->iobase
633 + REG_RING_NOTIFY_BASE
635 if (++hop == nhi->hop_count) {
639 if ((value & (1 << (bit % 32))) == 0)
642 dev_warn(&nhi->pdev->dev,
643 "RX overflow for ring %d\n",
648 ring = nhi->tx_rings[hop];
650 ring = nhi->rx_rings[hop];
652 dev_warn(&nhi->pdev->dev,
653 "got interrupt for inactive %s ring %d\n",
658 /* we do not check ring->running, this is done in ring->work */
659 schedule_work(&ring->work);
661 mutex_unlock(&nhi->lock);
664 static irqreturn_t nhi_msi(int irq, void *data)
666 struct tb_nhi *nhi = data;
667 schedule_work(&nhi->interrupt_work);
671 static int nhi_suspend_noirq(struct device *dev)
673 struct pci_dev *pdev = to_pci_dev(dev);
674 struct tb *tb = pci_get_drvdata(pdev);
676 return tb_domain_suspend_noirq(tb);
679 static int nhi_resume_noirq(struct device *dev)
681 struct pci_dev *pdev = to_pci_dev(dev);
682 struct tb *tb = pci_get_drvdata(pdev);
685 * Check that the device is still there. It may be that the user
686 * unplugged the last device, which causes the host controller to go away.
689 if (!pci_device_is_present(pdev))
690 tb->nhi->going_away = true;
692 return tb_domain_resume_noirq(tb);
695 static int nhi_suspend(struct device *dev)
697 struct pci_dev *pdev = to_pci_dev(dev);
698 struct tb *tb = pci_get_drvdata(pdev);
700 return tb_domain_suspend(tb);
703 static void nhi_complete(struct device *dev)
705 struct pci_dev *pdev = to_pci_dev(dev);
706 struct tb *tb = pci_get_drvdata(pdev);
708 tb_domain_complete(tb);
711 static void nhi_shutdown(struct tb_nhi *nhi)
714 dev_info(&nhi->pdev->dev, "shutdown\n");
716 for (i = 0; i < nhi->hop_count; i++) {
717 if (nhi->tx_rings[i])
718 dev_WARN(&nhi->pdev->dev,
719 "TX ring %d is still active\n", i);
720 if (nhi->rx_rings[i])
721 dev_WARN(&nhi->pdev->dev,
722 "RX ring %d is still active\n", i);
724 nhi_disable_interrupts(nhi);
726 * We have to release the irq before calling flush_work. Otherwise an
727 * already executing IRQ handler could call schedule_work again.
729 if (!nhi->pdev->msix_enabled) {
730 devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
731 flush_work(&nhi->interrupt_work);
733 mutex_destroy(&nhi->lock);
734 ida_destroy(&nhi->msix_ida);
737 static int nhi_init_msi(struct tb_nhi *nhi)
739 struct pci_dev *pdev = nhi->pdev;
742 /* In case someone left them on. */
743 nhi_disable_interrupts(nhi);
745 ida_init(&nhi->msix_ida);
748 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
749 * get all MSI-X vectors and if we succeed, each ring will have
750 * one MSI-X vector. If for some reason that does not work out, we
751 * fall back to a single MSI.
753 nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
756 nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
760 INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
762 irq = pci_irq_vector(nhi->pdev, 0);
766 res = devm_request_irq(&pdev->dev, irq, nhi_msi,
767 IRQF_NO_SUSPEND, "thunderbolt", nhi);
769 dev_err(&pdev->dev, "request_irq failed, aborting\n");
777 static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
783 res = pcim_enable_device(pdev);
785 dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
789 res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
791 dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
795 nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
800 /* cannot fail - table is allocated in pcim_iomap_regions */
801 nhi->iobase = pcim_iomap_table(pdev)[0];
802 nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
803 if (nhi->hop_count != 12 && nhi->hop_count != 32)
804 dev_warn(&pdev->dev, "unexpected hop count: %d\n",
807 nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
808 sizeof(*nhi->tx_rings), GFP_KERNEL);
809 nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
810 sizeof(*nhi->rx_rings), GFP_KERNEL);
811 if (!nhi->tx_rings || !nhi->rx_rings)
814 res = nhi_init_msi(nhi);
816 dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
820 mutex_init(&nhi->lock);
822 pci_set_master(pdev);
824 /* magic value - clock related? */
825 iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
831 dev_err(&nhi->pdev->dev,
832 "failed to determine connection manager, aborting\n");
836 dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
838 res = tb_domain_add(tb);
841 * At this point the RX/TX rings might already have been
842 * activated. Do a proper shutdown.
848 pci_set_drvdata(pdev, tb);
853 static void nhi_remove(struct pci_dev *pdev)
855 struct tb *tb = pci_get_drvdata(pdev);
856 struct tb_nhi *nhi = tb->nhi;
858 tb_domain_remove(tb);
863 * The tunneled PCI bridges are siblings of us. Use resume_noirq to re-enable
864 * the tunnels as early as possible. A corresponding PCI quirk blocks the
865 * downstream bridges' resume_noirq until we are done.
867 static const struct dev_pm_ops nhi_pm_ops = {
868 .suspend_noirq = nhi_suspend_noirq,
869 .resume_noirq = nhi_resume_noirq,
870 .freeze_noirq = nhi_suspend_noirq, /*
871 * we just disable hotplug, the
872 * PCI tunnels stay alive.
874 .thaw_noirq = nhi_resume_noirq,
875 .restore_noirq = nhi_resume_noirq,
876 .suspend = nhi_suspend,
877 .freeze = nhi_suspend,
878 .poweroff = nhi_suspend,
879 .complete = nhi_complete,
882 static struct pci_device_id nhi_ids[] = {
884 * We have to specify the class, as the TB bridges use the same device and
885 * vendor (sub)IDs on gen 1 and gen 2 controllers.
888 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
889 .vendor = PCI_VENDOR_ID_INTEL,
890 .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
891 .subvendor = 0x2222, .subdevice = 0x1111,
894 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
895 .vendor = PCI_VENDOR_ID_INTEL,
896 .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
897 .subvendor = 0x2222, .subdevice = 0x1111,
900 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
901 .vendor = PCI_VENDOR_ID_INTEL,
902 .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
903 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
906 .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
907 .vendor = PCI_VENDOR_ID_INTEL,
908 .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
909 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
913 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
914 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
915 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
916 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
917 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
918 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
919 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
920 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
925 MODULE_DEVICE_TABLE(pci, nhi_ids);
926 MODULE_LICENSE("GPL");
928 static struct pci_driver nhi_driver = {
929 .name = "thunderbolt",
932 .remove = nhi_remove,
933 .driver.pm = &nhi_pm_ops,
936 static int __init nhi_init(void)
940 ret = tb_domain_init();
943 ret = pci_register_driver(&nhi_driver);
949 static void __exit nhi_unload(void)
951 pci_unregister_driver(&nhi_driver);
955 module_init(nhi_init);
956 module_exit(nhi_unload);