 * Thunderbolt Cactus Ridge driver - NHI driver
 * The NHI (native host interface) is the PCI device that allows us to send and
 * receive frames from the Thunderbolt bus.
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
 * Used to enable end-to-end workaround for missing RX packets. Do not
 * use this ring for anything else.
#define RING_E2E_UNUSED_HOPID	2
/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
#define RING_FIRST_USABLE_HOPID	8
 * Minimal number of vectors when we use MSI-X. Two for control channel
 * Rx/Tx and the remaining four are for cross domain DMA paths.
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16
#define NHI_MAILBOX_TIMEOUT	500 /* ms */
static int ring_interrupt_index(struct tb_ring *ring)
		bit += ring->nhi->hop_count;
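/*
 * Illustrative note (not part of the original file): ring_interrupt_index()
 * packs TX and RX rings into one bit space. Assuming hop_count == 12, TX
 * ring N maps to bit N and RX ring N maps to bit 12 + N, e.g.:
 *
 *	TX ring 0 -> bit 0,   TX ring 5 -> bit 5
 *	RX ring 0 -> bit 12,  RX ring 5 -> bit 17
 *
 * ring_interrupt_active() below then selects the mask dword with
 * index / 32 * 4 and the bit within that dword with index & 31.
 */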
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 * ring->nhi->lock must be held.
static void ring_interrupt_active(struct tb_ring *ring, bool active)
	int reg = REG_RING_INTERRUPT_BASE +
		ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
			index = ring->hop + ring->nhi->hop_count;
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	old = ioread32(ring->nhi->iobase + reg);
	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
 * nhi_disable_interrupts() - disable interrupts for all rings
 * Use only during init and shutdown.
static void nhi_disable_interrupts(struct tb_nhi *nhi)
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
/* ring helper methods */
static void __iomem *ring_desc_base(struct tb_ring *ring)
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
static void __iomem *ring_options_base(struct tb_ring *ring)
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
	 * The other 16 bits in the register are read-only and writes to them
	 * are ignored by the hardware, so we can save one ioread32() by
	 * filling the read-only bits with zeroes.
	iowrite32(cons, ring_desc_base(ring) + 8);
static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
	/* See ring_iowrite_cons() above for explanation */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
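/*
 * Layout sketch (illustration only): the descriptor dword at offset 8 holds
 * both indices, the consumer index in bits 15:0 and the producer index in
 * bits 31:16. That is why ring_iowrite_prod() shifts by 16 and both helpers
 * can write the other half as zero, as explained in ring_iowrite_cons().
 */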
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
	iowrite32(value, ring_desc_base(ring) + offset);
static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
	iowrite32(value, ring_options_base(ring) + offset);
static bool ring_full(struct tb_ring *ring)
	return ((ring->head + 1) % ring->size) == ring->tail;
static bool ring_empty(struct tb_ring *ring)
	return ring->head == ring->tail;
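/*
 * Worked example (illustration only): with size == 8, head == 5 and
 * tail == 6 the ring is full because (5 + 1) % 8 == 6, while head == tail
 * means it is empty. One descriptor slot is therefore always left unused so
 * that the two states can be told apart.
 */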
 * ring_write_descriptors() - post frames from ring->queue to the controller
 * ring->lock is held.
static void ring_write_descriptors(struct tb_ring *ring)
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		ring->head = (ring->head + 1) % ring->size;
			ring_iowrite_prod(ring, ring->head);
			ring_iowrite_cons(ring, ring->head);
 * ring_work() - progress completed frames
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
static void ring_work(struct work_struct *work)
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		goto invoke_callback;
	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
		list_move_tail(&frame->list, &done);
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		ring->tail = (ring->tail + 1) % ring->size;
	ring_write_descriptors(ring);
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		 * The callback may reenqueue or delete the frame.
		 * Do not hold on to it.
		list_del_init(&frame->list);
			frame->callback(ring, frame, canceled);
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
	spin_lock_irqsave(&ring->lock, flags);
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 * This function can be called when the @start_poll callback of the @ring
 * has been called. It will read one completed frame from the ring and
 * return it to the caller. Returns %NULL if there are no more completed frames.
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
	struct ring_frame *frame = NULL;
	spin_lock_irqsave(&ring->lock, flags);
	if (ring_empty(ring))
	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
		list_del_init(&frame->list);
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		ring->tail = (ring->tail + 1) % ring->size;
	spin_unlock_irqrestore(&ring->lock, flags);
EXPORT_SYMBOL_GPL(tb_ring_poll);
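/*
 * Usage sketch (illustration only, not part of this driver): a consumer
 * that passed a @start_poll callback to tb_ring_alloc_rx() might drain
 * completed frames from its own context roughly like this; my_poll() and
 * consume_frame() are hypothetical names:
 *
 *	static void my_poll(struct tb_ring *ring)
 *	{
 *		struct ring_frame *frame;
 *
 *		while ((frame = tb_ring_poll(ring)) != NULL)
 *			consume_frame(frame);
 *
 *		tb_ring_poll_complete(ring);
 *	}
 *
 * tb_ring_poll_complete() re-enables (unmasks) the ring interrupt once the
 * consumer is done polling.
 */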
static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	val = ioread32(ring->nhi->iobase + reg);
	iowrite32(val, ring->nhi->iobase + reg);
/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
		schedule_work(&ring->work);
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
void tb_ring_poll_complete(struct tb_ring *ring)
	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
static irqreturn_t ring_msix(int irq, void *data)
	struct tb_ring *ring = data;
	spin_lock(&ring->nhi->lock);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);
static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	if (!nhi->pdev->msix_enabled)
	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
	ida_simple_remove(&nhi->msix_ida, ring->vector);
static void ring_release_msix(struct tb_ring *ring)
	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
	spin_lock_irq(&nhi->lock);
		 * Automatically allocate HopID from the non-reserved
		 * range 8 .. hop_count - 1.
		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
				if (!nhi->tx_rings[i]) {
				if (!nhi->rx_rings[i]) {
	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
		nhi->tx_rings[ring->hop] = ring;
		nhi->rx_rings[ring->hop] = ring;
	spin_unlock_irq(&nhi->lock);
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);
	/* Tx Ring 2 is reserved for E2E workaround */
	if (transmit && hop == RING_E2E_UNUSED_HOPID)
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);
	ring->is_tx = transmit;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;
	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;
	ring_release_msix(ring);
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated for
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL);
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated for
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of invoking the
 *		callback in each completed frame
 * @poll_data: Optional data passed to @start_poll
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
	return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask,
			     start_poll, poll_data);
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
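/*
 * Usage sketch (illustration only, not part of this driver): a client could
 * set up a plain interrupt-driven RX ring and feed it mapped buffers roughly
 * as below; MY_RING_SIZE, the PDF masks, buf_dma and process() are
 * hypothetical:
 *
 *	static void rx_callback(struct tb_ring *ring, struct ring_frame *frame,
 *				bool canceled)
 *	{
 *		if (!canceled)
 *			process(frame);
 *	}
 *
 *	ring = tb_ring_alloc_rx(nhi, -1, MY_RING_SIZE, RING_FLAG_FRAME,
 *				0xffff, 0xffff, NULL, NULL);
 *	tb_ring_start(ring);
 *
 *	frame->buffer_phy = buf_dma;
 *	frame->callback = rx_callback;
 *	__tb_ring_enqueue(ring, frame);
 */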
 * tb_ring_start() - enable a ring
 * Must not be invoked in parallel with tb_ring_stop().
void tb_ring_start(struct tb_ring *ring)
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (ring->flags & RING_FLAG_FRAME) {
		flags = RING_FLAG_ENABLE;
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
		 * In order not to lose Rx packets we enable end-to-end
		 * workaround which transfers Rx credits to an unused Tx HopID.
		hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT;
		hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
		flags |= hop | RING_FLAG_E2E_FLOW_CONTROL;
	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	ring_interrupt_active(ring, true);
	ring->running = true;
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
EXPORT_SYMBOL_GPL(tb_ring_start);
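/*
 * Note (illustration only): for an RX ring started with RING_FLAG_E2E the
 * options dword written at offset 0 above is composed of
 *
 *	RING_FLAG_ENABLE (and RING_FLAG_RAW unless RING_FLAG_FRAME is set),
 *	ORed with RING_FLAG_E2E_FLOW_CONTROL and
 *	(RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT) &
 *	REG_RX_OPTIONS_E2E_HOP_MASK
 *
 * so that receive credits are returned on the reserved TX HopID 2 and RX
 * packets are not lost, as described at RING_E2E_UNUSED_HOPID above.
 */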
 * tb_ring_stop() - shutdown a ring
 * Must not be invoked from a callback.
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until ring_stop has been
 * called.
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
void tb_ring_stop(struct tb_ring *ring)
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
	ring_interrupt_active(ring, false);
	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->running = false;
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
	 * schedule ring->work to invoke callbacks on all remaining frames.
	schedule_work(&ring->work);
	flush_work(&ring->work);
EXPORT_SYMBOL_GPL(tb_ring_stop);
 * tb_ring_free() - free ring
 * When this method returns all invocations of ring->callback will have
 * ended.
 * Ring must be stopped.
 * Must NOT be called from ring_frame->callback!
void tb_ring_free(struct tb_ring *ring)
	spin_lock_irq(&ring->nhi->lock);
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
		ring->nhi->tx_rings[ring->hop] = NULL;
		ring->nhi->rx_rings[ring->hop] = NULL;
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	spin_unlock_irq(&ring->nhi->lock);
	ring_release_msix(ring);
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
	ring->descriptors = NULL;
	ring->descriptors_dma = 0;
	dev_info(&ring->nhi->pdev->dev,
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	flush_work(&ring->work);
EXPORT_SYMBOL_GPL(tb_ring_free);
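/*
 * Teardown order sketch (illustration only): a ring must be stopped before
 * it is freed:
 *
 *	tb_ring_stop(ring);	stops the DMA, cancels queued frames and
 *				runs their callbacks
 *	tb_ring_free(ring);	releases the MSI-X vector and the descriptor
 *				memory
 *
 * Calling tb_ring_free() on a ring that is still running only triggers the
 * dev_WARN() above; the descriptors are freed regardless.
 */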
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));
	if (val & REG_INMAIL_OP_REQUEST)
	if (val & REG_INMAIL_ERROR)
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 * The function reads the current firmware operation mode using the NHI
 * mailbox registers and returns it to the caller.
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
	return (enum nhi_fw_mode)val;
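/*
 * Usage sketch (illustration only, with hypothetical placeholders): callers
 * such as the connection manager code would issue a mailbox command and then
 * check the firmware operation mode roughly like this, where SOME_CMD and
 * SOME_MODE stand for values of enum nhi_mailbox_cmd and enum nhi_fw_mode:
 *
 *	ret = nhi_mailbox_cmd(nhi, SOME_CMD, 0);
 *	if (ret)
 *		return ret;
 *	if (nhi_mailbox_mode(nhi) != SOME_MODE)
 *		return -ENODEV;
 */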
static void nhi_interrupt_work(struct work_struct *work)
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;
	spin_lock_irq(&nhi->lock);
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
		if (++hop == nhi->hop_count) {
		if ((value & (1 << (bit % 32))) == 0)
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
			ring = nhi->tx_rings[hop];
			ring = nhi->rx_rings[hop];
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	spin_unlock_irq(&nhi->lock);
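/*
 * Illustration (not part of the original file): with hop_count == 12 the
 * loop above walks 3 * 12 = 36 status bits. Bits 0..11 report TX completions
 * (type 0), bits 12..23 RX completions (type 1) and bits 24..35 RX overflow
 * (type 2); a fresh status dword is read whenever bit % 32 == 0 and, as the
 * comment above notes, the registers are cleared on read.
 */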
static irqreturn_t nhi_msi(int irq, void *data)
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
static int nhi_suspend_noirq(struct device *dev)
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	return tb_domain_suspend_noirq(tb);
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	 * Configure interrupt throttling for all vectors even if we
	 * only use a few of them.
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
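/*
 * Worked example (illustration only): the target is roughly one interrupt
 * per 128 us and the hardware counts in 256 ns units, so
 *
 *	throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256)
 *		 = DIV_ROUND_UP(128000, 256) = 500
 *
 * and that value is written to REG_INT_THROTTLING_RATE + i * 4 for each of
 * the MSIX_MAX_VECS vectors.
 */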
static int nhi_resume_noirq(struct device *dev)
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	 * Check that the device is still there. It may be that the user
	 * unplugged the last device, which causes the host controller to
	 * go away.
	if (!pci_device_is_present(pdev))
		tb->nhi->going_away = true;
		nhi_enable_int_throttling(tb->nhi);
	return tb_domain_resume_noirq(tb);
static int nhi_suspend(struct device *dev)
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	return tb_domain_suspend(tb);
static void nhi_complete(struct device *dev)
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	 * If we were runtime suspended when system suspend started,
	 * schedule runtime resume now. It should bring the domain back
	 * to functional state.
	if (pm_runtime_suspended(&pdev->dev))
		pm_runtime_resume(&pdev->dev);
		tb_domain_complete(tb);
static int nhi_runtime_suspend(struct device *dev)
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	return tb_domain_runtime_suspend(tb);
static int nhi_runtime_resume(struct device *dev)
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	nhi_enable_int_throttling(tb->nhi);
	return tb_domain_runtime_resume(tb);
static void nhi_shutdown(struct tb_nhi *nhi)
	dev_info(&nhi->pdev->dev, "shutdown\n");
	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	nhi_disable_interrupts(nhi);
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	ida_destroy(&nhi->msix_ida);
static int nhi_init_msi(struct tb_nhi *nhi)
	struct pci_dev *pdev = nhi->pdev;
	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);
	nhi_enable_int_throttling(nhi);
	ida_init(&nhi->msix_ida);
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
	irq = pci_irq_vector(nhi->pdev, 0);
	res = devm_request_irq(&pdev->dev, irq, nhi_msi,
			       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		dev_err(&pdev->dev, "request_irq failed, aborting\n");
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	res = pcim_enable_device(pdev);
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	/* cannot fail - table is allocated in pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
	res = nhi_init_msi(nhi);
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
	spin_lock_init(&nhi->lock);
	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		dev_err(&pdev->dev, "failed to set DMA mask\n");
	pci_set_master(pdev);
	tb = icm_probe(nhi);
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting\n");
	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
	res = tb_domain_add(tb);
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
	pci_set_drvdata(pdev, tb);
	pm_runtime_allow(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
static void nhi_remove(struct pci_dev *pdev)
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);
	tb_domain_remove(tb);
 * The tunneled PCI bridges are siblings of ours. Use resume_noirq to reenable
 * the tunnels asap. A corresponding PCI quirk blocks the downstream bridges'
 * resume_noirq until we are done.
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
	.thaw_noirq = nhi_resume_noirq,
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.freeze = nhi_suspend,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
	.runtime_suspend = nhi_runtime_suspend,
	.runtime_resume = nhi_runtime_resume,
static struct pci_device_id nhi_ids[] = {
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
static int __init nhi_init(void)
	ret = tb_domain_init();
	ret = pci_register_driver(&nhi_driver);
static void __exit nhi_unload(void)
	pci_unregister_driver(&nhi_driver);
rootfs_initcall(nhi_init);
module_exit(nhi_unload);