/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"
int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
		"set ioat completion timeout [msec] (default 200 [msec])");
int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
		"set ioat idle timeout [msec] (default 2000 [msec])");

#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
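/*
 * Both macros expand to msecs_to_jiffies() at every use, so a value written
 * to the corresponding writable (0644) module parameter at runtime is picked
 * up the next time a timer is armed with IDLE_TIMEOUT or COMPLETION_TIMEOUT.
 */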
static char *chanerr_str[] = {
	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};
static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}
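/*
 * ioat_stop - quiesce a channel and run its final cleanup.
 * Clears IOAT_RUN so neither the interrupt handlers above nor the timer can
 * re-arm the cleanup tasklet, flushes any interrupt, timer, or tasklet that
 * is already in flight, and then runs ioat_cleanup_event() once by hand.
 */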
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}
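/*
 * __ioat_issue_pending - notify hardware of newly prepared descriptors.
 * Writing the updated descriptor count to IOAT_CHAN_DMACOUNT_OFFSET hands
 * everything up to ->head over to the engine. Callers are expected to hold
 * prep_lock.
 */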
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}
void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}
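/*
 * __ioat_start_null_desc - kick the channel with a NULL descriptor.
 * A NULL transfer (non-zero size, no real payload) is used to (re)start the
 * ring: it gives the hardware a valid chain address and a completion write.
 * Called with prep_lock held.
 */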
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}
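/*
 * __ioat_restart_chan - resume the ring after a reset or error.
 * Re-issues everything from ->tail onward; if nothing is pending the channel
 * is restarted with a NULL descriptor instead. Called with prep_lock held.
 */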
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}
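/*
 * ioat_quiesce - suspend the channel and wait for it to go quiet.
 * A tmo of 0 waits indefinitely; otherwise -ETIMEDOUT is returned if the
 * channel is still active or idle when the timeout expires.
 */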
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}
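/*
 * ioat_tx_submit_unlock - dmaengine ->tx_submit hook.
 * Entered with prep_lock held (taken in ioat_check_space_lock() below) and
 * drops it after publishing the new ->head, hence the _unlock suffix and the
 * __releases() annotation.
 */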
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}
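/*
 * Descriptor ring memory: hardware descriptors live in 2MB coherent chunks
 * (ioat_chan->descs[]); ioat_alloc_ring_ent() carves one IOAT_DESC_SZ slot
 * out of the right chunk and wraps it in a software ioat_ring_ent.
 */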
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link the descriptors into a hardware ring */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}
/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event((unsigned long)ioat_chan);
	}

	return -ENOMEM;
}
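/*
 * desc_has_ext - does this operation consume a second ring slot?
 * XOR ops with more than 5 sources and PQ ops with more than 3 sources need
 * an extended descriptor, so cleanup must skip the following ring entry.
 */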
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}
/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* microsecond delay by sysfs variable per pending descriptor */
	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
		writew(min((ioat_chan->intr_coalesce * (active - i)),
		       IOAT_INTRDELAY_MASK),
		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
	}
}
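/*
 * ioat_cleanup - reclaim finished descriptors and kick error handling.
 * Runs under cleanup_lock; if the channel has halted with an error class we
 * know how to handle or recover from, ioat_eh() is invoked to decode it.
 */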
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we are just returning all the remaining submitted
	 * descriptors to abort.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}
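/*
 * ioat_eh - handle a halted channel.
 * Decodes CHANERR, records validation results for XOR/PQ ops, completes the
 * faulting descriptor, aborts the remaining descriptors and resets the
 * hardware for data read/write errors, then restarts the channel. Any error
 * bits we do not know how to handle are fatal.
 */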
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}
static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer_pending(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
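/*
 * ioat_timer_event - per-channel watchdog.
 * Handles three cases: a halted channel (dump errors, abort and reset), an
 * idle ring (fall back to the idle timeout), and a ring that made no
 * progress since the last acknowledged completion (forceful reset/restart).
 */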
void ioat_timer_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			spin_lock_bh(&ioat_chan->prep_lock);
			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);

			ioat_abort_descs(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
			ioat_reset_hw(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
			ioat_restart_channel(ioat_chan);

			spin_lock_bh(&ioat_chan->prep_lock);
			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		ioat_abort_descs(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
		ioat_reset_hw(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
		ioat_restart_channel(ioat_chan);

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
		writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
		writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}