/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"
int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
		"set ioat completion timeout [msec] (default 200 [msec])");
int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
		"set ioat idle timeout [msec] (default 2000 [msec])");

#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
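/*
 * Illustrative note (not part of the original source): because both knobs are
 * declared with module_param(..., 0644), they can presumably be set at module
 * load time or adjusted later via sysfs, e.g. (assuming the module is loaded
 * as ioatdma):
 *
 *	modprobe ioatdma completion_timeout=500 idle_timeout=4000
 *	echo 500 > /sys/module/ioatdma/parameters/completion_timeout
 *
 * The macros above convert the millisecond values to jiffies each time a
 * timer is armed, so a change takes effect the next time a timer is (re)armed.
 */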
static char *chanerr_str[] = {
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Chan Address Value Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};
static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < 32; i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}
void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}
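/*
 * Illustrative sketch (not from the original file): ioat_issue_pending() is
 * wired up as the dmaengine ->device_issue_pending callback, so a client of
 * this channel typically reaches it along these lines (dst/src/len are
 * hypothetical client-side values):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);	// lands in ioat_tx_submit_unlock()
 *	dma_async_issue_pending(chan);	// lands here, kicking DMACOUNT
 */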
/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
	}

	return err;
}
static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		msleep(100);
	}

	return err;
}
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *ioat_dma;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	ioat_dma = to_ioatdma_device(chan->device);

	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;

	return desc;
}
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}
			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);
			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}
			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link the descriptors into a hardware ring */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}
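/*
 * Worked example (added for illustration, not in the original source):
 * assuming IOAT_DESC_SZ is 64 bytes, IOAT_DESCS_PER_2M is SZ_2M / 64 = 32768.
 * An order-16 ring (65536 descriptors) then needs 65536 * 64 bytes = 4 MB of
 * descriptor memory, i.e. desc_chunks = 2 coherent 2 MB blocks.
 * ioat_alloc_ring_ent() maps ring index idx to chunk idx / 32768 and byte
 * offset (idx & 32767) * 64 inside that chunk.
 */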
/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event((unsigned long)ioat_chan);
	}

	return -ENOMEM;
}
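/*
 * Illustrative sketch (not from the original file) of how prep paths are
 * expected to pair with the locking above: ioat_check_space_lock() returns 0
 * with prep_lock held, the caller fills ioat_chan->produce descriptors, and
 * the lock is only dropped later by ioat_tx_submit_unlock() when the client
 * submits the descriptor. A hypothetical prep routine would look roughly like:
 *
 *	if (ioat_check_space_lock(ioat_chan, num_descs) == 0)
 *		idx = ioat_chan->head;	// prep_lock held from here on
 *	else
 *		return NULL;
 *	// ... fill num_descs ring entries starting at idx ...
 *	return &desc->txd;		// ->tx_submit() unlocks later
 */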
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}
static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}
static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 completion;
	u64 phys_complete;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}
static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}
static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}
/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_NOERROR;
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
}
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we are just returning all the remaining submitted
	 * descriptors to abort.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}
static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}
void ioat_timer_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			spin_lock_bh(&ioat_chan->prep_lock);
			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);

			ioat_abort_descs(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
			ioat_reset_hw(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
			ioat_restart_channel(ioat_chan);

			spin_lock_bh(&ioat_chan->prep_lock);
			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		ioat_abort_descs(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
		ioat_reset_hw(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
		ioat_restart_channel(ioat_chan);

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}
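/*
 * Illustrative usage (added as a sketch, not from the original source):
 * ioat_tx_status() is the dmaengine ->device_tx_status callback, so a client
 * polling for completion of a previously submitted cookie reaches it through
 * the generic helper, e.g.:
 *
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	if (status == DMA_COMPLETE)
 *		;	// transfer done; callbacks already ran in cleanup
 */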
int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					IOAT_PCI_DMAUNCERRSTS_OFFSET,
					0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}