// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static int completion_timeout = 200;
module_param(completion_timeout, int, 0644);
MODULE_PARM_DESC(completion_timeout,
		"set ioat completion timeout [msec] (default 200 [msec])");
static int idle_timeout = 2000;
module_param(idle_timeout, int, 0644);
MODULE_PARM_DESC(idle_timeout,
		"set ioat idle timeout [msec] (default 2000 [msec])");

#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)

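/* Human-readable names for the CHANERR register bits, indexed by bit
 * position; ioat_print_chanerrs() walks this table when reporting errors.
 */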
static char *chanerr_str[] = {
	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}

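/* Caller must hold prep_lock: publish descriptors appended since the last
 * issue by writing the new descriptor count to the channel DMACOUNT register.
 */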
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

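/* Program a NULL descriptor: a no-op transfer whose only purpose is to give
 * the engine a valid chain address to (re)start from.
 */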
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

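/* Suspend the channel and poll until it actually stops; a @tmo of 0 means
 * wait without a time limit.
 */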
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

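/* ->tx_submit() hook for dma_async_tx_descriptor; it releases the prep_lock
 * taken in ioat_check_space_lock(), hence the _unlock suffix.
 */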
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

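/* Hardware descriptors are carved out of the channel's 2MB coherent chunks;
 * locate the slot for @idx, zero it, and wrap it in a software ring entry.
 */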
static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

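/* Allocate a 2^order descriptor ring: the software ring array, the 2MB
 * coherent chunks that back the hardware descriptors, and the circular
 * next-pointer chain linking them together.
 */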
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	/* setup descriptor pre-fetching for v3.4 */
	if (ioat_dma->cap & IOAT_CAP_DPS) {
		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

		if (chunks == 1)
			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;

		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
	}

	return ring;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event(&ioat_chan->timer);
	}

	return -ENOMEM;
}

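/* An operation needs a second (extended) descriptor slot when it has more
 * sources than the base descriptor encodes: five for XOR, three for PQ.
 */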
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

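/* On hardware with descriptor write-back error status (DWBES), copy the P/Q
 * validation outcome from the descriptor into the client-visible result.
 */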
static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: zeroed (or not) completion address (from status)
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* microsecond delay by sysfs variable per pending descriptor */
	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
		writew(min((ioat_chan->intr_coalesce * (active - i)),
		       IOAT_INTRDELAY_MASK),
		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
	}
}

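/* Tasklet-context reclaim: run __cleanup() if the completion address has
 * advanced, then invoke the error handler if the channel halted on an error
 * we know how to recover from.
 */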
static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	/* set the completion address register again */
	writel(lower_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(upper_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}

static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we are just returning all the remaining submitted
	 * descriptors to abort.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

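/* Channel error handler: decode CHANERR, BUG on anything unhandled, complete
 * the faulting descriptor, then restart the channel (resetting it first if
 * the remaining descriptors had to be aborted).
 */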
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

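/* Keep the short completion timeout armed while descriptors are outstanding;
 * otherwise drop back to the long idle timeout.
 */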
static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}

void ioat_timer_event(struct timer_list *t)
{
	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			spin_lock_bh(&ioat_chan->prep_lock);
			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);

			ioat_abort_descs(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
			ioat_reset_hw(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
			ioat_restart_channel(ioat_chan);

			spin_lock_bh(&ioat_chan->prep_lock);
			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		ioat_abort_descs(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
		ioat_reset_hw(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
		ioat_restart_channel(ioat_chan);

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

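/* ->device_tx_status() hook: check the cookie first and only run the
 * (comparatively expensive) cleanup path if it has not completed yet.
 */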
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}

int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}