// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static const char * const chanerr_str[] = {
	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

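/* Decode a CHANERR value: each set bit indexes the matching chanerr_str[] entry. */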
static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

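/*
 * ioat_stop - quiesce a channel: prevent further tasklet scheduling, flush
 * in-flight interrupts, timers and tasklet runs, then perform one final
 * synchronous cleanup pass.
 */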
void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}

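/*
 * __ioat_issue_pending - notify the hardware of newly appended descriptors
 * by writing the cumulative DMACOUNT doorbell; ->issued records the ring
 * position covered by that write.
 */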
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - issue pending descriptors past the watermark
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

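/*
 * Descriptor submission. Runs with prep_lock already held: the lock is taken
 * in ioat_check_space_lock() when ring entries are reserved and is released
 * below once head has been advanced by ->produce.
 */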
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	chunk = idx / IOAT_DESCS_PER_CHUNK;
	idx &= (IOAT_DESCS_PER_CHUNK - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

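/*
 * Ring setup: ioat_alloc_ring() builds a software ring of ioat_ring_ent
 * pointers whose hardware descriptors are carved out of large coherent DMA
 * chunks (ioat_chan->descs[]). The hardware descriptors are linked into a
 * circular chain through hw->next, and on v3.4+ devices descriptor
 * prefetching is programmed via the DRSCTL register.
 */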
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	chunks = (total_descs * IOAT_DESC_SZ) / IOAT_CHUNK_SIZE;
	ioat_chan->desc_chunks = chunks;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}
			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}
			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	/* setup descriptor pre-fetching for v3.4 */
	if (ioat_dma->cap & IOAT_CAP_DPS) {
		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

		if (chunks == 1)
			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;

		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
	}

	return ring;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event(&ioat_chan->timer);
	}

	return -ENOMEM;
}

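/*
 * Typical producer flow (sketch only; the actual prep routines live elsewhere
 * in the driver and may differ in detail):
 *
 *	if (ioat_check_space_lock(ioat_chan, num_descs) == 0) {
 *		// fill num_descs ring entries starting at ioat_chan->head,
 *		// then hand the last descriptor's txd back to the client;
 *		// ioat_tx_submit_unlock() later advances head by ->produce
 *		// and drops prep_lock
 *	} else {
 *		// -ENOMEM: ring full, prep_lock is not held
 *	}
 */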
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if there's error written */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: completion address read back from the channel
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since
	 * it's new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* microsecond delay by sysfs variable per pending descriptor */
	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
		writew(min((ioat_chan->intr_coalesce * (active - i)),
		       IOAT_INTRDELAY_MASK),
		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
	}
}

static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

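/*
 * Cleanup tasklet callback: reap completed descriptors and, unless the
 * channel is being torn down, re-arm the channel by writing IOAT_CHANCTRL_RUN.
 */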
void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	/* set the completion address register again */
	writel(lower_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(upper_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}

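/*
 * ioat_abort_descs - return every submitted-but-unfinished descriptor after
 * the failed one (which tail points to) to its owner with a
 * DMA_TRANS_ABORTED result, then advance tail past the aborted range.
 */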
static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has been processed.
	 * Now we are just returning all the remaining submitted
	 * descriptors to abort.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need to abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}

static void ioat_reboot_chan(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);

	ioat_abort_descs(ioat_chan);
	dev_warn(to_dev(ioat_chan), "Reset channel...\n");
	ioat_reset_hw(ioat_chan);
	dev_warn(to_dev(ioat_chan), "Restart channel...\n");
	ioat_restart_channel(ioat_chan);

	spin_lock_bh(&ioat_chan->prep_lock);
	clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

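/*
 * Channel watchdog, run from the completion timer. In order: a halted
 * channel gets its errors dumped and is rebooted; an idle ring lets
 * check_active() demote the timer to the idle timeout; a stalled completion
 * is reaped with __cleanup(); no progress after an already-acknowledged
 * timeout forces a reboot; descriptors prepped but never issued are issued.
 */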
void ioat_timer_event(struct timer_list *t)
{
	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			ioat_reboot_chan(ioat_chan);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		goto unlock_out;
	}

	/* handle the missed cleanup case */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) {
		/* timer restarted in ioat_cleanup_preamble
		 * and IOAT_COMPLETION_ACK cleared
		 */
		__cleanup(ioat_chan, phys_complete);
		goto unlock_out;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		ioat_reboot_chan(ioat_chan);

		goto unlock_out;
	}

	/* handle missed issue pending case */
	if (ioat_ring_pending(ioat_chan)) {
		dev_warn(to_dev(ioat_chan),
			 "Completion timeout with pending descriptors\n");
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}

	set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
unlock_out:
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}

int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					       IOAT_PCI_DMAUNCERRSTS_OFFSET,
					       0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}