2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22 * The full GNU General Public License is included in this distribution in
23 * the file called "COPYING".
27 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions are met:
32 * * Redistributions of source code must retain the above copyright
33 * notice, this list of conditions and the following disclaimer.
34 * * Redistributions in binary form must reproduce the above copyright
35 * notice, this list of conditions and the following disclaimer in
36 * the documentation and/or other materials provided with the
38 * * Neither the name of Intel Corporation nor the names of its
39 * contributors may be used to endorse or promote products derived
40 * from this software without specific prior written permission.
42 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
43 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
46 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
47 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
48 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
49 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
50 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
51 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
52 * POSSIBILITY OF SUCH DAMAGE.
56 * Support routines for v3+ hardware
58 #include <linux/module.h>
59 #include <linux/pci.h>
60 #include <linux/gfp.h>
61 #include <linux/dmaengine.h>
62 #include <linux/dma-mapping.h>
63 #include <linux/prefetch.h>
64 #include "../dmaengine.h"
65 #include "registers.h"
70 /* ioat hardware assumes at least two sources for raid operations */
71 #define src_cnt_to_sw(x) ((x) + 2)
72 #define src_cnt_to_hw(x) ((x) - 2)
73 #define ndest_to_sw(x) ((x) + 1)
74 #define ndest_to_hw(x) ((x) - 1)
75 #define src16_cnt_to_sw(x) ((x) + 9)
76 #define src16_cnt_to_hw(x) ((x) - 9)
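/*
 * The hardware source-count fields are biased by the minimum each format
 * supports: base xor/pq descriptors store "sources - 2" (a 5-source xor is
 * programmed with ctl_f.src_cnt = 3, and src_cnt_to_sw(3) recovers 5 in the
 * cleanup path), while the 16-source pq format stores "sources - 9" since it
 * is only used for 9-16 source operations.
 */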
78 /* provide a lookup table for setting the source address in the base or
79 * extended descriptor of an xor or pq descriptor
81 static const u8 xor_idx_to_desc = 0xe0;
82 static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
83 static const u8 pq_idx_to_desc = 0xf8;
84 static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
85 2, 2, 2, 2, 2, 2, 2 };
86 static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
87 static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
88 0, 1, 2, 3, 4, 5, 6 };
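/*
 * The *_idx_to_desc entries select which raw descriptor holds a given source
 * (for xor, 0xe0 routes sources 5-7 to the extended descriptor; for pq, 0xf8
 * routes sources 3-7 there), and the *_idx_to_field entries give the u64 slot
 * inside that descriptor. The pq16 variants use a third block: sources 9-15
 * land in the second 64-byte block of the SED.
 */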
91 * technically sources 1 and 2 do not require SED, but the op will have
92 * at least 9 descriptors so that's irrelevant.
94 static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
95 1, 1, 1, 1, 1, 1, 1 };
97 static void ioat3_eh(struct ioat2_dma_chan *ioat);
99 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
101 struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
103 return raw->field[xor_idx_to_field[idx]];
106 static void xor_set_src(struct ioat_raw_descriptor *descs[2],
107 dma_addr_t addr, u32 offset, int idx)
109 struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];
111 raw->field[xor_idx_to_field[idx]] = addr + offset;
114 static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
116 struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
118 return raw->field[pq_idx_to_field[idx]];
121 static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
123 struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
125 return raw->field[pq16_idx_to_field[idx]];
128 static void pq_set_src(struct ioat_raw_descriptor *descs[2],
129 dma_addr_t addr, u32 offset, u8 coef, int idx)
131 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
132 struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];
134 raw->field[pq_idx_to_field[idx]] = addr + offset;
135 pq->coef[idx] = coef;
138 static int sed_get_pq16_pool_idx(int src_cnt)
141 return pq16_idx_to_sed[src_cnt];
144 static bool is_jf_ioat(struct pci_dev *pdev)
146 switch (pdev->device) {
147 case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
148 case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
149 case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
150 case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
151 case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
152 case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
153 case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
154 case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
155 case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
156 case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
163 static bool is_snb_ioat(struct pci_dev *pdev)
165 switch (pdev->device) {
166 case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
167 case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
168 case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
169 case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
170 case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
171 case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
172 case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
173 case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
174 case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
175 case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
182 static bool is_ivb_ioat(struct pci_dev *pdev)
184 switch (pdev->device) {
185 case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
186 case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
187 case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
188 case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
189 case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
190 case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
191 case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
192 case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
193 case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
194 case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
202 static bool is_hsw_ioat(struct pci_dev *pdev)
204 switch (pdev->device) {
205 case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
206 case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
207 case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
208 case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
209 case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
210 case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
211 case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
212 case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
213 case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
214 case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
222 static bool is_xeon_cb32(struct pci_dev *pdev)
224 return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
228 static bool is_bwd_ioat(struct pci_dev *pdev)
230 switch (pdev->device) {
231 case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
232 case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
233 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
234 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
241 static bool is_bwd_noraid(struct pci_dev *pdev)
243 switch (pdev->device) {
244 case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
245 case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
253 static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
254 dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
256 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
257 struct ioat_pq16a_descriptor *pq16 =
258 (struct ioat_pq16a_descriptor *)desc[1];
259 struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];
261 raw->field[pq16_idx_to_field[idx]] = addr + offset;
if (idx < 8)
	pq->coef[idx] = coef;
else
	pq16->coef[idx - 8] = coef;
269 static struct ioat_sed_ent *
270 ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
272 struct ioat_sed_ent *sed;
273 gfp_t flags = __GFP_ZERO | GFP_ATOMIC;
275 sed = kmem_cache_alloc(device->sed_pool, flags);
279 sed->hw_pool = hw_pool;
280 sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
283 kmem_cache_free(device->sed_pool, sed);
290 static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
295 dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
296 kmem_cache_free(device->sed_pool, sed);
299 static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
300 struct ioat_ring_ent *desc, int idx)
302 struct ioat_chan_common *chan = &ioat->base;
303 struct pci_dev *pdev = chan->device->pdev;
304 size_t len = desc->len;
305 size_t offset = len - desc->hw->size;
306 struct dma_async_tx_descriptor *tx = &desc->txd;
307 enum dma_ctrl_flags flags = tx->flags;
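/* the descriptor passed in is the one that carries the unmap parameters
 * (the last of a multi-descriptor op): len is the total transfer length and
 * the addresses point at the final chunk, so rewinding each address by
 * 'offset' recovers the base that was originally dma-mapped
 */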
309 switch (desc->hw->ctl_f.op) {
311 if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
312 ioat_dma_unmap(chan, flags, len, desc->hw);
314 case IOAT_OP_XOR_VAL:
316 struct ioat_xor_descriptor *xor = desc->xor;
317 struct ioat_ring_ent *ext;
318 struct ioat_xor_ext_descriptor *xor_ex = NULL;
319 int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
320 struct ioat_raw_descriptor *descs[2];
324 ext = ioat2_get_ring_ent(ioat, idx + 1);
325 xor_ex = ext->xor_ex;
328 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
329 descs[0] = (struct ioat_raw_descriptor *) xor;
330 descs[1] = (struct ioat_raw_descriptor *) xor_ex;
331 for (i = 0; i < src_cnt; i++) {
332 dma_addr_t src = xor_get_src(descs, i);
334 ioat_unmap(pdev, src - offset, len,
335 PCI_DMA_TODEVICE, flags, 0);
338 /* dest is a source in xor validate operations */
339 if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
340 ioat_unmap(pdev, xor->dst_addr - offset, len,
341 PCI_DMA_TODEVICE, flags, 1);
346 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
347 ioat_unmap(pdev, xor->dst_addr - offset, len,
348 PCI_DMA_FROMDEVICE, flags, 1);
353 struct ioat_pq_descriptor *pq = desc->pq;
354 struct ioat_ring_ent *ext;
355 struct ioat_pq_ext_descriptor *pq_ex = NULL;
356 int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
357 struct ioat_raw_descriptor *descs[2];
361 ext = ioat2_get_ring_ent(ioat, idx + 1);
365 /* in the 'continue' case don't unmap the dests as sources */
366 if (dmaf_p_disabled_continue(flags))
368 else if (dmaf_continue(flags))
371 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
372 descs[0] = (struct ioat_raw_descriptor *) pq;
373 descs[1] = (struct ioat_raw_descriptor *) pq_ex;
374 for (i = 0; i < src_cnt; i++) {
375 dma_addr_t src = pq_get_src(descs, i);
377 ioat_unmap(pdev, src - offset, len,
378 PCI_DMA_TODEVICE, flags, 0);
381 /* the dests are sources in pq validate operations */
382 if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
383 if (!(flags & DMA_PREP_PQ_DISABLE_P))
384 ioat_unmap(pdev, pq->p_addr - offset,
385 len, PCI_DMA_TODEVICE, flags, 0);
386 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
387 ioat_unmap(pdev, pq->q_addr - offset,
388 len, PCI_DMA_TODEVICE, flags, 0);
393 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
394 if (!(flags & DMA_PREP_PQ_DISABLE_P))
395 ioat_unmap(pdev, pq->p_addr - offset, len,
396 PCI_DMA_BIDIRECTIONAL, flags, 1);
397 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
398 ioat_unmap(pdev, pq->q_addr - offset, len,
399 PCI_DMA_BIDIRECTIONAL, flags, 1);
404 case IOAT_OP_PQ_VAL_16S: {
405 struct ioat_pq_descriptor *pq = desc->pq;
406 int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
407 struct ioat_raw_descriptor *descs[4];
410 /* in the 'continue' case don't unmap the dests as sources */
411 if (dmaf_p_disabled_continue(flags))
413 else if (dmaf_continue(flags))
416 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
417 descs[0] = (struct ioat_raw_descriptor *)pq;
418 descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw);
419 descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]);
420 for (i = 0; i < src_cnt; i++) {
421 dma_addr_t src = pq16_get_src(descs, i);
423 ioat_unmap(pdev, src - offset, len,
424 PCI_DMA_TODEVICE, flags, 0);
427 /* the dests are sources in pq validate operations */
428 if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
429 if (!(flags & DMA_PREP_PQ_DISABLE_P))
430 ioat_unmap(pdev, pq->p_addr - offset,
431 len, PCI_DMA_TODEVICE,
433 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
434 ioat_unmap(pdev, pq->q_addr - offset,
435 len, PCI_DMA_TODEVICE,
441 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
442 if (!(flags & DMA_PREP_PQ_DISABLE_P))
443 ioat_unmap(pdev, pq->p_addr - offset, len,
444 PCI_DMA_BIDIRECTIONAL, flags, 1);
445 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
446 ioat_unmap(pdev, pq->q_addr - offset, len,
447 PCI_DMA_BIDIRECTIONAL, flags, 1);
452 dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
453 __func__, desc->hw->ctl_f.op);
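/*
 * A base xor descriptor has room for 5 source addresses and a base pq
 * descriptor for 3 (plus the P and Q destinations), so operations with more
 * sources occupy a second ring slot for the extended descriptor.
 */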
457 static bool desc_has_ext(struct ioat_ring_ent *desc)
459 struct ioat_dma_descriptor *hw = desc->hw;
461 if (hw->ctl_f.op == IOAT_OP_XOR ||
462 hw->ctl_f.op == IOAT_OP_XOR_VAL) {
463 struct ioat_xor_descriptor *xor = desc->xor;
465 if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
467 } else if (hw->ctl_f.op == IOAT_OP_PQ ||
468 hw->ctl_f.op == IOAT_OP_PQ_VAL) {
469 struct ioat_pq_descriptor *pq = desc->pq;
471 if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
478 static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
483 completion = *chan->completion;
484 phys_complete = ioat_chansts_to_addr(completion);
486 dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
487 (unsigned long long) phys_complete);
489 return phys_complete;
492 static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
495 *phys_complete = ioat3_get_current_completion(chan);
496 if (*phys_complete == chan->last_completion)
499 clear_bit(IOAT_COMPLETION_ACK, &chan->state);
500 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
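/* progress was made: re-arm the completion timeout and report the new
 * completion address so the caller can reap up to it
 */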
506 desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
508 struct ioat_dma_descriptor *hw = desc->hw;
510 switch (hw->ctl_f.op) {
512 case IOAT_OP_PQ_VAL_16S:
514 struct ioat_pq_descriptor *pq = desc->pq;
516 /* check if there's error written */
517 if (!pq->dwbes_f.wbes)
520 /* need to set a chanerr var for checking to clear later */
522 if (pq->dwbes_f.p_val_err)
523 *desc->result |= SUM_CHECK_P_RESULT;
525 if (pq->dwbes_f.q_val_err)
526 *desc->result |= SUM_CHECK_Q_RESULT;
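/* desc->result points at the sum_check_flags the client handed to the
 * _val prep routine, so errors reported via descriptor write back land in
 * the same place as errors recovered from CHANERR in ioat3_eh()
 */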
536 * __cleanup - reclaim used descriptors
537 * @ioat: channel (ring) to clean
539 * The difference from the dma_v2.c __cleanup() is that this routine
540 * handles extended descriptors and dma-unmapping raid operations.
542 static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
544 struct ioat_chan_common *chan = &ioat->base;
545 struct ioatdma_device *device = chan->device;
546 struct ioat_ring_ent *desc;
547 bool seen_current = false;
548 int idx = ioat->tail, i;
551 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
552 __func__, ioat->head, ioat->tail, ioat->issued);
555 * At restart of the channel, the completion address and the
556 * channel status will be 0 due to starting a new chain. Since
* it's a new chain and the first descriptor "fails", there is
558 * nothing to clean up. We do not want to reap the entire submitted
559 * chain due to this 0 address value and then BUG.
564 active = ioat2_ring_active(ioat);
565 for (i = 0; i < active && !seen_current; i++) {
566 struct dma_async_tx_descriptor *tx;
568 smp_read_barrier_depends();
569 prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
570 desc = ioat2_get_ring_ent(ioat, idx + i);
571 dump_desc_dbg(ioat, desc);
573 /* set err stat if we are using dwbes */
574 if (device->cap & IOAT_CAP_DWBES)
575 desc_get_errstat(ioat, desc);
579 dma_cookie_complete(tx);
580 dma_descriptor_unmap(tx);
581 ioat3_dma_unmap(ioat, desc, idx + i);
583 tx->callback(tx->callback_param);
588 if (tx->phys == phys_complete)
591 /* skip extended descriptors */
592 if (desc_has_ext(desc)) {
593 BUG_ON(i + 1 >= active);
597 /* cleanup super extended descriptors */
599 ioat3_free_sed(device, desc->sed);
603 smp_mb(); /* finish all descriptor reads before incrementing tail */
604 ioat->tail = idx + i;
605 BUG_ON(active && !seen_current); /* no active descs have written a completion? */
606 chan->last_completion = phys_complete;
608 if (active - i == 0) {
609 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
611 clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
612 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
614 /* 5 microsecond delay per pending descriptor */
615 writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
616 chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
619 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
621 struct ioat_chan_common *chan = &ioat->base;
624 spin_lock_bh(&chan->cleanup_lock);
626 if (ioat3_cleanup_preamble(chan, &phys_complete))
627 __cleanup(ioat, phys_complete);
629 if (is_ioat_halted(*chan->completion)) {
630 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
632 if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
633 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
638 spin_unlock_bh(&chan->cleanup_lock);
641 static void ioat3_cleanup_event(unsigned long data)
643 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
646 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
649 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
651 struct ioat_chan_common *chan = &ioat->base;
654 ioat2_quiesce(chan, 0);
655 if (ioat3_cleanup_preamble(chan, &phys_complete))
656 __cleanup(ioat, phys_complete);
658 __ioat2_restart_chan(ioat);
661 static void ioat3_eh(struct ioat2_dma_chan *ioat)
663 struct ioat_chan_common *chan = &ioat->base;
664 struct pci_dev *pdev = to_pdev(chan);
665 struct ioat_dma_descriptor *hw;
667 struct ioat_ring_ent *desc;
672 /* cleanup so tail points to descriptor that caused the error */
673 if (ioat3_cleanup_preamble(chan, &phys_complete))
674 __cleanup(ioat, phys_complete);
676 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
677 pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);
679 dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
680 __func__, chanerr, chanerr_int);
682 desc = ioat2_get_ring_ent(ioat, ioat->tail);
684 dump_desc_dbg(ioat, desc);
686 switch (hw->ctl_f.op) {
687 case IOAT_OP_XOR_VAL:
688 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
689 *desc->result |= SUM_CHECK_P_RESULT;
690 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
694 case IOAT_OP_PQ_VAL_16S:
695 if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
696 *desc->result |= SUM_CHECK_P_RESULT;
697 err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
699 if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
700 *desc->result |= SUM_CHECK_Q_RESULT;
701 err_handled |= IOAT_CHANERR_XOR_Q_ERR;
706 /* fault on unhandled error or spurious halt */
707 if (chanerr ^ err_handled || chanerr == 0) {
708 dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
709 __func__, chanerr, err_handled);
713 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
714 pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);
716 /* mark faulting descriptor as complete */
717 *chan->completion = desc->txd.phys;
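/* writing the faulting descriptor's address into the completion area lets
 * the restart path reap it as if it had completed and resume with the next
 * descriptor in the ring
 */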
719 spin_lock_bh(&ioat->prep_lock);
720 ioat3_restart_channel(ioat);
721 spin_unlock_bh(&ioat->prep_lock);
724 static void check_active(struct ioat2_dma_chan *ioat)
726 struct ioat_chan_common *chan = &ioat->base;
728 if (ioat2_ring_active(ioat)) {
729 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
733 if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
734 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
735 else if (ioat->alloc_order > ioat_get_alloc_order()) {
/* if the ring is idle, empty, and oversized try to step down the size */
739 reshape_ring(ioat, ioat->alloc_order - 1);
/* keep shrinking until we get back to our minimum default size */
744 if (ioat->alloc_order > ioat_get_alloc_order())
745 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
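/* re-arming the idle timer makes the ring shrink by one order per idle
 * period until it is back at the default allocation order
 */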
750 static void ioat3_timer_event(unsigned long data)
752 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
753 struct ioat_chan_common *chan = &ioat->base;
754 dma_addr_t phys_complete;
757 status = ioat_chansts(chan);
759 /* when halted due to errors check for channel
760 * programming errors before advancing the completion state
762 if (is_ioat_halted(status)) {
765 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
766 dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
768 if (test_bit(IOAT_RUN, &chan->state))
769 BUG_ON(is_ioat_bug(chanerr));
770 else /* we never got off the ground */
774 /* if we haven't made progress and we have already
775 * acknowledged a pending completion once, then be more
776 * forceful with a restart
778 spin_lock_bh(&chan->cleanup_lock);
779 if (ioat_cleanup_preamble(chan, &phys_complete))
780 __cleanup(ioat, phys_complete);
781 else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
782 spin_lock_bh(&ioat->prep_lock);
783 ioat3_restart_channel(ioat);
784 spin_unlock_bh(&ioat->prep_lock);
785 spin_unlock_bh(&chan->cleanup_lock);
788 set_bit(IOAT_COMPLETION_ACK, &chan->state);
789 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
793 if (ioat2_ring_active(ioat))
794 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
796 spin_lock_bh(&ioat->prep_lock);
798 spin_unlock_bh(&ioat->prep_lock);
800 spin_unlock_bh(&chan->cleanup_lock);
803 static enum dma_status
804 ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
805 struct dma_tx_state *txstate)
807 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
810 ret = dma_cookie_status(c, cookie, txstate);
811 if (ret == DMA_SUCCESS)
816 return dma_cookie_status(c, cookie, txstate);
819 static struct dma_async_tx_descriptor *
820 __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
821 dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
822 size_t len, unsigned long flags)
824 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
825 struct ioat_ring_ent *compl_desc;
826 struct ioat_ring_ent *desc;
827 struct ioat_ring_ent *ext;
828 size_t total_len = len;
829 struct ioat_xor_descriptor *xor;
830 struct ioat_xor_ext_descriptor *xor_ex = NULL;
831 struct ioat_dma_descriptor *hw;
832 int num_descs, with_ext, idx, i;
834 u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;
838 num_descs = ioat2_xferlen_to_descs(ioat, len);
/* we need 2x the number of descriptors to cover greater than 5 sources */
848 /* completion writes from the raid engine may pass completion
849 * writes from the legacy engine, so we need one extra null
* (legacy) descriptor to ensure all completion writes arrive in order */
853 if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
859 struct ioat_raw_descriptor *descs[2];
860 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
863 desc = ioat2_get_ring_ent(ioat, idx + i);
866 /* save a branch by unconditionally retrieving the
* extended descriptor; xor_set_src() knows not to write
868 * to it in the single descriptor case
870 ext = ioat2_get_ring_ent(ioat, idx + i + 1);
871 xor_ex = ext->xor_ex;
873 descs[0] = (struct ioat_raw_descriptor *) xor;
874 descs[1] = (struct ioat_raw_descriptor *) xor_ex;
875 for (s = 0; s < src_cnt; s++)
876 xor_set_src(descs, src[s], offset, s);
877 xor->size = xfer_size;
878 xor->dst_addr = dest + offset;
881 xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);
885 dump_desc_dbg(ioat, desc);
886 } while ((i += 1 + with_ext) < num_descs);
888 /* last xor descriptor carries the unmap parameters and fence bit */
889 desc->txd.flags = flags;
890 desc->len = total_len;
892 desc->result = result;
893 xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
895 /* completion descriptor carries interrupt bit */
896 compl_desc = ioat2_get_ring_ent(ioat, idx + i);
897 compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
901 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
902 hw->ctl_f.compl_write = 1;
903 hw->size = NULL_DESC_BUFFER_SIZE;
904 dump_desc_dbg(ioat, compl_desc);
906 /* we leave the channel locked to ensure in order submission */
907 return &compl_desc->txd;
910 static struct dma_async_tx_descriptor *
911 ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
912 unsigned int src_cnt, size_t len, unsigned long flags)
914 return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
917 struct dma_async_tx_descriptor *
918 ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
919 unsigned int src_cnt, size_t len,
920 enum sum_check_flags *result, unsigned long flags)
922 /* the cleanup routine only sets bits on validate failure, it
923 * does not clear bits on validate success... so clear it here
927 return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
928 src_cnt - 1, len, flags);
932 dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
934 struct device *dev = to_dev(&ioat->base);
935 struct ioat_pq_descriptor *pq = desc->pq;
936 struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
937 struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
938 int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
941 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
942 " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
944 desc_id(desc), (unsigned long long) desc->txd.phys,
945 (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
946 desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
947 pq->ctl_f.compl_write,
948 pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
950 for (i = 0; i < src_cnt; i++)
951 dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
952 (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
953 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
954 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
955 dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
958 static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
959 struct ioat_ring_ent *desc)
961 struct device *dev = to_dev(&ioat->base);
962 struct ioat_pq_descriptor *pq = desc->pq;
963 struct ioat_raw_descriptor *descs[] = { (void *)pq,
966 int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
970 descs[1] = (void *)desc->sed->hw;
971 descs[2] = (void *)desc->sed->hw + 64;
974 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
975 " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
977 desc_id(desc), (unsigned long long) desc->txd.phys,
978 (unsigned long long) pq->next,
979 desc->txd.flags, pq->size, pq->ctl,
980 pq->ctl_f.op, pq->ctl_f.int_en,
981 pq->ctl_f.compl_write,
982 pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
984 for (i = 0; i < src_cnt; i++) {
985 dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
986 (unsigned long long) pq16_get_src(descs, i),
989 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
990 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
993 static struct dma_async_tx_descriptor *
994 __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
995 const dma_addr_t *dst, const dma_addr_t *src,
996 unsigned int src_cnt, const unsigned char *scf,
997 size_t len, unsigned long flags)
999 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
1000 struct ioat_chan_common *chan = &ioat->base;
1001 struct ioatdma_device *device = chan->device;
1002 struct ioat_ring_ent *compl_desc;
1003 struct ioat_ring_ent *desc;
1004 struct ioat_ring_ent *ext;
1005 size_t total_len = len;
1006 struct ioat_pq_descriptor *pq;
1007 struct ioat_pq_ext_descriptor *pq_ex = NULL;
1008 struct ioat_dma_descriptor *hw;
1010 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
1011 int i, s, idx, with_ext, num_descs;
1012 int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;
1014 dev_dbg(to_dev(chan), "%s\n", __func__);
1015 /* the engine requires at least two sources (we provide
1016 * at least 1 implied source in the DMA_PREP_CONTINUE case)
1018 BUG_ON(src_cnt + dmaf_continue(flags) < 2);
1020 num_descs = ioat2_xferlen_to_descs(ioat, len);
1021 /* we need 2x the number of descriptors to cover greater than 3
1022 * sources (we need 1 extra source in the q-only continuation
* case and 3 extra sources in the p+q continuation case). */
1025 if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
1026 (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
1032 /* completion writes from the raid engine may pass completion
1033 * writes from the legacy engine, so we need one extra null
* (legacy) descriptor to ensure all completion writes arrive in order */
1037 if (likely(num_descs) &&
1038 ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
1044 struct ioat_raw_descriptor *descs[2];
1045 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
1047 desc = ioat2_get_ring_ent(ioat, idx + i);
1050 /* save a branch by unconditionally retrieving the
* extended descriptor; pq_set_src() knows not to write
1052 * to it in the single descriptor case
1054 ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
1057 descs[0] = (struct ioat_raw_descriptor *) pq;
1058 descs[1] = (struct ioat_raw_descriptor *) pq_ex;
1060 for (s = 0; s < src_cnt; s++)
1061 pq_set_src(descs, src[s], offset, scf[s], s);
1063 /* see the comment for dma_maxpq in include/linux/dmaengine.h */
1064 if (dmaf_p_disabled_continue(flags))
1065 pq_set_src(descs, dst[1], offset, 1, s++);
1066 else if (dmaf_continue(flags)) {
1067 pq_set_src(descs, dst[0], offset, 0, s++);
1068 pq_set_src(descs, dst[1], offset, 1, s++);
1069 pq_set_src(descs, dst[1], offset, 0, s++);
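/* the continuation cases above append the old P and/or Q destinations as
 * extra sources, which is what the 1 and 3 extra-source adjustments to the
 * descriptor count above account for
 */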
1071 pq->size = xfer_size;
1072 pq->p_addr = dst[0] + offset;
1073 pq->q_addr = dst[1] + offset;
1076 /* we turn on descriptor write back error status */
1077 if (device->cap & IOAT_CAP_DWBES)
1078 pq->ctl_f.wb_en = result ? 1 : 0;
1079 pq->ctl_f.src_cnt = src_cnt_to_hw(s);
1080 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
1081 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
1084 offset += xfer_size;
1085 } while ((i += 1 + with_ext) < num_descs);
1087 /* last pq descriptor carries the unmap parameters and fence bit */
1088 desc->txd.flags = flags;
1089 desc->len = total_len;
1091 desc->result = result;
1092 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
1093 dump_pq_desc_dbg(ioat, desc, ext);
1096 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
1097 pq->ctl_f.compl_write = 1;
1100 /* completion descriptor carries interrupt bit */
1101 compl_desc = ioat2_get_ring_ent(ioat, idx + i);
1102 compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
1103 hw = compl_desc->hw;
1106 hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
1107 hw->ctl_f.compl_write = 1;
1108 hw->size = NULL_DESC_BUFFER_SIZE;
1109 dump_desc_dbg(ioat, compl_desc);
1113 /* we leave the channel locked to ensure in order submission */
1114 return &compl_desc->txd;
1117 static struct dma_async_tx_descriptor *
1118 __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
1119 const dma_addr_t *dst, const dma_addr_t *src,
1120 unsigned int src_cnt, const unsigned char *scf,
1121 size_t len, unsigned long flags)
1123 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
1124 struct ioat_chan_common *chan = &ioat->base;
1125 struct ioatdma_device *device = chan->device;
1126 struct ioat_ring_ent *desc;
1127 size_t total_len = len;
1128 struct ioat_pq_descriptor *pq;
1131 int i, s, idx, num_descs;
1133 /* this function only handles src_cnt 9 - 16 */
1134 BUG_ON(src_cnt < 9);
1136 /* this function is only called with 9-16 sources */
1137 op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;
1139 dev_dbg(to_dev(chan), "%s\n", __func__);
1141 num_descs = ioat2_xferlen_to_descs(ioat, len);
* 16 source pq is only available on cb3.3 and has no completion write */
1147 if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
1155 struct ioat_raw_descriptor *descs[4];
1156 size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
1158 desc = ioat2_get_ring_ent(ioat, idx + i);
1161 descs[0] = (struct ioat_raw_descriptor *) pq;
1163 desc->sed = ioat3_alloc_sed(device,
1164 sed_get_pq16_pool_idx(src_cnt));
1166 dev_err(to_dev(chan),
1167 "%s: no free sed entries\n", __func__);
1171 pq->sed_addr = desc->sed->dma;
1172 desc->sed->parent = desc;
1174 descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
1175 descs[2] = (void *)descs[1] + 64;
1177 for (s = 0; s < src_cnt; s++)
1178 pq16_set_src(descs, src[s], offset, scf[s], s);
1180 /* see the comment for dma_maxpq in include/linux/dmaengine.h */
1181 if (dmaf_p_disabled_continue(flags))
1182 pq16_set_src(descs, dst[1], offset, 1, s++);
1183 else if (dmaf_continue(flags)) {
1184 pq16_set_src(descs, dst[0], offset, 0, s++);
1185 pq16_set_src(descs, dst[1], offset, 1, s++);
1186 pq16_set_src(descs, dst[1], offset, 0, s++);
1189 pq->size = xfer_size;
1190 pq->p_addr = dst[0] + offset;
1191 pq->q_addr = dst[1] + offset;
1194 pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
1195 /* we turn on descriptor write back error status */
1196 if (device->cap & IOAT_CAP_DWBES)
1197 pq->ctl_f.wb_en = result ? 1 : 0;
1198 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
1199 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
1202 offset += xfer_size;
1203 } while (++i < num_descs);
1205 /* last pq descriptor carries the unmap parameters and fence bit */
1206 desc->txd.flags = flags;
1207 desc->len = total_len;
1209 desc->result = result;
1210 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
1212 /* with cb3.3 we should be able to do completion w/o a null desc */
1213 pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
1214 pq->ctl_f.compl_write = 1;
1216 dump_pq16_desc_dbg(ioat, desc);
1218 /* we leave the channel locked to ensure in order submission */
1222 static struct dma_async_tx_descriptor *
1223 ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
1224 unsigned int src_cnt, const unsigned char *scf, size_t len,
1225 unsigned long flags)
1227 struct dma_device *dma = chan->device;
1229 /* specify valid address for disabled result */
1230 if (flags & DMA_PREP_PQ_DISABLE_P)
1232 if (flags & DMA_PREP_PQ_DISABLE_Q)
/* handle the single source multiply case from the raid6 recovery path */
1238 if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
1239 dma_addr_t single_source[2];
1240 unsigned char single_source_coef[2];
1242 BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
1243 single_source[0] = src[0];
1244 single_source[1] = src[0];
1245 single_source_coef[0] = scf[0];
1246 single_source_coef[1] = 0;
1248 return (src_cnt > 8) && (dma->max_pq > 8) ?
1249 __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
1250 2, single_source_coef, len,
1252 __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
1253 single_source_coef, len, flags);
1256 return (src_cnt > 8) && (dma->max_pq > 8) ?
1257 __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
1259 __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
1264 struct dma_async_tx_descriptor *
1265 ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
1266 unsigned int src_cnt, const unsigned char *scf, size_t len,
1267 enum sum_check_flags *pqres, unsigned long flags)
1269 struct dma_device *dma = chan->device;
1271 /* specify valid address for disabled result */
1272 if (flags & DMA_PREP_PQ_DISABLE_P)
1274 if (flags & DMA_PREP_PQ_DISABLE_Q)
1277 /* the cleanup routine only sets bits on validate failure, it
1278 * does not clear bits on validate success... so clear it here
1282 return (src_cnt > 8) && (dma->max_pq > 8) ?
1283 __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
1285 __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
1289 static struct dma_async_tx_descriptor *
1290 ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
1291 unsigned int src_cnt, size_t len, unsigned long flags)
1293 struct dma_device *dma = chan->device;
1294 unsigned char scf[src_cnt];
1297 memset(scf, 0, src_cnt);
1299 flags |= DMA_PREP_PQ_DISABLE_Q;
1300 pq[1] = dst; /* specify valid address for disabled result */
1302 return (src_cnt > 8) && (dma->max_pq > 8) ?
1303 __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
1305 __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
1309 struct dma_async_tx_descriptor *
1310 ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
1311 unsigned int src_cnt, size_t len,
1312 enum sum_check_flags *result, unsigned long flags)
1314 struct dma_device *dma = chan->device;
1315 unsigned char scf[src_cnt];
1318 /* the cleanup routine only sets bits on validate failure, it
1319 * does not clear bits on validate success... so clear it here
1323 memset(scf, 0, src_cnt);
1325 flags |= DMA_PREP_PQ_DISABLE_Q;
1326 pq[1] = pq[0]; /* specify valid address for disabled result */
1329 return (src_cnt > 8) && (dma->max_pq > 8) ?
1330 __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
1332 __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
1336 static struct dma_async_tx_descriptor *
1337 ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
1339 struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
1340 struct ioat_ring_ent *desc;
1341 struct ioat_dma_descriptor *hw;
1343 if (ioat2_check_space_lock(ioat, 1) == 0)
1344 desc = ioat2_get_ring_ent(ioat, ioat->head);
1351 hw->ctl_f.int_en = 1;
1352 hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
1353 hw->ctl_f.compl_write = 1;
1354 hw->size = NULL_DESC_BUFFER_SIZE;
1358 desc->txd.flags = flags;
1361 dump_desc_dbg(ioat, desc);
1363 /* we leave the channel locked to ensure in order submission */
1367 static void ioat3_dma_test_callback(void *dma_async_param)
1369 struct completion *cmp = dma_async_param;
1374 #define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
1375 static int ioat_xor_val_self_test(struct ioatdma_device *device)
1379 struct page *xor_srcs[IOAT_NUM_SRC_TEST];
1380 struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
1381 dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
1382 dma_addr_t dest_dma;
1383 struct dma_async_tx_descriptor *tx;
1384 struct dma_chan *dma_chan;
1385 dma_cookie_t cookie;
1390 struct completion cmp;
1392 struct device *dev = &device->pdev->dev;
1393 struct dma_device *dma = &device->common;
1396 dev_dbg(dev, "%s\n", __func__);
1398 if (!dma_has_cap(DMA_XOR, dma->cap_mask))
1401 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
1402 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
1403 if (!xor_srcs[src_idx]) {
1405 __free_page(xor_srcs[src_idx]);
1410 dest = alloc_page(GFP_KERNEL);
1413 __free_page(xor_srcs[src_idx]);
1417 /* Fill in src buffers */
1418 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
1419 u8 *ptr = page_address(xor_srcs[src_idx]);
1420 for (i = 0; i < PAGE_SIZE; i++)
1421 ptr[i] = (1 << src_idx);
1424 for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
1425 cmp_byte ^= (u8) (1 << src_idx);
1427 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
1428 (cmp_byte << 8) | cmp_byte;
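/* with IOAT_NUM_SRC_TEST == 6 sources filled with (1 << src_idx), every
 * byte of the xor result should be 0x3f
 */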
1430 memset(page_address(dest), 0, PAGE_SIZE);
1432 dma_chan = container_of(dma->channels.next, struct dma_chan,
1434 if (dma->device_alloc_chan_resources(dma_chan) < 1) {
1442 dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
1443 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1444 dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
1446 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
1447 IOAT_NUM_SRC_TEST, PAGE_SIZE,
1448 DMA_PREP_INTERRUPT |
1449 DMA_COMPL_SKIP_SRC_UNMAP |
1450 DMA_COMPL_SKIP_DEST_UNMAP);
1453 dev_err(dev, "Self-test xor prep failed\n");
1459 init_completion(&cmp);
1460 tx->callback = ioat3_dma_test_callback;
1461 tx->callback_param = &cmp;
1462 cookie = tx->tx_submit(tx);
1464 dev_err(dev, "Self-test xor setup failed\n");
1468 dma->device_issue_pending(dma_chan);
1470 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1472 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1473 dev_err(dev, "Self-test xor timed out\n");
1478 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1479 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1480 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
1482 dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1483 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
1484 u32 *ptr = page_address(dest);
1485 if (ptr[i] != cmp_word) {
1486 dev_err(dev, "Self-test xor failed compare\n");
1488 goto free_resources;
1491 dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1493 /* skip validate if the capability is not present */
1494 if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
1495 goto free_resources;
1497 op = IOAT_OP_XOR_VAL;
/* validate the sources with the destination page */
1500 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1501 xor_val_srcs[i] = xor_srcs[i];
1502 xor_val_srcs[i] = dest;
1506 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1507 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
1509 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1510 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1511 &xor_val_result, DMA_PREP_INTERRUPT |
1512 DMA_COMPL_SKIP_SRC_UNMAP |
1513 DMA_COMPL_SKIP_DEST_UNMAP);
1515 dev_err(dev, "Self-test zero prep failed\n");
1521 init_completion(&cmp);
1522 tx->callback = ioat3_dma_test_callback;
1523 tx->callback_param = &cmp;
1524 cookie = tx->tx_submit(tx);
1526 dev_err(dev, "Self-test zero setup failed\n");
1530 dma->device_issue_pending(dma_chan);
1532 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1534 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1535 dev_err(dev, "Self-test validate timed out\n");
1540 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1541 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
1543 if (xor_val_result != 0) {
1544 dev_err(dev, "Self-test validate failed compare\n");
1546 goto free_resources;
1549 /* test for non-zero parity sum */
1550 op = IOAT_OP_XOR_VAL;
1553 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1554 dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
1556 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
1557 IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
1558 &xor_val_result, DMA_PREP_INTERRUPT |
1559 DMA_COMPL_SKIP_SRC_UNMAP |
1560 DMA_COMPL_SKIP_DEST_UNMAP);
1562 dev_err(dev, "Self-test 2nd zero prep failed\n");
1568 init_completion(&cmp);
1569 tx->callback = ioat3_dma_test_callback;
1570 tx->callback_param = &cmp;
1571 cookie = tx->tx_submit(tx);
1573 dev_err(dev, "Self-test 2nd zero setup failed\n");
1577 dma->device_issue_pending(dma_chan);
1579 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));
1581 if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
1582 dev_err(dev, "Self-test 2nd validate timed out\n");
1587 if (xor_val_result != SUM_CHECK_P_RESULT) {
1588 dev_err(dev, "Self-test validate failed compare\n");
1593 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1594 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
1596 goto free_resources;
1598 if (op == IOAT_OP_XOR) {
1599 dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
1600 for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
1601 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
1603 } else if (op == IOAT_OP_XOR_VAL) {
1604 for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
1605 dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
1609 dma->device_free_chan_resources(dma_chan);
1611 src_idx = IOAT_NUM_SRC_TEST;
1613 __free_page(xor_srcs[src_idx]);
1618 static int ioat3_dma_self_test(struct ioatdma_device *device)
1620 int rc = ioat_dma_self_test(device);
1625 rc = ioat_xor_val_self_test(device);
1632 static int ioat3_irq_reinit(struct ioatdma_device *device)
1634 int msixcnt = device->common.chancnt;
1635 struct pci_dev *pdev = device->pdev;
1637 struct msix_entry *msix;
1638 struct ioat_chan_common *chan;
1641 switch (device->irq_mode) {
1644 for (i = 0; i < msixcnt; i++) {
1645 msix = &device->msix_entries[i];
1646 chan = ioat_chan_by_index(device, i);
1647 devm_free_irq(&pdev->dev, msix->vector, chan);
1650 pci_disable_msix(pdev);
1653 case IOAT_MSIX_SINGLE:
1654 msix = &device->msix_entries[0];
1655 chan = ioat_chan_by_index(device, 0);
1656 devm_free_irq(&pdev->dev, msix->vector, chan);
1657 pci_disable_msix(pdev);
1661 chan = ioat_chan_by_index(device, 0);
1662 devm_free_irq(&pdev->dev, pdev->irq, chan);
1663 pci_disable_msi(pdev);
1667 chan = ioat_chan_by_index(device, 0);
1668 devm_free_irq(&pdev->dev, pdev->irq, chan);
1675 device->irq_mode = IOAT_NOIRQ;
1677 err = ioat_dma_setup_interrupts(device);
1682 static int ioat3_reset_hw(struct ioat_chan_common *chan)
1684 /* throw away whatever the channel was doing and get it
1685 * initialized, with ioat3 specific workarounds
1687 struct ioatdma_device *device = chan->device;
1688 struct pci_dev *pdev = device->pdev;
1693 ioat2_quiesce(chan, msecs_to_jiffies(100));
1695 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1696 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
1698 if (device->version < IOAT_VER_3_3) {
1699 /* clear any pending errors */
1700 err = pci_read_config_dword(pdev,
1701 IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
1704 "channel error register unreachable\n");
1707 pci_write_config_dword(pdev,
1708 IOAT_PCI_CHANERR_INT_OFFSET, chanerr);
1710 /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
1711 * (workaround for spurious config parity error after restart)
1713 pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
1714 if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
1715 pci_write_config_dword(pdev,
1716 IOAT_PCI_DMAUNCERRSTS_OFFSET,
1721 err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
1723 dev_err(&pdev->dev, "Failed to reset!\n");
1727 if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
1728 err = ioat3_irq_reinit(device);
1733 static void ioat3_intr_quirk(struct ioatdma_device *device)
1735 struct dma_device *dma;
1737 struct ioat_chan_common *chan;
1740 dma = &device->common;
* if we have descriptor write back error status, we mask the error interrupts */
1746 if (device->cap & IOAT_CAP_DWBES) {
1747 list_for_each_entry(c, &dma->channels, device_node) {
1748 chan = to_chan_common(c);
1749 errmask = readl(chan->reg_base +
1750 IOAT_CHANERR_MASK_OFFSET);
1751 errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
1752 IOAT_CHANERR_XOR_Q_ERR;
1753 writel(errmask, chan->reg_base +
1754 IOAT_CHANERR_MASK_OFFSET);
1759 int ioat3_dma_probe(struct ioatdma_device *device, int dca)
1761 struct pci_dev *pdev = device->pdev;
1762 int dca_en = system_has_dca_enabled(pdev);
1763 struct dma_device *dma;
1765 struct ioat_chan_common *chan;
1766 bool is_raid_device = false;
1769 device->enumerate_channels = ioat2_enumerate_channels;
1770 device->reset_hw = ioat3_reset_hw;
1771 device->self_test = ioat3_dma_self_test;
1772 device->intr_quirk = ioat3_intr_quirk;
1773 dma = &device->common;
1774 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
1775 dma->device_issue_pending = ioat2_issue_pending;
1776 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
1777 dma->device_free_chan_resources = ioat2_free_chan_resources;
1779 dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
1780 dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
1782 device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
1784 if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
1785 device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
1787 /* dca is incompatible with raid operations */
1788 if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
1789 device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
1791 if (device->cap & IOAT_CAP_XOR) {
1792 is_raid_device = true;
1795 dma_cap_set(DMA_XOR, dma->cap_mask);
1796 dma->device_prep_dma_xor = ioat3_prep_xor;
1798 dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
1799 dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
1802 if (device->cap & IOAT_CAP_PQ) {
1803 is_raid_device = true;
1805 dma->device_prep_dma_pq = ioat3_prep_pq;
1806 dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
1807 dma_cap_set(DMA_PQ, dma->cap_mask);
1808 dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
1810 if (device->cap & IOAT_CAP_RAID16SS) {
1811 dma_set_maxpq(dma, 16, 0);
1813 dma_set_maxpq(dma, 8, 0);
1816 if (!(device->cap & IOAT_CAP_XOR)) {
1817 dma->device_prep_dma_xor = ioat3_prep_pqxor;
1818 dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
1819 dma_cap_set(DMA_XOR, dma->cap_mask);
1820 dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
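/* xor support above is provided by pq operations with the q result
 * disabled: p is then just the plain xor of the sources (see
 * ioat3_prep_pqxor and ioat3_prep_pqxor_val)
 */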
1822 if (device->cap & IOAT_CAP_RAID16SS) {
1830 dma->device_tx_status = ioat3_tx_status;
1831 device->cleanup_fn = ioat3_cleanup_event;
1832 device->timer_fn = ioat3_timer_event;
1834 /* starting with CB3.3 super extended descriptors are supported */
1835 if (device->cap & IOAT_CAP_RAID16SS) {
1839 /* allocate sw descriptor pool for SED */
1840 device->sed_pool = kmem_cache_create("ioat_sed",
1841 sizeof(struct ioat_sed_ent), 0, 0, NULL);
1842 if (!device->sed_pool)
1845 for (i = 0; i < MAX_SED_POOLS; i++) {
1846 snprintf(pool_name, 14, "ioat_hw%d_sed", i);
1848 /* allocate SED DMA pool */
1849 device->sed_hw_pool[i] = dma_pool_create(pool_name,
1851 SED_SIZE * (i + 1), 64, 0);
1852 if (!device->sed_hw_pool[i])
1853 goto sed_pool_cleanup;
1858 err = ioat_probe(device);
1861 ioat_set_tcp_copy_break(262144);
1863 list_for_each_entry(c, &dma->channels, device_node) {
1864 chan = to_chan_common(c);
1865 writel(IOAT_DMA_DCA_ANY_CPU,
1866 chan->reg_base + IOAT_DCACTRL_OFFSET);
1869 err = ioat_register(device);
1873 ioat_kobject_add(device, &ioat2_ktype);
1876 device->dca = ioat3_dca_init(pdev, device->reg_base);
1881 if (device->sed_pool) {
1883 kmem_cache_destroy(device->sed_pool);
1885 for (i = 0; i < MAX_SED_POOLS; i++)
1886 if (device->sed_hw_pool[i])
1887 dma_pool_destroy(device->sed_hw_pool[i]);
1893 void ioat3_dma_remove(struct ioatdma_device *device)
1895 if (device->sed_pool) {
1897 kmem_cache_destroy(device->sed_pool);
1899 for (i = 0; i < MAX_SED_POOLS; i++)
1900 if (device->sed_hw_pool[i])
1901 dma_pool_destroy(device->sed_hw_pool[i]);