/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Support routines for v3+ hardware
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
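/*
 * Worked example (illustrative): a 4-source software xor is encoded as
 * src_cnt_to_hw(4) = 2, because the hardware counts only the sources
 * beyond the two it always assumes; decoding reverses it, so
 * src_cnt_to_sw(2) = 4. For the 16-source pq engine the bias is 9, so
 * a 12-source op is stored as src16_cnt_to_hw(12) = 3.
 */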
/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1,
                                       2, 2, 2, 2, 2, 2, 2 };
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7,
                                        0, 1, 2, 3, 4, 5, 6 };
/*
 * technically sources 1 and 2 do not require SED, but the op will have
 * at least 9 descriptors so that's irrelevant.
 */
static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                      1, 1, 1, 1, 1, 1, 1 };
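/*
 * Example walk of the 16-source tables (illustrative): for source
 * index 10, pq16_idx_to_desc[10] = 2 selects the second 64-byte half
 * of the SED block, pq16_idx_to_field[10] = 1 picks the address field
 * within it, and pq16_idx_to_sed[10] = 1 selects the larger SED pool.
 */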
static void ioat3_eh(struct ioat2_dma_chan *ioat);
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
                        dma_addr_t addr, u32 offset, int idx)
{
        struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

        raw->field[xor_idx_to_field[idx]] = addr + offset;
}
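/*
 * Illustration of the xor_idx_to_desc bitmask used above: 0xe0 is
 * binary 11100000, so (xor_idx_to_desc >> idx) & 1 is 0 for sources
 * 0-4 (base descriptor) and 1 for sources 5-7 (extended descriptor).
 * E.g. idx = 6 has bit 6 set, so descs[1] (the xor_ex descriptor)
 * receives the address in field xor_idx_to_field[6] = 1.
 */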
static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
        struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

        return raw->field[pq_idx_to_field[idx]];
}
static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx)
{
        struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

        return raw->field[pq16_idx_to_field[idx]];
}
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
                       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
        struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
        struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

        raw->field[pq_idx_to_field[idx]] = addr + offset;
        pq->coef[idx] = coef;
}
static int sed_get_pq16_pool_idx(int src_cnt)
{
        return pq16_idx_to_sed[src_cnt];
}
static bool is_jf_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
        case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
                return true;
        default:
                return false;
        }
}
static bool is_snb_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
        case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
                return true;
        default:
                return false;
        }
}
static bool is_ivb_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_IVB0:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB1:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB2:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB3:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB4:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB5:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB6:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB7:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB8:
        case PCI_DEVICE_ID_INTEL_IOAT_IVB9:
                return true;
        default:
                return false;
        }
}
static bool is_hsw_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_HSW0:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW1:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW2:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW3:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW4:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW5:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW6:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW7:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW8:
        case PCI_DEVICE_ID_INTEL_IOAT_HSW9:
                return true;
        default:
                return false;
        }
}
static bool is_xeon_cb32(struct pci_dev *pdev)
{
        return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
                is_hsw_ioat(pdev);
}
static bool is_bwd_ioat(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD0:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD1:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
                return true;
        default:
                return false;
        }
}
static bool is_bwd_noraid(struct pci_dev *pdev)
{
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_IOAT_BWD2:
        case PCI_DEVICE_ID_INTEL_IOAT_BWD3:
                return true;
        default:
                return false;
        }
}
static void pq16_set_src(struct ioat_raw_descriptor *desc[3],
                         dma_addr_t addr, u32 offset, u8 coef, unsigned idx)
{
        struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
        struct ioat_pq16a_descriptor *pq16 =
                (struct ioat_pq16a_descriptor *)desc[1];
        struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]];

        raw->field[pq16_idx_to_field[idx]] = addr + offset;

        if (idx < 8)
                pq->coef[idx] = coef;
        else
                pq16->coef[idx - 8] = coef;
}
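/*
 * Note on the coefficient split above (illustrative): the GF(256)
 * multipliers for sources 0-7 live in the base pq descriptor, while
 * sources 8-15 spill into the pq16a extension, so e.g. idx = 10 is
 * stored as pq16->coef[2].
 */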
static struct ioat_sed_ent *
ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool)
{
        struct ioat_sed_ent *sed;
        gfp_t flags = __GFP_ZERO | GFP_ATOMIC;

        sed = kmem_cache_alloc(device->sed_pool, flags);
        if (!sed)
                return NULL;

        sed->hw_pool = hw_pool;
        sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool],
                                 flags, &sed->dma);
        if (!sed->hw) {
                kmem_cache_free(device->sed_pool, sed);
                return NULL;
        }

        return sed;
}
static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed)
{
        if (!sed)
                return;

        dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
        kmem_cache_free(device->sed_pool, sed);
}
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        if (hw->ctl_f.op == IOAT_OP_XOR ||
            hw->ctl_f.op == IOAT_OP_XOR_VAL) {
                struct ioat_xor_descriptor *xor = desc->xor;

                if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
                        return true;
        } else if (hw->ctl_f.op == IOAT_OP_PQ ||
                   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
                struct ioat_pq_descriptor *pq = desc->pq;

                if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
                        return true;
        }

        return false;
}
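/*
 * The thresholds above mirror the descriptor layouts: a base xor
 * descriptor holds 5 source addresses and a base pq descriptor holds
 * 3, so any operation with more sources must have consumed a second
 * (extended) ring slot that the cleanup path needs to skip.
 */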
static u64 ioat3_get_current_completion(struct ioat_chan_common *chan)
{
        u64 phys_complete;
        u64 completion;

        completion = *chan->completion;
        phys_complete = ioat_chansts_to_addr(completion);

        dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
                (unsigned long long) phys_complete);

        return phys_complete;
}
static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan,
                                   u64 *phys_complete)
{
        *phys_complete = ioat3_get_current_completion(chan);
        if (*phys_complete == chan->last_completion)
                return false;

        clear_bit(IOAT_COMPLETION_ACK, &chan->state);
        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        return true;
}
static void
desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc)
{
        struct ioat_dma_descriptor *hw = desc->hw;

        switch (hw->ctl_f.op) {
        case IOAT_OP_PQ_VAL:
        case IOAT_OP_PQ_VAL_16S:
        {
                struct ioat_pq_descriptor *pq = desc->pq;

                /* check if there's error written */
                if (!pq->dwbes_f.wbes)
                        return;

                /* need to set a chanerr var for checking to clear later */

                if (pq->dwbes_f.p_val_err)
                        *desc->result |= SUM_CHECK_P_RESULT;

                if (pq->dwbes_f.q_val_err)
                        *desc->result |= SUM_CHECK_Q_RESULT;

                return;
        }
        default:
                return;
        }
}
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *desc;
        bool seen_current = false;
        int idx = ioat->tail, i;
        u16 active;

        dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued);

        /*
         * At restart of the channel, the completion address and the
         * channel status will be 0 due to starting a new chain. Since
         * it's new chain and the first descriptor "fails", there is
         * nothing to clean up. We do not want to reap the entire submitted
         * chain due to this 0 address value and then BUG.
         */
        if (!phys_complete)
                return;

        active = ioat2_ring_active(ioat);
        for (i = 0; i < active && !seen_current; i++) {
                struct dma_async_tx_descriptor *tx;

                smp_read_barrier_depends();
                prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
                desc = ioat2_get_ring_ent(ioat, idx + i);
                dump_desc_dbg(ioat, desc);

                /* set err stat if we are using dwbes */
                if (device->cap & IOAT_CAP_DWBES)
                        desc_get_errstat(ioat, desc);

                tx = &desc->txd;
                if (tx->cookie) {
                        dma_cookie_complete(tx);
                        dma_descriptor_unmap(tx);
                        if (tx->callback) {
                                tx->callback(tx->callback_param);
                                tx->callback = NULL;
                        }
                }

                if (tx->phys == phys_complete)
                        seen_current = true;

                /* skip extended descriptors */
                if (desc_has_ext(desc)) {
                        BUG_ON(i + 1 >= active);
                        i++;
                }

                /* cleanup super extended descriptors */
                if (desc->sed) {
                        ioat3_free_sed(device, desc->sed);
                        desc->sed = NULL;
                }
        }
        smp_mb(); /* finish all descriptor reads before incrementing tail */
        ioat->tail = idx + i;
        BUG_ON(active && !seen_current); /* no active descs have written a completion? */
        chan->last_completion = phys_complete;

        if (active - i == 0) {
                dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
                        __func__);
                clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        }
        /* 5 microsecond delay per pending descriptor */
        writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
               chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
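/*
 * Worked example for the interrupt coalescing write above
 * (illustrative): with 10 descriptors still pending, a delay of
 * 5 * 10 = 50 microseconds is programmed; min() clamps the value to
 * IOAT_INTRDELAY_MASK for deeper backlogs.
 */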
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        u64 phys_complete;

        spin_lock_bh(&chan->cleanup_lock);

        if (ioat3_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        if (is_ioat_halted(*chan->completion)) {
                u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

                if (chanerr & IOAT_CHANERR_HANDLE_MASK) {
                        mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
                        ioat3_eh(ioat);
                }
        }

        spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat3_cleanup_event(unsigned long data)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

        ioat3_cleanup(ioat);
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        u64 phys_complete;

        ioat2_quiesce(chan, 0);
        if (ioat3_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        __ioat2_restart_chan(ioat);
}
static void ioat3_eh(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;
        struct pci_dev *pdev = to_pdev(chan);
        struct ioat_dma_descriptor *hw;
        u64 phys_complete;
        struct ioat_ring_ent *desc;
        u32 err_handled = 0;
        u32 chanerr_int;
        u32 chanerr;

        /* cleanup so tail points to descriptor that caused the error */
        if (ioat3_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

        dev_dbg(to_dev(chan), "%s: error = %x:%x\n",
                __func__, chanerr, chanerr_int);

        desc = ioat2_get_ring_ent(ioat, ioat->tail);
        hw = desc->hw;
        dump_desc_dbg(ioat, desc);

        switch (hw->ctl_f.op) {
        case IOAT_OP_XOR_VAL:
                if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
                        *desc->result |= SUM_CHECK_P_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
                }
                break;
        case IOAT_OP_PQ_VAL:
        case IOAT_OP_PQ_VAL_16S:
                if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
                        *desc->result |= SUM_CHECK_P_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
                }
                if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
                        *desc->result |= SUM_CHECK_Q_RESULT;
                        err_handled |= IOAT_CHANERR_XOR_Q_ERR;
                }
                break;
        }

        /* fault on unhandled error or spurious halt */
        if (chanerr ^ err_handled || chanerr == 0) {
                dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n",
                        __func__, chanerr, err_handled);
                BUG();
        }

        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
        pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

        /* mark faulting descriptor as complete */
        *chan->completion = desc->txd.phys;

        spin_lock_bh(&ioat->prep_lock);
        ioat3_restart_channel(ioat);
        spin_unlock_bh(&ioat->prep_lock);
}
static void check_active(struct ioat2_dma_chan *ioat)
{
        struct ioat_chan_common *chan = &ioat->base;

        if (ioat2_ring_active(ioat)) {
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                return;
        }

        if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &chan->state))
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        else if (ioat->alloc_order > ioat_get_alloc_order()) {
                /* if the ring is idle, empty, and oversized try to step
                 * down the size
                 */
                reshape_ring(ioat, ioat->alloc_order - 1);

                /* keep shrinking until we get back to our minimum
                 * default size
                 */
                if (ioat->alloc_order > ioat_get_alloc_order())
                        mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
        }
}
static void ioat3_timer_event(unsigned long data)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
        struct ioat_chan_common *chan = &ioat->base;
        dma_addr_t phys_complete;
        u64 status;

        status = ioat_chansts(chan);

        /* when halted due to errors check for channel
         * programming errors before advancing the completion state
         */
        if (is_ioat_halted(status)) {
                u32 chanerr;

                chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
                        __func__, chanerr);
                if (test_bit(IOAT_RUN, &chan->state))
                        BUG_ON(is_ioat_bug(chanerr));
                else /* we never got off the ground */
                        return;
        }

        /* if we haven't made progress and we have already
         * acknowledged a pending completion once, then be more
         * forceful with a restart
         */
        spin_lock_bh(&chan->cleanup_lock);
        if (ioat_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);
        else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
                spin_lock_bh(&ioat->prep_lock);
                ioat3_restart_channel(ioat);
                spin_unlock_bh(&ioat->prep_lock);
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        } else {
                set_bit(IOAT_COMPLETION_ACK, &chan->state);
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
        }

        if (ioat2_ring_active(ioat))
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
        else {
                spin_lock_bh(&ioat->prep_lock);
                check_active(ioat);
                spin_unlock_bh(&ioat->prep_lock);
        }
        spin_unlock_bh(&chan->cleanup_lock);
}
static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        enum dma_status ret;

        ret = dma_cookie_status(c, cookie, txstate);
        if (ret == DMA_SUCCESS)
                return ret;

        ioat3_cleanup(ioat);

        return dma_cookie_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
                      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
                      size_t len, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_ring_ent *compl_desc;
        struct ioat_ring_ent *desc;
        struct ioat_ring_ent *ext;
        size_t total_len = len;
        struct ioat_xor_descriptor *xor;
        struct ioat_xor_ext_descriptor *xor_ex = NULL;
        struct ioat_dma_descriptor *hw;
        int num_descs, with_ext, idx, i;
        u32 offset = 0;
        u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

        BUG_ON(src_cnt < 2);

        num_descs = ioat2_xferlen_to_descs(ioat, len);
        /* we need 2x the number of descriptors to cover greater than 5
         * sources
         */
        if (src_cnt > 5) {
                with_ext = 1;
                num_descs *= 2;
        } else
                with_ext = 0;

        /* completion writes from the raid engine may pass completion
         * writes from the legacy engine, so we need one extra null
         * (legacy) descriptor to ensure all completion writes arrive in
         * order.
         */
        if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
                idx = ioat->head;
        else
                return NULL;
        i = 0;
        do {
                struct ioat_raw_descriptor *descs[2];
                size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
                int s;

                desc = ioat2_get_ring_ent(ioat, idx + i);
                xor = desc->xor;

                /* save a branch by unconditionally retrieving the
                 * extended descriptor xor_set_src() knows to not write
                 * to it in the single descriptor case
                 */
                ext = ioat2_get_ring_ent(ioat, idx + i + 1);
                xor_ex = ext->xor_ex;

                descs[0] = (struct ioat_raw_descriptor *) xor;
                descs[1] = (struct ioat_raw_descriptor *) xor_ex;
                for (s = 0; s < src_cnt; s++)
                        xor_set_src(descs, src[s], offset, s);
                xor->size = xfer_size;
                xor->dst_addr = dest + offset;
                xor->ctl = 0;
                xor->ctl_f.op = op;
                xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

                len -= xfer_size;
                offset += xfer_size;
                dump_desc_dbg(ioat, desc);
        } while ((i += 1 + with_ext) < num_descs);

        /* last xor descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

        /* completion descriptor carries interrupt bit */
        compl_desc = ioat2_get_ring_ent(ioat, idx + i);
        compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
        hw = compl_desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.compl_write = 1;
        hw->size = NULL_DESC_BUFFER_SIZE;
        dump_desc_dbg(ioat, compl_desc);

        /* we leave the channel locked to ensure in order submission */
        return &compl_desc->txd;
}
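/*
 * Ring accounting sketch for the routine above (hypothetical numbers):
 * an 8-source, 3 MB xor on a channel with a 1 MB transfer cap needs
 * num_descs = 3, doubled to 6 because 8 sources exceed the 5 that fit
 * in a base descriptor, plus one trailing null descriptor, so
 * ioat2_check_space_lock() is asked for 7 ring slots in total.
 */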
static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
               unsigned int src_cnt, size_t len, unsigned long flags)
{
        return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}
struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
                   unsigned int src_cnt, size_t len,
                   enum sum_check_flags *result, unsigned long flags)
{
        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *result = 0;

        return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
                                     src_cnt - 1, len, flags);
}
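/*
 * Usage note for the wrapper above (illustrative): dmaengine
 * xor-validate callers pass all pages in a single array, so src[0] is
 * handed to __ioat3_prep_xor_lock() as the "destination" to check
 * against and the remaining src_cnt - 1 entries as sources; the engine
 * then reports via *result whether the xor of the set is non-zero.
 */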
static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
        struct device *dev = to_dev(&ioat->base);
        struct ioat_pq_descriptor *pq = desc->pq;
        struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
        struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
        int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
        int i;

        dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
                " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
                " src_cnt: %d)\n",
                desc_id(desc), (unsigned long long) desc->txd.phys,
                (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
                desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
                pq->ctl_f.compl_write,
                pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
                pq->ctl_f.src_cnt);
        for (i = 0; i < src_cnt; i++)
                dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
                        (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
        dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
        dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
        dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
}
static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat,
                               struct ioat_ring_ent *desc)
{
        struct device *dev = to_dev(&ioat->base);
        struct ioat_pq_descriptor *pq = desc->pq;
        struct ioat_raw_descriptor *descs[] = { (void *)pq,
                                                (void *)pq,
                                                (void *)pq };
        int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
        int i;

        if (desc->sed) {
                descs[1] = (void *)desc->sed->hw;
                descs[2] = (void *)desc->sed->hw + 64;
        }

        dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
                " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'"
                " src_cnt: %d)\n",
                desc_id(desc), (unsigned long long) desc->txd.phys,
                (unsigned long long) pq->next,
                desc->txd.flags, pq->size, pq->ctl,
                pq->ctl_f.op, pq->ctl_f.int_en,
                pq->ctl_f.compl_write,
                pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
                pq->ctl_f.src_cnt);
        for (i = 0; i < src_cnt; i++) {
                dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
                        (unsigned long long) pq16_get_src(descs, i),
                        pq->coef[i]);
        }
        dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
        dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
                     const dma_addr_t *dst, const dma_addr_t *src,
                     unsigned int src_cnt, const unsigned char *scf,
                     size_t len, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *compl_desc;
        struct ioat_ring_ent *desc;
        struct ioat_ring_ent *ext;
        size_t total_len = len;
        struct ioat_pq_descriptor *pq;
        struct ioat_pq_ext_descriptor *pq_ex = NULL;
        struct ioat_dma_descriptor *hw;
        u32 offset = 0;
        u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
        int i, s, idx, with_ext, num_descs;
        int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0;

        dev_dbg(to_dev(chan), "%s\n", __func__);
        /* the engine requires at least two sources (we provide
         * at least 1 implied source in the DMA_PREP_CONTINUE case)
         */
        BUG_ON(src_cnt + dmaf_continue(flags) < 2);

        num_descs = ioat2_xferlen_to_descs(ioat, len);
        /* we need 2x the number of descriptors to cover greater than 3
         * sources (we need 1 extra source in the q-only continuation
         * case and 3 extra sources in the p+q continuation case.
         */
        if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
            (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
                with_ext = 1;
                num_descs *= 2;
        } else
                with_ext = 0;

        /* completion writes from the raid engine may pass completion
         * writes from the legacy engine, so we need one extra null
         * (legacy) descriptor to ensure all completion writes arrive in
         * order.
         */
        if (likely(num_descs) &&
            ioat2_check_space_lock(ioat, num_descs + cb32) == 0)
                idx = ioat->head;
        else
                return NULL;
        i = 0;
        do {
                struct ioat_raw_descriptor *descs[2];
                size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

                desc = ioat2_get_ring_ent(ioat, idx + i);
                pq = desc->pq;

                /* save a branch by unconditionally retrieving the
                 * extended descriptor pq_set_src() knows to not write
                 * to it in the single descriptor case
                 */
                ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
                pq_ex = ext->pq_ex;

                descs[0] = (struct ioat_raw_descriptor *) pq;
                descs[1] = (struct ioat_raw_descriptor *) pq_ex;

                for (s = 0; s < src_cnt; s++)
                        pq_set_src(descs, src[s], offset, scf[s], s);

                /* see the comment for dma_maxpq in include/linux/dmaengine.h */
                if (dmaf_p_disabled_continue(flags))
                        pq_set_src(descs, dst[1], offset, 1, s++);
                else if (dmaf_continue(flags)) {
                        pq_set_src(descs, dst[0], offset, 0, s++);
                        pq_set_src(descs, dst[1], offset, 1, s++);
                        pq_set_src(descs, dst[1], offset, 0, s++);
                }
                pq->size = xfer_size;
                pq->p_addr = dst[0] + offset;
                pq->q_addr = dst[1] + offset;
                pq->ctl = 0;
                pq->ctl_f.op = op;
                /* we turn on descriptor write back error status */
                if (device->cap & IOAT_CAP_DWBES)
                        pq->ctl_f.wb_en = result ? 1 : 0;
                pq->ctl_f.src_cnt = src_cnt_to_hw(s);
                pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
                pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

                len -= xfer_size;
                offset += xfer_size;
        } while ((i += 1 + with_ext) < num_descs);

        /* last pq descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        dump_pq_desc_dbg(ioat, desc, ext);

        if (!cb32) {
                pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
                pq->ctl_f.compl_write = 1;
                compl_desc = desc;
        } else {
                /* completion descriptor carries interrupt bit */
                compl_desc = ioat2_get_ring_ent(ioat, idx + i);
                compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
                hw = compl_desc->hw;
                hw->ctl = 0;
                hw->ctl_f.null = 1;
                hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
                hw->ctl_f.compl_write = 1;
                hw->size = NULL_DESC_BUFFER_SIZE;
                dump_desc_dbg(ioat, compl_desc);
        }

        /* we leave the channel locked to ensure in order submission */
        return &compl_desc->txd;
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result,
                       const dma_addr_t *dst, const dma_addr_t *src,
                       unsigned int src_cnt, const unsigned char *scf,
                       size_t len, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *desc;
        size_t total_len = len;
        struct ioat_pq_descriptor *pq;
        u32 offset = 0;
        u8 op;
        int i, s, idx, num_descs;

        /* this function only handles src_cnt 9 - 16 */
        BUG_ON(src_cnt < 9);

        /* this function is only called with 9-16 sources */
        op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S;

        dev_dbg(to_dev(chan), "%s\n", __func__);

        num_descs = ioat2_xferlen_to_descs(ioat, len);

        /*
         * 16 source pq is only available on cb3.3 and has no completion
         * write hw bug.
         */
        if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0)
                idx = ioat->head;
        else
                return NULL;

        i = 0;

        do {
                struct ioat_raw_descriptor *descs[4];
                size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

                desc = ioat2_get_ring_ent(ioat, idx + i);
                pq = desc->pq;

                descs[0] = (struct ioat_raw_descriptor *) pq;

                desc->sed = ioat3_alloc_sed(device,
                                            sed_get_pq16_pool_idx(src_cnt));
                if (!desc->sed) {
                        dev_err(to_dev(chan),
                                "%s: no free sed entries\n", __func__);
                        return NULL;
                }

                pq->sed_addr = desc->sed->dma;
                desc->sed->parent = desc;

                descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw;
                descs[2] = (void *)descs[1] + 64;

                for (s = 0; s < src_cnt; s++)
                        pq16_set_src(descs, src[s], offset, scf[s], s);

                /* see the comment for dma_maxpq in include/linux/dmaengine.h */
                if (dmaf_p_disabled_continue(flags))
                        pq16_set_src(descs, dst[1], offset, 1, s++);
                else if (dmaf_continue(flags)) {
                        pq16_set_src(descs, dst[0], offset, 0, s++);
                        pq16_set_src(descs, dst[1], offset, 1, s++);
                        pq16_set_src(descs, dst[1], offset, 0, s++);
                }

                pq->size = xfer_size;
                pq->p_addr = dst[0] + offset;
                pq->q_addr = dst[1] + offset;
                pq->ctl = 0;
                pq->ctl_f.op = op;
                pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
                /* we turn on descriptor write back error status */
                if (device->cap & IOAT_CAP_DWBES)
                        pq->ctl_f.wb_en = result ? 1 : 0;
                pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
                pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

                len -= xfer_size;
                offset += xfer_size;
        } while (++i < num_descs);

        /* last pq descriptor carries the unmap parameters and fence bit */
        desc->txd.flags = flags;
        desc->len = total_len;
        if (result)
                desc->result = result;
        pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

        /* with cb3.3 we should be able to do completion w/o a null desc */
        pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        pq->ctl_f.compl_write = 1;

        dump_pq16_desc_dbg(ioat, desc);

        /* we leave the channel locked to ensure in order submission */
        return &desc->txd;
}
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
              unsigned int src_cnt, const unsigned char *scf, size_t len,
              unsigned long flags)
{
        struct dma_device *dma = chan->device;

        /* specify valid address for disabled result */
        if (flags & DMA_PREP_PQ_DISABLE_P)
                dst[0] = dst[1];
        if (flags & DMA_PREP_PQ_DISABLE_Q)
                dst[1] = dst[0];

        /* handle the single source multiply case from the raid6
         * recovery path
         */
        if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
                dma_addr_t single_source[2];
                unsigned char single_source_coef[2];

                BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
                single_source[0] = src[0];
                single_source[1] = src[0];
                single_source_coef[0] = scf[0];
                single_source_coef[1] = 0;

                return (src_cnt > 8) && (dma->max_pq > 8) ?
                        __ioat3_prep_pq16_lock(chan, NULL, dst, single_source,
                                               2, single_source_coef, len,
                                               flags) :
                        __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
                                             single_source_coef, len, flags);

        } else {
                return (src_cnt > 8) && (dma->max_pq > 8) ?
                        __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt,
                                               scf, len, flags) :
                        __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt,
                                             scf, len, flags);
        }
}
struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                  unsigned int src_cnt, const unsigned char *scf, size_t len,
                  enum sum_check_flags *pqres, unsigned long flags)
{
        struct dma_device *dma = chan->device;

        /* specify valid address for disabled result */
        if (flags & DMA_PREP_PQ_DISABLE_P)
                pq[0] = pq[1];
        if (flags & DMA_PREP_PQ_DISABLE_Q)
                pq[1] = pq[0];

        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *pqres = 0;

        return (src_cnt > 8) && (dma->max_pq > 8) ?
                __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
                                       flags) :
                __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
                                     flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
                 unsigned int src_cnt, size_t len, unsigned long flags)
{
        struct dma_device *dma = chan->device;
        unsigned char scf[src_cnt];
        dma_addr_t pq[2];

        memset(scf, 0, src_cnt);
        pq[0] = dst;
        flags |= DMA_PREP_PQ_DISABLE_Q;
        pq[1] = dst; /* specify valid address for disabled result */

        return (src_cnt > 8) && (dma->max_pq > 8) ?
                __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
                                       flags) :
                __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
                                     flags);
}
struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
                     unsigned int src_cnt, size_t len,
                     enum sum_check_flags *result, unsigned long flags)
{
        struct dma_device *dma = chan->device;
        unsigned char scf[src_cnt];
        dma_addr_t pq[2];

        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
        *result = 0;

        memset(scf, 0, src_cnt);
        pq[0] = src[0];
        flags |= DMA_PREP_PQ_DISABLE_Q;
        pq[1] = pq[0]; /* specify valid address for disabled result */

        return (src_cnt > 8) && (dma->max_pq > 8) ?
                __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
                                       scf, len, flags) :
                __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
                                     scf, len, flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_ring_ent *desc;
        struct ioat_dma_descriptor *hw;

        if (ioat2_check_space_lock(ioat, 1) == 0)
                desc = ioat2_get_ring_ent(ioat, ioat->head);
        else
                return NULL;

        hw = desc->hw;
        hw->ctl = 0;
        hw->ctl_f.null = 1;
        hw->ctl_f.int_en = 1;
        hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        hw->ctl_f.compl_write = 1;
        hw->size = NULL_DESC_BUFFER_SIZE;
        hw->src_addr = 0;
        hw->dst_addr = 0;

        desc->txd.flags = flags;
        desc->len = 1;

        dump_desc_dbg(ioat, desc);

        /* we leave the channel locked to ensure in order submission */
        return &desc->txd;
}
static void ioat3_dma_test_callback(void *dma_async_param)
{
        struct completion *cmp = dma_async_param;

        complete(cmp);
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int ioat_xor_val_self_test(struct ioatdma_device *device)
{
        int i, src_idx;
        struct page *dest;
        struct page *xor_srcs[IOAT_NUM_SRC_TEST];
        struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
        dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
        dma_addr_t dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        u32 xor_val_result;
        int err = 0;
        struct completion cmp;
        unsigned long tmo;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;
        u8 op = 0;

        dev_dbg(dev, "%s\n", __func__);

        if (!dma_has_cap(DMA_XOR, dma->cap_mask))
                return 0;
        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }
        /* Fill in src buffers */
        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);
                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                        (cmp_byte << 8) | cmp_byte;
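        /*
         * Worked example of the pattern above: with IOAT_NUM_SRC_TEST = 6,
         * cmp_byte = 0x01 ^ 0x02 ^ 0x04 ^ 0x08 ^ 0x10 ^ 0x20 = 0x3f, so
         * every 32-bit word of a correct xor result reads 0x3f3f3f3f.
         */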
        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = container_of(dma->channels.next, struct dma_chan,
                                device_node);
        if (dma->device_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        /* test xor */
        op = IOAT_OP_XOR;

        dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                      IOAT_NUM_SRC_TEST, PAGE_SIZE,
                                      DMA_PREP_INTERRUPT |
                                      DMA_COMPL_SKIP_SRC_UNMAP |
                                      DMA_COMPL_SKIP_DEST_UNMAP);

        if (!tx) {
                dev_err(dev, "Self-test xor prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat3_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test xor setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
                dev_err(dev, "Self-test xor timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i] != cmp_word) {
                        dev_err(dev, "Self-test xor failed compare\n");
                        err = -ENODEV;
                        goto free_resources;
                }
        }
        dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

        /* skip validate if the capability is not present */
        if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
                goto free_resources;

        op = IOAT_OP_XOR_VAL;

        /* validate the sources with the destination page */
        for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                xor_val_srcs[i] = xor_srcs[i];
        xor_val_srcs[i] = dest;

        xor_val_result = 1;

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
                                          &xor_val_result, DMA_PREP_INTERRUPT |
                                          DMA_COMPL_SKIP_SRC_UNMAP |
                                          DMA_COMPL_SKIP_DEST_UNMAP);
        if (!tx) {
                dev_err(dev, "Self-test zero prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat3_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test zero setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
                dev_err(dev, "Self-test validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        if (xor_val_result != 0) {
                dev_err(dev, "Self-test validate failed compare\n");
                err = -ENODEV;
                goto free_resources;
        }

        /* test for non-zero parity sum */
        op = IOAT_OP_XOR_VAL;

        xor_val_result = 0;
        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
        tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
                                          IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
                                          &xor_val_result, DMA_PREP_INTERRUPT |
                                          DMA_COMPL_SKIP_SRC_UNMAP |
                                          DMA_COMPL_SKIP_DEST_UNMAP);
        if (!tx) {
                dev_err(dev, "Self-test 2nd zero prep failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        async_tx_ack(tx);
        init_completion(&cmp);
        tx->callback = ioat3_dma_test_callback;
        tx->callback_param = &cmp;
        cookie = tx->tx_submit(tx);
        if (cookie < 0) {
                dev_err(dev, "Self-test 2nd zero setup failed\n");
                err = -ENODEV;
                goto dma_unmap;
        }
        dma->device_issue_pending(dma_chan);

        tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

        if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
                dev_err(dev, "Self-test 2nd validate timed out\n");
                err = -ENODEV;
                goto dma_unmap;
        }

        if (xor_val_result != SUM_CHECK_P_RESULT) {
                dev_err(dev, "Self-test validate failed compare\n");
                err = -ENODEV;
                goto free_resources;
        }

        for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);

        goto free_resources;
dma_unmap:
        if (op == IOAT_OP_XOR) {
                dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
                for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
                        dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
                                       DMA_TO_DEVICE);
        } else if (op == IOAT_OP_XOR_VAL) {
                for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
                        dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
                                       DMA_TO_DEVICE);
        }
free_resources:
        dma->device_free_chan_resources(dma_chan);
out:
        src_idx = IOAT_NUM_SRC_TEST;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}
static int ioat3_dma_self_test(struct ioatdma_device *device)
{
        int rc = ioat_dma_self_test(device);

        if (rc)
                return rc;

        rc = ioat_xor_val_self_test(device);
        if (rc)
                return rc;

        return 0;
}
static int ioat3_irq_reinit(struct ioatdma_device *device)
{
        int msixcnt = device->common.chancnt;
        struct pci_dev *pdev = device->pdev;
        int i;
        struct msix_entry *msix;
        struct ioat_chan_common *chan;
        int err = 0;

        switch (device->irq_mode) {
        case IOAT_MSIX:

                for (i = 0; i < msixcnt; i++) {
                        msix = &device->msix_entries[i];
                        chan = ioat_chan_by_index(device, i);
                        devm_free_irq(&pdev->dev, msix->vector, chan);
                }

                pci_disable_msix(pdev);
                break;

        case IOAT_MSIX_SINGLE:
                msix = &device->msix_entries[0];
                chan = ioat_chan_by_index(device, 0);
                devm_free_irq(&pdev->dev, msix->vector, chan);
                pci_disable_msix(pdev);
                break;

        case IOAT_MSI:
                chan = ioat_chan_by_index(device, 0);
                devm_free_irq(&pdev->dev, pdev->irq, chan);
                pci_disable_msi(pdev);
                break;

        case IOAT_INTX:
                chan = ioat_chan_by_index(device, 0);
                devm_free_irq(&pdev->dev, pdev->irq, chan);
                break;

        default:
                return 0;
        }

        device->irq_mode = IOAT_NOIRQ;

        err = ioat_dma_setup_interrupts(device);

        return err;
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
        /* throw away whatever the channel was doing and get it
         * initialized, with ioat3 specific workarounds
         */
        struct ioatdma_device *device = chan->device;
        struct pci_dev *pdev = device->pdev;
        u32 chanerr;
        u16 dev_id;
        int err;

        ioat2_quiesce(chan, msecs_to_jiffies(100));

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
        writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

        if (device->version < IOAT_VER_3_3) {
                /* clear any pending errors */
                err = pci_read_config_dword(pdev,
                                IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
                if (err) {
                        dev_err(&pdev->dev,
                                "channel error register unreachable\n");
                        return err;
                }
                pci_write_config_dword(pdev,
                                IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

                /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
                 * (workaround for spurious config parity error after restart)
                 */
                pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
                if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
                        pci_write_config_dword(pdev,
                                               IOAT_PCI_DMAUNCERRSTS_OFFSET,
                                               0x10);
                }
        }

        err = ioat2_reset_sync(chan, msecs_to_jiffies(200));
        if (err) {
                dev_err(&pdev->dev, "Failed to reset!\n");
                return err;
        }

        if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev))
                err = ioat3_irq_reinit(device);

        return err;
}
static void ioat3_intr_quirk(struct ioatdma_device *device)
{
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioat_chan_common *chan;
        u32 errmask;

        dma = &device->common;

        /*
         * if we have descriptor write back error status, we mask the
         * error interrupts
         */
        if (device->cap & IOAT_CAP_DWBES) {
                list_for_each_entry(c, &dma->channels, device_node) {
                        chan = to_chan_common(c);
                        errmask = readl(chan->reg_base +
                                        IOAT_CHANERR_MASK_OFFSET);
                        errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR |
                                   IOAT_CHANERR_XOR_Q_ERR;
                        writel(errmask, chan->reg_base +
                                        IOAT_CHANERR_MASK_OFFSET);
                }
        }
}
int ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
        struct pci_dev *pdev = device->pdev;
        int dca_en = system_has_dca_enabled(pdev);
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioat_chan_common *chan;
        bool is_raid_device = false;
        int err;

        device->enumerate_channels = ioat2_enumerate_channels;
        device->reset_hw = ioat3_reset_hw;
        device->self_test = ioat3_dma_self_test;
        device->intr_quirk = ioat3_intr_quirk;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
        dma->device_issue_pending = ioat2_issue_pending;
        dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
        dma->device_free_chan_resources = ioat2_free_chan_resources;

        dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
        dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;

        device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

        if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
                device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);

        /* dca is incompatible with raid operations */
        if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
                device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

        if (device->cap & IOAT_CAP_XOR) {
                is_raid_device = true;
                dma->max_xor = 8;

                dma_cap_set(DMA_XOR, dma->cap_mask);
                dma->device_prep_dma_xor = ioat3_prep_xor;

                dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
                dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
        }

        if (device->cap & IOAT_CAP_PQ) {
                is_raid_device = true;

                dma->device_prep_dma_pq = ioat3_prep_pq;
                dma->device_prep_dma_pq_val = ioat3_prep_pq_val;
                dma_cap_set(DMA_PQ, dma->cap_mask);
                dma_cap_set(DMA_PQ_VAL, dma->cap_mask);

                if (device->cap & IOAT_CAP_RAID16SS) {
                        dma_set_maxpq(dma, 16, 0);
                } else {
                        dma_set_maxpq(dma, 8, 0);
                }

                if (!(device->cap & IOAT_CAP_XOR)) {
                        dma->device_prep_dma_xor = ioat3_prep_pqxor;
                        dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
                        dma_cap_set(DMA_XOR, dma->cap_mask);
                        dma_cap_set(DMA_XOR_VAL, dma->cap_mask);

                        if (device->cap & IOAT_CAP_RAID16SS) {
                                dma->max_xor = 16;
                        } else {
                                dma->max_xor = 8;
                        }
                }
        }

        dma->device_tx_status = ioat3_tx_status;
        device->cleanup_fn = ioat3_cleanup_event;
        device->timer_fn = ioat3_timer_event;

        /* starting with CB3.3 super extended descriptors are supported */
        if (device->cap & IOAT_CAP_RAID16SS) {
                char pool_name[14];
                int i;

                /* allocate sw descriptor pool for SED */
                device->sed_pool = kmem_cache_create("ioat_sed",
                                sizeof(struct ioat_sed_ent), 0, 0, NULL);
                if (!device->sed_pool)
                        return -ENOMEM;

                for (i = 0; i < MAX_SED_POOLS; i++) {
                        snprintf(pool_name, 14, "ioat_hw%d_sed", i);

                        /* allocate SED DMA pool */
                        device->sed_hw_pool[i] = dma_pool_create(pool_name,
                                        &pdev->dev,
                                        SED_SIZE * (i + 1), 64, 0);
                        if (!device->sed_hw_pool[i])
                                goto sed_pool_cleanup;
                }
        }

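        /*
         * Sizing note for the SED pools just created (illustrative, not
         * from the datasheet): pool i hands out hardware descriptor
         * blocks of SED_SIZE * (i + 1) bytes, matching the pool index
         * that sed_get_pq16_pool_idx() later selects for a 9-16 source
         * operation.
         */
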
        err = ioat_probe(device);
        if (err)
                return err;
        ioat_set_tcp_copy_break(262144);

        list_for_each_entry(c, &dma->channels, device_node) {
                chan = to_chan_common(c);
                writel(IOAT_DMA_DCA_ANY_CPU,
                       chan->reg_base + IOAT_DCACTRL_OFFSET);
        }

        err = ioat_register(device);
        if (err)
                return err;

        ioat_kobject_add(device, &ioat2_ktype);

        if (dca)
                device->dca = ioat3_dca_init(pdev, device->reg_base);

        return 0;

sed_pool_cleanup:
        if (device->sed_pool) {
                int i;

                kmem_cache_destroy(device->sed_pool);

                for (i = 0; i < MAX_SED_POOLS; i++)
                        if (device->sed_hw_pool[i])
                                dma_pool_destroy(device->sed_hw_pool[i]);
        }

        return -ENOMEM;
}
void ioat3_dma_remove(struct ioatdma_device *device)
{
        if (device->sed_pool) {
                int i;

                kmem_cache_destroy(device->sed_pool);

                for (i = 0; i < MAX_SED_POOLS; i++)
                        if (device->sed_hw_pool[i])
                                dma_pool_destroy(device->sed_hw_pool[i]);
        }
}