/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>
#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS0_ERR(x)		(0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x)		(0x1 << (x))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			12
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
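/*
 * The controller exposes up to MAX_CHAN_NR channels in two banks:
 * channels 0-7 are configured through CTL0/CTL2 and report status in
 * STS0, while channels 8-11 are configured through CTL3 and report
 * status in STS2.  Each channel also owns a small block of descriptor
 * registers; the structs below mirror that register layout.
 */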
struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};
struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_data_direction	dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	dma_cookie_t		completed_cookie;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};
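/*
 * Offsets of the per-channel descriptor registers inside the channel's
 * register window; pch_dma_chan->membase points at the channel's entry
 * in pch_dma_regs.desc[].
 */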
#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)
struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct pci_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};
#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14
#define PCH_DMA_STS2	0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)
static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}
static
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}
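/*
 * CTL2 holds the per-channel interrupt enable bits: channels 0-7 map to
 * bits 0-7 and channels 8-11 to bits 16-19, hence the "+ 8" below.
 */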
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		val = dma_readl(pd, CTL3);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			(DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		val = dma_readl(pd, CTL3);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			(DMA_CTL0_BITS_PER_CH * ch));
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}
static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}
static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	return sts == DMA_STATUS_IDLE;
}
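/*
 * Program a transfer into the channel registers.  A lone descriptor is
 * written out directly and run in one-shot mode; a chained list is
 * started by loading NEXT with the first descriptor's DMA address and
 * switching the channel to scatter-gather mode.
 */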
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}
static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}
static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}
static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}
static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
				      struct pch_dma_desc *desc)
{
	dma_cookie_t cookie = pd_chan->chan.cookie;

	/* Cookies are always positive; wrap back to 1 on overflow. */
	if (++cookie < 0)
		cookie = 1;

	pd_chan->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = pdc_assign_cookie(pd_chan, desc);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return cookie;
}
static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}
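/*
 * Take a descriptor from the channel's free list, skipping entries the
 * client has not ACKed yet.  If none is usable, fall back to allocating
 * a fresh one with GFP_NOIO, since this may run on an I/O submission
 * path.
 */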
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}
static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}
static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				"Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	pd_chan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}
static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_bh(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_bh(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	enum dma_status ret;

	spin_lock_bh(&pd_chan->lock);
	last_completed = pd_chan->completed_cookie;
	last_used = chan->cookie;
	spin_unlock_bh(&pd_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	return ret;
}
static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}
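/*
 * pd_prep_slave_sg() builds one hardware descriptor per scatterlist
 * entry and chains them through the NEXT field.  The peripheral's FIFO
 * address and transfer width come from the struct pch_dma_slave that
 * the client attaches to chan->private (see <linux/pch_dma.h>).  A
 * minimal client-side sketch, assuming a platform-supplied struct
 * pch_dma_slave "pd_slave" and a hypothetical filter function:
 *
 *	static bool filter(struct dma_chan *chan, void *slave)
 *	{
 *		chan->private = slave;
 *		return true;
 *	}
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, filter, &pd_slave);
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *						  DMA_TO_DEVICE,
 *						  DMA_PREP_INTERRUPT);
 *	cookie = desc->tx_submit(desc);
 */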
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_data_direction direction, unsigned long flags)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_FROM_DEVICE)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_TO_DEVICE)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_phys(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}
static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_bh(&pd_chan->lock);

	return 0;
}
static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}
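/*
 * Interrupt status is split across two registers: STS0 carries the IRQ
 * and error bits for channels 0-7, STS2 for channels 8-11.  The handler
 * only latches the error flag and schedules the channel tasklet; actual
 * completion and error handling run in pdc_tasklet().
 */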
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				/* i is 8..11 here, which lines up with the
				 * STS2 error bits at positions 8..11 */
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}
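/*
 * Shadow the control registers and each channel's descriptor registers
 * in struct pch_dma across suspend/resume, so the controller can be
 * reprogrammed after the device has been powered down.
 */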
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}
static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}
static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
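/*
 * The channel count is not probed from the hardware; it comes from the
 * driver_data field of the matching entry in pch_dma_id_table below.
 */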
static int __devinit pch_dma_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(struct pch_dma)+
		sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = pci_pool_create("pch_dma_desc_pool", pdev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	pd->dma.dev = &pdev->dev;
	pd->dma.chancnt = nr_channels;

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		pd_chan->chan.cookie = 1;
		pd_chan->chan.chan_id = i;

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_init(&pd_chan->tasklet, pdc_tasklet,
			     (unsigned long)pd_chan);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_control = pd_device_control;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	pci_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}
static void __devexit pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_disable(&pd_chan->tasklet);
			tasklet_kill(&pd_chan->tasklet);
		}

		pci_pool_destroy(pd->pool);
		free_irq(pdev->irq, pd);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}
/* PCI Device ID of DMA device */
#define PCI_VENDOR_ID_ROHM		0x10DB
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH	0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH	0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH	0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH	0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH	0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH	0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH	0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH	0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH	0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH	0x803B
DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ 0, },
};
static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= __devexit_p(pch_dma_remove),
#ifdef CONFIG_PM
	.suspend	= pch_dma_suspend,
	.resume		= pch_dma_resume,
#endif
};
static int __init pch_dma_init(void)
{
	return pci_register_driver(&pch_dma_driver);
}

static void __exit pch_dma_exit(void)
{
	pci_unregister_driver(&pch_dma_driver);
}

module_init(pch_dma_init);
module_exit(pch_dma_exit);
MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");