2 * linux/arch/arm/plat-omap/dma.c
4 * Copyright (C) 2003 - 2008 Nokia Corporation
5 * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6 * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7 * Graphics DMA and LCD DMA graphics transformations
8 * by Imre Deak <imre.deak@nokia.com>
9 * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10 * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11 * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
13 * Copyright (C) 2009 Texas Instruments
14 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
16 * Support functions for the OMAP internal DMA channels.
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License version 2 as
20 * published by the Free Software Foundation.
24 #include <linux/module.h>
25 #include <linux/init.h>
26 #include <linux/sched.h>
27 #include <linux/spinlock.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
35 #include <asm/system.h>
36 #include <mach/hardware.h>
/*
 * OMAP1 DMA register offset table, indexed by enum omap_reg_offsets.
 * NOTE(review): most initializer entries are elided from this chunk.
 */
43 static u16 reg_map_omap1[] = {
63 /* Common Registers */
72 [CPC] = 0x18, /* 15xx only */
79 /* Channel specific register offsets */
/*
 * OMAP2+ DMA register offset table, indexed by enum omap_reg_offsets.
 * NOTE(review): only part of the initializer is visible in this chunk.
 */
87 static u16 reg_map_omap2[] = {
90 [IRQSTATUS_L0] = 0x08,
91 [IRQSTATUS_L1] = 0x0c,
92 [IRQSTATUS_L2] = 0x10,
93 [IRQSTATUS_L3] = 0x14,
94 [IRQENABLE_L0] = 0x18,
95 [IRQENABLE_L1] = 0x1c,
96 [IRQENABLE_L2] = 0x20,
97 [IRQENABLE_L3] = 0x24,
99 [OCP_SYSCONFIG] = 0x2c,
105 /* Common register offsets */
120 /* Channel specific register offsets */
127 /* OMAP4 specific registers */
/* Per-channel and per-chain state tracking used by OMAP2+ chaining. */
133 #ifndef CONFIG_ARCH_OMAP1
134 enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
135 DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
138 enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
/* Channel-busy flag and the OMAP2 CSR write-one-to-clear mask. */
141 #define OMAP_DMA_ACTIVE 0x01
142 #define OMAP2_DMA_CSR_CLEAR_MASK 0xffe
/* OMAP1 global DMA request mux registers (FUNC_MUX area). */
144 #define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
/* Non-zero when the DMA module runs in OMAP1510-compatible mode. */
146 static int enable_1510_mode;
/* Global registers saved and restored around context loss. */
149 static struct omap_dma_global_context_registers {
150 u32 dma_irqenable_l0;
151 u32 dma_ocp_sysconfig;
153 } omap_dma_global_context;
/*
 * Per-logical-channel bookkeeping.  NOTE(review): several members
 * referenced later (dev_id, flags, next_lch, enabled_irqs, chaining
 * fields) are elided from this chunk.
 */
155 struct omap_dma_lch {
160 const char *dev_name;
161 void (*callback)(int lch, u16 ch_status, void *data);
164 #ifndef CONFIG_ARCH_OMAP1
165 /* required for Dynamic chaining */
/* Per-chain state: linked-channel list and circular queue counters. */
176 struct dma_link_info {
178 int no_of_lchs_linked;
189 static struct dma_link_info *dma_linked_lch;
191 #ifndef CONFIG_ARCH_OMAP1
193 /* Chain handling macros */
/*
 * The chain queue is circular over the linked channels: QINIT resets
 * it, QFULL/QEMPTY/QLAST test the count, and INCQHEAD/INCQTAIL advance
 * an index modulo no_of_lchs_linked while adjusting q_count.
 */
194 #define OMAP_DMA_CHAIN_QINIT(chain_id) \
196 dma_linked_lch[chain_id].q_head = \
197 dma_linked_lch[chain_id].q_tail = \
198 dma_linked_lch[chain_id].q_count = 0; \
200 #define OMAP_DMA_CHAIN_QFULL(chain_id) \
201 (dma_linked_lch[chain_id].no_of_lchs_linked == \
202 dma_linked_lch[chain_id].q_count)
203 #define OMAP_DMA_CHAIN_QLAST(chain_id) \
205 ((dma_linked_lch[chain_id].no_of_lchs_linked-1) == \
206 dma_linked_lch[chain_id].q_count) \
208 #define OMAP_DMA_CHAIN_QEMPTY(chain_id) \
209 (0 == dma_linked_lch[chain_id].q_count)
210 #define __OMAP_DMA_CHAIN_INCQ(end) \
211 ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
212 #define OMAP_DMA_CHAIN_INCQHEAD(chain_id) \
214 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
215 dma_linked_lch[chain_id].q_count--; \
218 #define OMAP_DMA_CHAIN_INCQTAIL(chain_id) \
220 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
221 dma_linked_lch[chain_id].q_count++; \
/* Module-wide channel bookkeeping, protected by dma_chan_lock. */
225 static int dma_lch_count;
226 static int dma_chan_count;
227 static int omap_dma_reserve_channels;
229 static spinlock_t dma_chan_lock;
230 static struct omap_dma_lch *dma_chan;
231 static void __iomem *omap_dma_base;
/* Register layout: per-channel stride and the common register window. */
233 static u8 dma_stride;
234 static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;
/* OMAP1 IRQ number for each logical DMA channel. */
236 static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
237 INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
238 INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
239 INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
240 INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
241 INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
/* Forward declarations for helpers used before their definitions. */
244 static inline void disable_lnk(int lch);
245 static void omap_disable_channel_irq(int lch);
246 static inline void omap_enable_channel_irq(int lch);
248 #define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
/*
 * dma_write - write @val to DMA register @reg of channel @lch.
 * Registers below dma_common_ch_start are global (no per-channel
 * stride).  With a 0x40 stride the registers are 16 bits wide, so wide
 * registers (CLNK_CTRL..CCEN and PCHD_ID..CAPS_2 ranges) need a second
 * halfword write for the upper 16 bits; otherwise a single 32-bit
 * write suffices.  NOTE(review): the else branch lines are elided.
 */
251 static inline void dma_write(u32 val, int reg, int lch)
256 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
257 offset = reg_map[reg] + (stride * lch);
259 if (dma_stride == 0x40) {
260 __raw_writew(val, omap_dma_base + offset);
261 if ((reg > CLNK_CTRL && reg < CCEN) ||
262 (reg > PCHD_ID && reg < CAPS_2)) {
263 u32 offset2 = reg_map[reg] + 2 + (stride * lch);
264 __raw_writew(val >> 16, omap_dma_base + offset2);
267 __raw_writel(val, omap_dma_base + offset);
/*
 * dma_read - read DMA register @reg of channel @lch; mirror image of
 * dma_write(): two halfword reads combined for wide registers when the
 * channel stride is 0x40, a single 32-bit read otherwise.
 */
271 static inline u32 dma_read(int reg, int lch)
276 stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
277 offset = reg_map[reg] + (stride * lch);
279 if (dma_stride == 0x40) {
280 val = __raw_readw(omap_dma_base + offset);
281 if ((reg > CLNK_CTRL && reg < CCEN) ||
282 (reg > PCHD_ID && reg < CAPS_2)) {
284 u32 offset2 = reg_map[reg] + 2 + (stride * lch);
285 upper = __raw_readw(omap_dma_base + offset2);
286 val |= (upper << 16);
289 val = __raw_readl(omap_dma_base + offset);
294 #ifdef CONFIG_ARCH_OMAP15XX
295 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
296 static int omap_dma_in_1510_mode(void)
298 return enable_1510_mode;
/* Non-15xx builds can never be in 1510-compatible mode. */
301 #define omap_dma_in_1510_mode() 0
304 #ifdef CONFIG_ARCH_OMAP1
/*
 * get_gdma_dev - read which device is muxed onto global DMA request
 * line @req (1-based).  Each 32-bit mux register packs five 6-bit
 * fields, hence the /5 register select and %5*6 shift.
 */
305 static inline int get_gdma_dev(int req)
307 u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
308 int shift = ((req - 1) % 5) * 6;
310 return ((omap_readl(reg) >> shift) & 0x3f) + 1;
/* set_gdma_dev - route device @dev onto global DMA request line @req. */
313 static inline void set_gdma_dev(int req, int dev)
315 u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
316 int shift = ((req - 1) % 5) * 6;
320 l &= ~(0x3f << shift);
321 l |= (dev - 1) << shift;
/* No request mux outside OMAP1: make set_gdma_dev a no-op. */
325 #define set_gdma_dev(req, dev) do {} while (0)
/* Zero every register in the common channel window of @lch. */
329 static void clear_lch_regs(int lch)
331 int i = dma_common_ch_start;
333 for (; i <= dma_common_ch_end; i += 1)
334 dma_write(0, i, lch);
/*
 * omap_set_dma_priority - set DMA transfer priority.
 * OMAP1: program the traffic-controller priority register selected by
 * @dst_port (priority goes into bits 11:8).  OMAP2+: @dst_port is not
 * used; the priority is carried in the channel's CCR register.
 */
337 void omap_set_dma_priority(int lch, int dst_port, int priority)
342 if (cpu_class_is_omap1()) {
344 case OMAP_DMA_PORT_OCP_T1: /* FFFECC00 */
345 reg = OMAP_TC_OCPT1_PRIOR;
347 case OMAP_DMA_PORT_OCP_T2: /* FFFECCD0 */
348 reg = OMAP_TC_OCPT2_PRIOR;
350 case OMAP_DMA_PORT_EMIFF: /* FFFECC08 */
351 reg = OMAP_TC_EMIFF_PRIOR;
353 case OMAP_DMA_PORT_EMIFS: /* FFFECC04 */
354 reg = OMAP_TC_EMIFS_PRIOR;
362 l |= (priority & 0xf) << 8;
366 if (cpu_class_is_omap2()) {
369 ccr = dma_read(CCR, lch);
374 dma_write(ccr, CCR, lch);
377 EXPORT_SYMBOL(omap_set_dma_priority);
/*
 * omap_set_dma_transfer_params - configure the basic transfer shape:
 * element data type (CSDP), sync mode and hardware trigger (CCR, and
 * CCR2 on OMAP1), element count (CEN) and frame count (CFN).
 */
379 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
380 int frame_count, int sync_mode,
381 int dma_trigger, int src_or_dst_synch)
385 l = dma_read(CSDP, lch);
388 dma_write(l, CSDP, lch);
390 if (cpu_class_is_omap1()) {
393 ccr = dma_read(CCR, lch);
395 if (sync_mode == OMAP_DMA_SYNC_FRAME)
397 dma_write(ccr, CCR, lch);
399 ccr = dma_read(CCR2, lch);
401 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
403 dma_write(ccr, CCR2, lch);
406 if (cpu_class_is_omap2() && dma_trigger) {
409 val = dma_read(CCR, lch);
411 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
412 val &= ~((1 << 23) | (3 << 19) | 0x1f);
413 val |= (dma_trigger & ~0x1f) << 14;
414 val |= dma_trigger & 0x1f;
416 if (sync_mode & OMAP_DMA_SYNC_FRAME)
421 if (sync_mode & OMAP_DMA_SYNC_BLOCK)
/* Prefetch mode implies destination synchronization. */
426 if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
427 val &= ~(1 << 24); /* dest synch */
428 val |= (1 << 23); /* Prefetch */
429 } else if (src_or_dst_synch) {
430 val |= 1 << 24; /* source synch */
432 val &= ~(1 << 24); /* dest synch */
434 dma_write(val, CCR, lch);
437 dma_write(elem_count, CEN, lch);
438 dma_write(frame_count, CFN, lch);
440 EXPORT_SYMBOL(omap_set_dma_transfer_params);
/*
 * omap_set_dma_color_mode - select constant-fill / transparent-copy /
 * disabled color mode and program the color value.  Not available in
 * 1510-compatible mode (BUG_ON).  OMAP1 uses CCR2 + LCH_CTRL; OMAP2+
 * uses CCR bits 17:16 plus the COLOR register.
 */
442 void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
444 BUG_ON(omap_dma_in_1510_mode());
446 if (cpu_class_is_omap1()) {
449 w = dma_read(CCR2, lch);
453 case OMAP_DMA_CONSTANT_FILL:
456 case OMAP_DMA_TRANSPARENT_COPY:
459 case OMAP_DMA_COLOR_DIS:
464 dma_write(w, CCR2, lch);
466 w = dma_read(LCH_CTRL, lch);
468 /* Default is channel type 2D */
470 dma_write(color, COLOR, lch);
471 w |= 1; /* Channel type G */
473 dma_write(w, LCH_CTRL, lch);
476 if (cpu_class_is_omap2()) {
479 val = dma_read(CCR, lch);
480 val &= ~((1 << 17) | (1 << 16));
483 case OMAP_DMA_CONSTANT_FILL:
486 case OMAP_DMA_TRANSPARENT_COPY:
489 case OMAP_DMA_COLOR_DIS:
494 dma_write(val, CCR, lch);
497 dma_write(color, COLOR, lch);
500 EXPORT_SYMBOL(omap_set_dma_color_mode);
/* omap_set_dma_write_mode - OMAP2+ only: write mode in CSDP bits 17:16. */
502 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
504 if (cpu_class_is_omap2()) {
507 csdp = dma_read(CSDP, lch);
508 csdp &= ~(0x3 << 16);
509 csdp |= (mode << 16);
510 dma_write(csdp, CSDP, lch);
513 EXPORT_SYMBOL(omap_set_dma_write_mode);
/*
 * omap_set_dma_channel_mode - OMAP1 (but not 15xx) only: set the
 * channel mode field in LCH_CTRL.
 */
515 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
517 if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
520 l = dma_read(LCH_CTRL, lch);
523 dma_write(l, LCH_CTRL, lch);
526 EXPORT_SYMBOL(omap_set_dma_channel_mode);
528 /* Note that src_port is only for omap1 */
/*
 * omap_set_dma_src_params - program source port (OMAP1 CSDP), source
 * addressing mode (CCR bits from bit 12), start address (CSSA) and
 * element/frame indexes (CSEI/CSFI).
 */
529 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
530 unsigned long src_start,
531 int src_ei, int src_fi)
535 if (cpu_class_is_omap1()) {
538 w = dma_read(CSDP, lch);
541 dma_write(w, CSDP, lch);
544 l = dma_read(CCR, lch);
546 l |= src_amode << 12;
547 dma_write(l, CCR, lch);
549 dma_write(src_start, CSSA, lch);
551 dma_write(src_ei, CSEI, lch);
552 dma_write(src_fi, CSFI, lch);
554 EXPORT_SYMBOL(omap_set_dma_src_params);
/*
 * omap_set_dma_params - convenience wrapper: apply a full parameter
 * block (transfer shape, source, destination, and optionally per-
 * channel read/write priority) in one call.
 */
556 void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
558 omap_set_dma_transfer_params(lch, params->data_type,
559 params->elem_count, params->frame_count,
560 params->sync_mode, params->trigger,
561 params->src_or_dst_synch);
562 omap_set_dma_src_params(lch, params->src_port,
563 params->src_amode, params->src_start,
564 params->src_ei, params->src_fi);
566 omap_set_dma_dest_params(lch, params->dst_port,
567 params->dst_amode, params->dst_start,
568 params->dst_ei, params->dst_fi);
569 if (params->read_prio || params->write_prio)
570 omap_dma_set_prio_lch(lch, params->read_prio,
573 EXPORT_SYMBOL(omap_set_dma_params);
/*
 * omap_set_dma_src_index - set source element/frame indexes.
 * NOTE(review): elided lines after the omap2 check presumably bail
 * out (REVISIT/return) on OMAP2 — confirm against the full source.
 */
575 void omap_set_dma_src_index(int lch, int eidx, int fidx)
577 if (cpu_class_is_omap2())
580 dma_write(eidx, CSEI, lch);
581 dma_write(fidx, CSFI, lch);
583 EXPORT_SYMBOL(omap_set_dma_src_index);
/* omap_set_dma_src_data_pack - toggle source data packing in CSDP. */
585 void omap_set_dma_src_data_pack(int lch, int enable)
589 l = dma_read(CSDP, lch);
593 dma_write(l, CSDP, lch);
595 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
/*
 * omap_set_dma_src_burst_mode - select the source burst size in CSDP.
 * Burst-8 and burst-16 are only available on OMAP2+.
 */
597 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
599 unsigned int burst = 0;
602 l = dma_read(CSDP, lch);
605 switch (burst_mode) {
606 case OMAP_DMA_DATA_BURST_DIS:
608 case OMAP_DMA_DATA_BURST_4:
609 if (cpu_class_is_omap2())
614 case OMAP_DMA_DATA_BURST_8:
615 if (cpu_class_is_omap2()) {
620 * not supported by current hardware on OMAP1
624 case OMAP_DMA_DATA_BURST_16:
625 if (cpu_class_is_omap2()) {
630 * OMAP1 doesn't support burst 16
638 dma_write(l, CSDP, lch);
640 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
642 /* Note that dest_port is only for OMAP1 */
/*
 * omap_set_dma_dest_params - program destination port (OMAP1 CSDP),
 * addressing mode (CCR bits from bit 14), start address (CDSA) and
 * element/frame indexes (CDEI/CDFI).
 */
643 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
644 unsigned long dest_start,
645 int dst_ei, int dst_fi)
649 if (cpu_class_is_omap1()) {
650 l = dma_read(CSDP, lch);
653 dma_write(l, CSDP, lch);
656 l = dma_read(CCR, lch);
658 l |= dest_amode << 14;
659 dma_write(l, CCR, lch);
661 dma_write(dest_start, CDSA, lch);
663 dma_write(dst_ei, CDEI, lch);
664 dma_write(dst_fi, CDFI, lch);
666 EXPORT_SYMBOL(omap_set_dma_dest_params);
/*
 * omap_set_dma_dest_index - set destination element/frame indexes.
 * NOTE(review): elided lines after the omap2 check presumably bail
 * out on OMAP2, mirroring omap_set_dma_src_index — confirm.
 */
668 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
670 if (cpu_class_is_omap2())
673 dma_write(eidx, CDEI, lch);
674 dma_write(fidx, CDFI, lch);
676 EXPORT_SYMBOL(omap_set_dma_dest_index);
/* omap_set_dma_dest_data_pack - toggle destination packing in CSDP. */
678 void omap_set_dma_dest_data_pack(int lch, int enable)
682 l = dma_read(CSDP, lch);
686 dma_write(l, CSDP, lch);
688 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
/*
 * omap_set_dma_dest_burst_mode - select the destination burst size in
 * CSDP.  Burst-16 is OMAP2+ only; invalid modes are reported.
 */
690 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
692 unsigned int burst = 0;
695 l = dma_read(CSDP, lch);
698 switch (burst_mode) {
699 case OMAP_DMA_DATA_BURST_DIS:
701 case OMAP_DMA_DATA_BURST_4:
702 if (cpu_class_is_omap2())
707 case OMAP_DMA_DATA_BURST_8:
708 if (cpu_class_is_omap2())
713 case OMAP_DMA_DATA_BURST_16:
714 if (cpu_class_is_omap2()) {
719 * OMAP1 doesn't support burst 16
723 printk(KERN_ERR "Invalid DMA burst mode\n");
728 dma_write(l, CSDP, lch);
730 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
/*
 * omap_enable_channel_irq - clear stale channel status (read CSR on
 * OMAP1, write the clear mask on OMAP2+), then program the channel's
 * enabled interrupt bits into CICR.
 */
732 static inline void omap_enable_channel_irq(int lch)
737 if (cpu_class_is_omap1())
738 status = dma_read(CSR, lch);
739 else if (cpu_class_is_omap2())
740 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
742 /* Enable some nice interrupts. */
743 dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
/* omap_disable_channel_irq - mask all channel interrupts (OMAP2+). */
746 static void omap_disable_channel_irq(int lch)
748 if (cpu_class_is_omap2())
749 dma_write(0, CICR, lch);
/* Add @bits to the set of interrupts enabled for @lch. */
752 void omap_enable_dma_irq(int lch, u16 bits)
754 dma_chan[lch].enabled_irqs |= bits;
756 EXPORT_SYMBOL(omap_enable_dma_irq);
/* Remove @bits from the set of interrupts enabled for @lch. */
758 void omap_disable_dma_irq(int lch, u16 bits)
760 dma_chan[lch].enabled_irqs &= ~bits;
762 EXPORT_SYMBOL(omap_disable_dma_irq);
/*
 * enable_lnk - point @lch's CLNK_CTRL at its successor channel and set
 * the ENABLE_LNK bit (bit 15) so the next transfer chains to it.
 */
764 static inline void enable_lnk(int lch)
768 l = dma_read(CLNK_CTRL, lch);
770 if (cpu_class_is_omap1())
773 /* Set the ENABLE_LNK bits */
774 if (dma_chan[lch].next_lch != -1)
775 l = dma_chan[lch].next_lch | (1 << 15);
777 #ifndef CONFIG_ARCH_OMAP1
778 if (cpu_class_is_omap2())
779 if (dma_chan[lch].next_linked_ch != -1)
780 l = dma_chan[lch].next_linked_ch | (1 << 15);
783 dma_write(l, CLNK_CTRL, lch);
/*
 * disable_lnk - break @lch out of its link chain: mask its interrupts,
 * clear/stop the link bit in CLNK_CTRL and mark the channel inactive.
 */
786 static inline void disable_lnk(int lch)
790 l = dma_read(CLNK_CTRL, lch);
792 /* Disable interrupts */
793 if (cpu_class_is_omap1()) {
794 dma_write(0, CICR, lch);
795 /* Set the STOP_LNK bit */
799 if (cpu_class_is_omap2()) {
800 omap_disable_channel_irq(lch);
801 /* Clear the ENABLE_LNK bit */
805 dma_write(l, CLNK_CTRL, lch);
806 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
/*
 * omap2_enable_irq_lch - route @lch's interrupt to line 0 by setting
 * its bit in IRQENABLE_L0; no-op outside OMAP2+.  Serialized with
 * dma_chan_lock because IRQENABLE_L0 is shared by all channels.
 */
809 static inline void omap2_enable_irq_lch(int lch)
814 if (!cpu_class_is_omap2())
817 spin_lock_irqsave(&dma_chan_lock, flags);
818 val = dma_read(IRQENABLE_L0, lch);
820 dma_write(val, IRQENABLE_L0, lch);
821 spin_unlock_irqrestore(&dma_chan_lock, flags);
/* omap2_disable_irq_lch - converse of omap2_enable_irq_lch. */
824 static inline void omap2_disable_irq_lch(int lch)
829 if (!cpu_class_is_omap2())
832 spin_lock_irqsave(&dma_chan_lock, flags);
833 val = dma_read(IRQENABLE_L0, lch);
835 dma_write(val, IRQENABLE_L0, lch);
836 spin_unlock_irqrestore(&dma_chan_lock, flags);
/*
 * omap_request_dma - allocate a free logical channel, reset its
 * registers, record owner/callback, program the sync device (OMAP1
 * variants) and enable its interrupts (OMAP2+).  The channel number is
 * returned through @dma_ch_out.  Allocation is serialized with
 * dma_chan_lock.
 */
839 int omap_request_dma(int dev_id, const char *dev_name,
840 void (*callback)(int lch, u16 ch_status, void *data),
841 void *data, int *dma_ch_out)
843 int ch, free_ch = -1;
845 struct omap_dma_lch *chan;
847 spin_lock_irqsave(&dma_chan_lock, flags);
848 for (ch = 0; ch < dma_chan_count; ch++) {
849 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
856 spin_unlock_irqrestore(&dma_chan_lock, flags);
859 chan = dma_chan + free_ch;
860 chan->dev_id = dev_id;
862 if (cpu_class_is_omap1())
863 clear_lch_regs(free_ch);
865 if (cpu_class_is_omap2())
866 omap_clear_dma(free_ch);
868 spin_unlock_irqrestore(&dma_chan_lock, flags);
870 chan->dev_name = dev_name;
871 chan->callback = callback;
875 #ifndef CONFIG_ARCH_OMAP1
876 if (cpu_class_is_omap2()) {
878 chan->next_linked_ch = -1;
/* Default interrupt set, plus per-class error interrupts. */
882 chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
884 if (cpu_class_is_omap1())
885 chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
886 else if (cpu_class_is_omap2())
887 chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
888 OMAP2_DMA_TRANS_ERR_IRQ;
890 if (cpu_is_omap16xx()) {
891 /* If the sync device is set, configure it dynamically. */
893 set_gdma_dev(free_ch + 1, dev_id);
894 dev_id = free_ch + 1;
897 * Disable the 1510 compatibility mode and set the sync device
900 dma_write(dev_id | (1 << 10), CCR, free_ch);
901 } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
902 dma_write(dev_id, CCR, free_ch);
905 if (cpu_class_is_omap2()) {
906 omap2_enable_irq_lch(free_ch);
907 omap_enable_channel_irq(free_ch);
908 /* Clear the CSR register and IRQ status register */
909 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
910 dma_write(1 << free_ch, IRQSTATUS_L0, 0);
913 *dma_ch_out = free_ch;
917 EXPORT_SYMBOL(omap_request_dma);
/*
 * omap_free_dma - stop @lch, mask and clear its interrupts, then
 * return it to the free pool (dev_id = -1) under dma_chan_lock.
 */
919 void omap_free_dma(int lch)
923 if (dma_chan[lch].dev_id == -1) {
924 pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
929 if (cpu_class_is_omap1()) {
930 /* Disable all DMA interrupts for the channel. */
931 dma_write(0, CICR, lch);
932 /* Make sure the DMA transfer is stopped. */
933 dma_write(0, CCR, lch);
936 if (cpu_class_is_omap2()) {
937 omap2_disable_irq_lch(lch);
939 /* Clear the CSR register and IRQ status register */
940 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
941 dma_write(1 << lch, IRQSTATUS_L0, lch);
943 /* Disable all DMA interrupts for the channel. */
944 dma_write(0, CICR, lch);
946 /* Make sure the DMA transfer is stopped. */
947 dma_write(0, CCR, lch);
951 spin_lock_irqsave(&dma_chan_lock, flags);
952 dma_chan[lch].dev_id = -1;
953 dma_chan[lch].next_lch = -1;
954 dma_chan[lch].callback = NULL;
955 spin_unlock_irqrestore(&dma_chan_lock, flags);
957 EXPORT_SYMBOL(omap_free_dma);
/*
 * @brief omap_dma_set_global_params : Set global priority settings for dma
 * @param arb_rate - arbitration rate (packed into GCR bits 23:16)
 * @param max_fifo_depth - FIFO budget (GCR bits 7:0); 0 means default
 * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
 * DMA_THREAD_RESERVE_ONET
 * DMA_THREAD_RESERVE_TWOT
 * DMA_THREAD_RESERVE_THREET
 * OMAP2+ only; logs an error and returns on OMAP1.
 */
970 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
974 if (!cpu_class_is_omap2()) {
975 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
979 if (max_fifo_depth == 0)
984 reg = 0xff & max_fifo_depth;
985 reg |= (0x3 & tparams) << 12;
986 reg |= (arb_rate & 0xff) << 16;
988 dma_write(reg, GCR, 0);
990 EXPORT_SYMBOL(omap_dma_set_global_params);
/*
 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 * @param lch - logical channel (validated against dma_lch_count)
 * @param read_prio - Read priority (CCR bit 6)
 * @param write_prio - Write priority (CCR bit 26, 2430/34xx/44xx only)
 * Both of the above can be set with one of the following values :
 * DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 */
1002 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
1003 unsigned char write_prio)
1007 if (unlikely((lch < 0 || lch >= dma_lch_count))) {
1008 printk(KERN_ERR "Invalid channel id\n");
1011 l = dma_read(CCR, lch);
1012 l &= ~((1 << 6) | (1 << 26));
1013 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
1014 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
1016 l |= ((read_prio & 0x1) << 6);
1018 dma_write(l, CCR, lch);
1022 EXPORT_SYMBOL(omap_dma_set_prio_lch);
/*
 * Clears any DMA state so the DMA engine is ready to restart with new buffers
 * through omap_start_dma(). Any buffers in flight are discarded.
 * OMAP1: disable the channel and read CSR to ack pending interrupts;
 * OMAP2+: zero the whole common channel register window.
 */
1028 void omap_clear_dma(int lch)
1030 unsigned long flags;
1032 local_irq_save(flags);
1034 if (cpu_class_is_omap1()) {
1037 l = dma_read(CCR, lch);
1038 l &= ~OMAP_DMA_CCR_EN;
1039 dma_write(l, CCR, lch);
1041 /* Clear pending interrupts */
1042 l = dma_read(CSR, lch);
1045 if (cpu_class_is_omap2()) {
1046 int i = dma_common_ch_start;
1047 for (; i <= dma_common_ch_end; i += 1)
1048 dma_write(0, i, lch);
1051 local_irq_restore(flags);
1053 EXPORT_SYMBOL(omap_clear_dma);
/*
 * omap_start_dma - kick off a transfer on @lch: reset the progress
 * counter (CPC/CDAC), walk and enable any software link chain hanging
 * off the channel (loop-safe via dma_chan_link_map), apply errata
 * workarounds, then set OMAP_DMA_CCR_EN.
 */
1055 void omap_start_dma(int lch)
1060 * The CPC/CDAC register needs to be initialized to zero
1061 * before starting dma transfer.
1063 if (cpu_is_omap15xx())
1064 dma_write(0, CPC, lch);
1066 dma_write(0, CDAC, lch);
1068 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1069 int next_lch, cur_lch;
1070 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
1072 dma_chan_link_map[lch] = 1;
1073 /* Set the link register of the first channel */
1076 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1077 cur_lch = dma_chan[lch].next_lch;
1079 next_lch = dma_chan[cur_lch].next_lch;
1081 /* The loop case: we've been here already */
1082 if (dma_chan_link_map[cur_lch])
1084 /* Mark the current channel */
1085 dma_chan_link_map[cur_lch] = 1;
1087 enable_lnk(cur_lch);
1088 omap_enable_channel_irq(cur_lch);
1091 } while (next_lch != -1);
1092 } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
1093 dma_write(lch, CLNK_CTRL, lch);
1095 omap_enable_channel_irq(lch);
1097 l = dma_read(CCR, lch);
1099 if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
1100 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1101 l |= OMAP_DMA_CCR_EN;
1103 dma_write(l, CCR, lch);
1105 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1107 EXPORT_SYMBOL(omap_start_dma);
/*
 * omap_stop_dma - stop the transfer on @lch.  For the i541 erratum
 * (src/dst-synchronized channels) the OCP_SYSCONFIG MIDLEMODE is
 * forced to no-idle, the channel is disabled, and the code polls CCR
 * until the sDMA FIFO drains (bounded by 100 iterations) before
 * restoring OCP_SYSCONFIG.  Any software link chain is torn down, and
 * the channel is marked inactive.
 */
1109 void omap_stop_dma(int lch)
1113 /* Disable all interrupts on the channel */
1114 if (cpu_class_is_omap1())
1115 dma_write(0, CICR, lch);
1117 l = dma_read(CCR, lch);
1118 if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
1119 (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
1123 /* Configure No-Standby */
1124 l = dma_read(OCP_SYSCONFIG, lch);
1126 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
1127 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
1128 dma_write(l , OCP_SYSCONFIG, 0);
1130 l = dma_read(CCR, lch);
1131 l &= ~OMAP_DMA_CCR_EN;
1132 dma_write(l, CCR, lch);
1134 /* Wait for sDMA FIFO drain */
1135 l = dma_read(CCR, lch);
1136 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
1137 OMAP_DMA_CCR_WR_ACTIVE))) {
1140 l = dma_read(CCR, lch);
1143 printk(KERN_ERR "DMA drain did not complete on "
1145 /* Restore OCP_SYSCONFIG */
1146 dma_write(sys_cf, OCP_SYSCONFIG, lch);
1148 l &= ~OMAP_DMA_CCR_EN;
1149 dma_write(l, CCR, lch);
1152 if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1153 int next_lch, cur_lch = lch;
1154 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
1156 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1158 /* The loop case: we've been here already */
1159 if (dma_chan_link_map[cur_lch])
1161 /* Mark the current channel */
1162 dma_chan_link_map[cur_lch] = 1;
1164 disable_lnk(cur_lch);
1166 next_lch = dma_chan[cur_lch].next_lch;
1168 } while (next_lch != -1);
1171 dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
1173 EXPORT_SYMBOL(omap_stop_dma);
/*
 * Allows changing the DMA callback function or data. This may be needed if
 * the driver shares a single DMA channel for multiple dma triggers.
 * Fails (with an error log) if @lch is not currently allocated; the
 * update is done under dma_chan_lock.
 */
1179 int omap_set_dma_callback(int lch,
1180 void (*callback)(int lch, u16 ch_status, void *data),
1183 unsigned long flags;
1188 spin_lock_irqsave(&dma_chan_lock, flags);
1189 if (dma_chan[lch].dev_id == -1) {
1190 printk(KERN_ERR "DMA callback for not set for free channel\n");
1191 spin_unlock_irqrestore(&dma_chan_lock, flags);
1194 dma_chan[lch].callback = callback;
1195 dma_chan[lch].data = data;
1196 spin_unlock_irqrestore(&dma_chan_lock, flags);
1200 EXPORT_SYMBOL(omap_set_dma_callback);
/*
 * Returns current physical source address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior calling
 * this function and process the returned value before re-enabling interrupt to
 * prevent races with the interrupt handler. Note that in continuous mode there
 * is a chance for CSSA_L register overflow inbetween the two reads resulting
 * in incorrect return value.
 * (15xx reads CPC; others read CSAC, re-reading once for errata 3.3,
 * and OMAP1 merges in the upper half of CSSA.)
 */
1210 dma_addr_t omap_get_dma_src_pos(int lch)
1212 dma_addr_t offset = 0;
1214 if (cpu_is_omap15xx())
1215 offset = dma_read(CPC, lch);
1217 offset = dma_read(CSAC, lch);
1219 if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1220 offset = dma_read(CSAC, lch);
1222 if (cpu_class_is_omap1())
1223 offset |= (dma_read(CSSA, lch) & 0xFFFF0000);
1227 EXPORT_SYMBOL(omap_get_dma_src_pos);
/*
 * Returns current physical destination address for the given DMA channel.
 * If the channel is running the caller must disable interrupts prior calling
 * this function and process the returned value before re-enabling interrupt to
 * prevent races with the interrupt handler. Note that in continuous mode there
 * is a chance for CDSA_L register overflow inbetween the two reads resulting
 * in incorrect return value.
 */
1237 dma_addr_t omap_get_dma_dst_pos(int lch)
1239 dma_addr_t offset = 0;
1241 if (cpu_is_omap15xx())
1242 offset = dma_read(CPC, lch);
1244 offset = dma_read(CDAC, lch);
1247 * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1248 * read before the DMA controller finished disabling the channel.
1250 if (!cpu_is_omap15xx() && offset == 0)
1251 offset = dma_read(CDAC, lch);
1253 if (cpu_class_is_omap1())
1254 offset |= (dma_read(CDSA, lch) & 0xFFFF0000);
1258 EXPORT_SYMBOL(omap_get_dma_dst_pos);
/* omap_get_dma_active_status - non-zero when @lch's CCR enable bit is set. */
1260 int omap_get_dma_active_status(int lch)
1262 return (dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1264 EXPORT_SYMBOL(omap_get_dma_active_status);
/*
 * omap_dma_running - whether any DMA (including OMAP1 LCD DMA) is
 * active; scans every channel's CCR enable bit.
 */
1266 int omap_dma_running(void)
1270 if (cpu_class_is_omap1())
1271 if (omap_lcd_dma_running())
1274 for (lch = 0; lch < dma_chan_count; lch++)
1275 if (dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
/*
 * lch_queue DMA will start right after lch_head one is finished.
 * For this DMA link to start, you still need to start (see omap_start_dma)
 * the first one. That will fire up the entire queue.
 * In 1510 mode only self-linking (auto-repeat via CCR bits 9:8) is
 * supported; both channels must already be allocated.
 */
1286 void omap_dma_link_lch(int lch_head, int lch_queue)
1288 if (omap_dma_in_1510_mode()) {
1289 if (lch_head == lch_queue) {
1290 dma_write(dma_read(CCR, lch_head) | (3 << 8),
1294 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1299 if ((dma_chan[lch_head].dev_id == -1) ||
1300 (dma_chan[lch_queue].dev_id == -1)) {
1301 printk(KERN_ERR "omap_dma: trying to link "
1302 "non requested channels\n");
1306 dma_chan[lch_head].next_lch = lch_queue;
1308 EXPORT_SYMBOL(omap_dma_link_lch);
/*
 * Once the DMA queue is stopped, we can destroy it.
 * Refuses to unlink channels that are not linked head->queue or that
 * are still active.
 */
1313 void omap_dma_unlink_lch(int lch_head, int lch_queue)
1315 if (omap_dma_in_1510_mode()) {
1316 if (lch_head == lch_queue) {
1317 dma_write(dma_read(CCR, lch_head) & ~(3 << 8),
1321 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1326 if (dma_chan[lch_head].next_lch != lch_queue ||
1327 dma_chan[lch_head].next_lch == -1) {
1328 printk(KERN_ERR "omap_dma: trying to unlink "
1329 "non linked channels\n");
1333 if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1334 (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1335 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1336 "before unlinking\n");
1340 dma_chan[lch_head].next_lch = -1;
1342 EXPORT_SYMBOL(omap_dma_unlink_lch);
1344 /*----------------------------------------------------------------------------*/
1346 #ifndef CONFIG_ARCH_OMAP1
1347 /* Create chain of DMA channels */
/*
 * create_dma_lch_chain - insert @lch_queue into the circular doubly
 * linked chain after @lch_head (or pair the two if the chain is
 * empty), then program both channels' CLNK_CTRL hardware links.
 */
1348 static void create_dma_lch_chain(int lch_head, int lch_queue)
1352 /* Check if this is the first link in chain */
1353 if (dma_chan[lch_head].next_linked_ch == -1) {
1354 dma_chan[lch_head].next_linked_ch = lch_queue;
1355 dma_chan[lch_head].prev_linked_ch = lch_queue;
1356 dma_chan[lch_queue].next_linked_ch = lch_head;
1357 dma_chan[lch_queue].prev_linked_ch = lch_head;
1360 /* a link exists, link the new channel in circular chain */
1362 dma_chan[lch_queue].next_linked_ch =
1363 dma_chan[lch_head].next_linked_ch;
1364 dma_chan[lch_queue].prev_linked_ch = lch_head;
1365 dma_chan[lch_head].next_linked_ch = lch_queue;
1366 dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
1370 l = dma_read(CLNK_CTRL, lch_head);
1373 dma_write(l, CLNK_CTRL, lch_head);
1375 l = dma_read(CLNK_CTRL, lch_queue);
1377 l |= (dma_chan[lch_queue].next_linked_ch);
1378 dma_write(l, CLNK_CTRL, lch_queue);
/*
 * @brief omap_request_dma_chain : Request a chain of DMA channels
 * @param dev_id - Device id using the dma channel
 * @param dev_name - Device name
 * @param callback - Call back function
 * @chain_id - out-parameter; set to the first allocated channel
 * @no_of_chans - Number of channels requested
 * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
 * OMAP_DMA_DYNAMIC_CHAIN
 * @params - Channel parameters applied to every channel in the chain
 * @return - Success : 0
 * Failure: -EINVAL/-ENOMEM
 * On any single allocation failure, all previously requested channels
 * are freed before returning.
 */
1396 int omap_request_dma_chain(int dev_id, const char *dev_name,
1397 void (*callback) (int lch, u16 ch_status,
1399 int *chain_id, int no_of_chans, int chain_mode,
1400 struct omap_dma_channel_params params)
1405 /* Is the chain mode valid ? */
1406 if (chain_mode != OMAP_DMA_STATIC_CHAIN
1407 && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1408 printk(KERN_ERR "Invalid chain mode requested\n");
1412 if (unlikely((no_of_chans < 1
1413 || no_of_chans > dma_lch_count))) {
1414 printk(KERN_ERR "Invalid Number of channels requested\n");
1419 * Allocate a queue to maintain the status of the channels
1422 channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1423 if (channels == NULL) {
1424 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1428 /* request and reserve DMA channels for the chain */
1429 for (i = 0; i < no_of_chans; i++) {
1430 err = omap_request_dma(dev_id, dev_name,
1431 callback, NULL, &channels[i]);
1434 for (j = 0; j < i; j++)
1435 omap_free_dma(channels[j]);
1437 printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1440 dma_chan[channels[i]].prev_linked_ch = -1;
1441 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1444 * Allowing client drivers to set common parameters now,
1445 * so that later only relevant (src_start, dest_start
1446 * and element count) can be set
1448 omap_set_dma_params(channels[i], ¶ms);
/* NOTE(review): "¶ms" above is mojibake of "&params" — restore. */
1451 *chain_id = channels[0];
1452 dma_linked_lch[*chain_id].linked_dmach_q = channels;
1453 dma_linked_lch[*chain_id].chain_mode = chain_mode;
1454 dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1455 dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1457 for (i = 0; i < no_of_chans; i++)
1458 dma_chan[channels[i]].chain_id = *chain_id;
1460 /* Reset the Queue pointers */
1461 OMAP_DMA_CHAIN_QINIT(*chain_id);
1463 /* Set up the chain */
1464 if (no_of_chans == 1)
1465 create_dma_lch_chain(channels[0], channels[0]);
1467 for (i = 0; i < (no_of_chans - 1); i++)
1468 create_dma_lch_chain(channels[i], channels[i + 1]);
1473 EXPORT_SYMBOL(omap_request_dma_chain);
/*
 * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
 * params after setting it. Dont do this while dma is running!!
 * @param chain_id - Chained logical channel id.
 * @param params - new parameters applied to every channel in the chain
 * @return - Success : 0
 * Failure: -EINVAL (bad chain id or chain does not exist)
 */
1485 int omap_modify_dma_chain_params(int chain_id,
1486 struct omap_dma_channel_params params)
1491 /* Check for input params */
1492 if (unlikely((chain_id < 0
1493 || chain_id >= dma_lch_count))) {
1494 printk(KERN_ERR "Invalid chain id\n");
1498 /* Check if the chain exists */
1499 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1500 printk(KERN_ERR "Chain doesn't exists\n");
1503 channels = dma_linked_lch[chain_id].linked_dmach_q;
1505 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1507 * Allowing client drivers to set common parameters now,
1508 * so that later only relevant (src_start, dest_start
1509 * and element count) can be set
1511 omap_set_dma_params(channels[i], ¶ms);
/* NOTE(review): "¶ms" above is mojibake of "&params" — restore. */
1516 EXPORT_SYMBOL(omap_modify_dma_chain_params);
/*
 * @brief omap_free_dma_chain - Free all the logical channels in a chain.
 * @param chain_id - chain to tear down
 * @return - Success : 0
 * Failure : -EINVAL (bad chain id or chain does not exist)
 * Resets each member channel's chaining state, frees it, then clears
 * the chain descriptor.
 */
1526 int omap_free_dma_chain(int chain_id)
1531 /* Check for input params */
1532 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1533 printk(KERN_ERR "Invalid chain id\n");
1537 /* Check if the chain exists */
1538 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1539 printk(KERN_ERR "Chain doesn't exists\n");
1543 channels = dma_linked_lch[chain_id].linked_dmach_q;
1544 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1545 dma_chan[channels[i]].next_linked_ch = -1;
1546 dma_chan[channels[i]].prev_linked_ch = -1;
1547 dma_chan[channels[i]].chain_id = -1;
1548 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1549 omap_free_dma(channels[i]);
1554 dma_linked_lch[chain_id].linked_dmach_q = NULL;
1555 dma_linked_lch[chain_id].chain_mode = -1;
1556 dma_linked_lch[chain_id].chain_state = -1;
1560 EXPORT_SYMBOL(omap_free_dma_chain);
/*
 * NOTE(review): elided numbered listing — error-return lines are omitted.
 * Reports chain activity from the queue count: an empty queue means no
 * queued transfers, i.e. the chain is inactive.
 */
1563 * @brief omap_dma_chain_status - Check if the chain is in
1564 * active / inactive state.
1567 * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1570 int omap_dma_chain_status(int chain_id)
1572 /* Check for input params */
1573 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1574 printk(KERN_ERR "Invalid chain id\n");
1578 /* Check if the chain exists */
1579 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1580 printk(KERN_ERR "Chain doesn't exists\n");
1583 pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1584 dma_linked_lch[chain_id].q_count);
1586 if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1587 return OMAP_DMA_CHAIN_INACTIVE;
1589 return OMAP_DMA_CHAIN_ACTIVE;
1591 EXPORT_SYMBOL(omap_dma_chain_status);
/*
 * NOTE(review): elided numbered listing — many interior lines (declarations,
 * returns, else-branches, closing braces) are missing between the numbered
 * lines below; read fragments accordingly.
 * Takes the channel at the chain's queue tail, programs its source/dest
 * addresses and element/frame counts, and — for dynamic chains — links it
 * to the previous channel and (re)starts the chain if needed.
 */
1594 * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1595 * set the params and start the transfer.
1598 * @param src_start - buffer start address
1599 * @param dest_start - Dest address
1601 * @param frame_count
1602 * @param callbk_data - channel callback parameter data.
1604 * @return - Success : 0
1605 * Failure: -EINVAL/-EBUSY
1607 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1608 int elem_count, int frame_count, void *callbk_data)
1615 * if buffer size is less than 1 then there is
1616 * no use of starting the chain
1618 if (elem_count < 1) {
1619 printk(KERN_ERR "Invalid buffer size\n");
1623 /* Check for input params */
1624 if (unlikely((chain_id < 0
1625 || chain_id >= dma_lch_count))) {
1626 printk(KERN_ERR "Invalid chain id\n");
1630 /* Check if the chain exists */
1631 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1632 printk(KERN_ERR "Chain doesn't exist\n");
1636 /* Check if all the channels in chain are in use */
1637 if (OMAP_DMA_CHAIN_QFULL(chain_id))
1640 /* Frame count may be negative in case of indexed transfers */
1641 channels = dma_linked_lch[chain_id].linked_dmach_q;
1643 /* Get a free channel */
1644 lch = channels[dma_linked_lch[chain_id].q_tail];
1646 /* Store the callback data */
1647 dma_chan[lch].data = callbk_data;
1649 /* Increment the q_tail */
1650 OMAP_DMA_CHAIN_INCQTAIL(chain_id);
/* dest_start of 0 is treated as "leave CDSA unchanged" (src is unconditional). */
1652 /* Set the params to the free channel */
1654 dma_write(src_start, CSSA, lch);
1655 if (dest_start != 0)
1656 dma_write(dest_start, CDSA, lch);
1658 /* Write the buffer size */
1659 dma_write(elem_count, CEN, lch);
1660 dma_write(frame_count, CFN, lch);
1663 * If the chain is dynamically linked,
1664 * then we may have to start the chain if it's not active
1666 if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1669 * In Dynamic chain, if the chain is not started,
1672 if (dma_linked_lch[chain_id].chain_state ==
1673 DMA_CHAIN_NOTSTARTED) {
1674 /* Enable the link in previous channel */
1675 if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1677 enable_lnk(dma_chan[lch].prev_linked_ch);
1678 dma_chan[lch].state = DMA_CH_QUEUED;
1682 * Chain is already started, make sure it's active;
1683 * if not then start the chain
1688 if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1690 enable_lnk(dma_chan[lch].prev_linked_ch);
1691 dma_chan[lch].state = DMA_CH_QUEUED;
/* NOTE(review): bit 7 of CCR is tested as "previous channel still running"
 * — presumably the channel-enable bit; confirm against the sDMA TRM. */
1693 if (0 == ((1 << 7) & dma_read(
1694 CCR, dma_chan[lch].prev_linked_ch))) {
1695 disable_lnk(dma_chan[lch].
1697 pr_debug("\n prev ch is stopped\n");
1702 else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1704 enable_lnk(dma_chan[lch].prev_linked_ch);
1705 dma_chan[lch].state = DMA_CH_QUEUED;
1708 omap_enable_channel_irq(lch);
1710 l = dma_read(CCR, lch);
/* NOTE(review): meaning of CCR bit 24 here is not visible in this listing
 * — the branch it guards has been elided; verify against the TRM. */
1712 if ((0 == (l & (1 << 24))))
1716 if (start_dma == 1) {
1717 if (0 == (l & (1 << 7))) {
1719 dma_chan[lch].state = DMA_CH_STARTED;
1720 pr_debug("starting %d\n", lch);
1721 dma_write(l, CCR, lch);
1725 if (0 == (l & (1 << 7)))
1726 dma_write(l, CCR, lch);
1728 dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1734 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
/*
 * NOTE(review): elided numbered listing — interior lines are missing.
 * Starts a chain: for static chains, links and enables the IRQ of every
 * channel; then enables the first channel and marks the chain started.
 * NOTE(review): indexing dma_linked_lch by channels[0] works because this
 * API assigns chain_id = channels[0] at request time — confirm if reused.
 */
1737 * @brief omap_start_dma_chain_transfers - Start the chain
1741 * @return - Success : 0
1742 * Failure : -EINVAL/-EBUSY
1744 int omap_start_dma_chain_transfers(int chain_id)
1749 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1750 printk(KERN_ERR "Invalid chain id\n");
1754 channels = dma_linked_lch[chain_id].linked_dmach_q;
1756 if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1757 printk(KERN_ERR "Chain is already started\n");
1761 if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1762 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1764 enable_lnk(channels[i]);
1765 omap_enable_channel_irq(channels[i]);
1768 omap_enable_channel_irq(channels[0]);
1771 l = dma_read(CCR, channels[0]);
1773 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1774 dma_chan[channels[0]].state = DMA_CH_STARTED;
/* NOTE(review): the branch guarded by CCR bit 24 has been elided here. */
1776 if ((0 == (l & (1 << 24))))
1780 dma_write(l, CCR, channels[0]);
1782 dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1786 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
/*
 * NOTE(review): elided numbered listing — interior lines are missing.
 * Stops every channel in the chain, unlinks them, resets the queue, and
 * (for errata i88) temporarily forces the sDMA OCP_SYSCONFIG middle mode
 * to no-standby around the channel disables, restoring it afterwards.
 */
1789 * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1793 * @return - Success : 0
1796 int omap_stop_dma_chain_transfers(int chain_id)
1802 /* Check for input params */
1803 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1804 printk(KERN_ERR "Invalid chain id\n");
1808 /* Check if the chain exists */
1809 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1810 printk(KERN_ERR "Chain doesn't exists\n");
1813 channels = dma_linked_lch[chain_id].linked_dmach_q;
1815 if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1816 sys_cf = dma_read(OCP_SYSCONFIG, 0);
1818 /* Middle mode reg set no Standby */
1819 l &= ~((1 << 12)|(1 << 13));
1820 dma_write(l, OCP_SYSCONFIG, 0);
1823 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
/* NOTE(review): the modification of CCR between read and write is elided. */
1825 /* Stop the Channel transmission */
1826 l = dma_read(CCR, channels[i]);
1828 dma_write(l, CCR, channels[i]);
1830 /* Disable the link in all the channels */
1831 disable_lnk(channels[i]);
1832 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1835 dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1837 /* Reset the Queue pointers */
1838 OMAP_DMA_CHAIN_QINIT(chain_id);
/* Restore the OCP_SYSCONFIG value saved before the errata workaround. */
1840 if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1841 dma_write(sys_cf, OCP_SYSCONFIG, 0);
1845 EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
/*
 * NOTE(review): elided numbered listing — error-return lines are omitted.
 * Reads the current element (CCEN) and frame (CCFN) counters of the channel
 * at the chain's queue head, i.e. the transfer currently in flight.
 */
1847 /* Get the index of the ongoing DMA in chain */
1849 * @brief omap_get_dma_chain_index - Get the element and frame index
1850 * of the ongoing DMA in chain
1853 * @param ei - Element index
1854 * @param fi - Frame index
1856 * @return - Success : 0
1859 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1864 /* Check for input params */
1865 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1866 printk(KERN_ERR "Invalid chain id\n");
1870 /* Check if the chain exists */
1871 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1872 printk(KERN_ERR "Chain doesn't exists\n");
/* NOTE(review): NULL checks for ei/fi are not visible in this listing. */
1878 channels = dma_linked_lch[chain_id].linked_dmach_q;
1880 /* Get the current channel */
1881 lch = channels[dma_linked_lch[chain_id].q_head];
1883 *ei = dma_read(CCEN, lch);
1884 *fi = dma_read(CCFN, lch);
1888 EXPORT_SYMBOL(omap_get_dma_chain_index);
/*
 * NOTE(review): elided numbered listing — error-return lines are omitted.
 * Returns the CDAC (destination address counter) of the channel at the
 * chain's queue head.
 */
1891 * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1892 * ongoing DMA in chain
1896 * @return - Success : Destination position
1899 int omap_get_dma_chain_dst_pos(int chain_id)
1904 /* Check for input params */
1905 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1906 printk(KERN_ERR "Invalid chain id\n");
1910 /* Check if the chain exists */
1911 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1912 printk(KERN_ERR "Chain doesn't exists\n");
1916 channels = dma_linked_lch[chain_id].linked_dmach_q;
1918 /* Get the current channel */
1919 lch = channels[dma_linked_lch[chain_id].q_head];
1921 return dma_read(CDAC, lch);
1923 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
/*
 * NOTE(review): elided numbered listing — error-return lines are omitted.
 * Returns the CSAC (source address counter) of the channel at the chain's
 * queue head.
 */
1926 * @brief omap_get_dma_chain_src_pos - Get the source position
1927 * of the ongoing DMA in chain
1930 * @return - Success : Source position
1933 int omap_get_dma_chain_src_pos(int chain_id)
1938 /* Check for input params */
1939 if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1940 printk(KERN_ERR "Invalid chain id\n");
1944 /* Check if the chain exists */
1945 if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1946 printk(KERN_ERR "Chain doesn't exists\n");
1950 channels = dma_linked_lch[chain_id].linked_dmach_q;
1952 /* Get the current channel */
1953 lch = channels[dma_linked_lch[chain_id].q_head];
1955 return dma_read(CSAC, lch);
1957 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1958 #endif /* ifndef CONFIG_ARCH_OMAP1 */
1960 /*----------------------------------------------------------------------------*/
1962 #ifdef CONFIG_ARCH_OMAP1
/*
 * NOTE(review): elided numbered listing — declarations, early returns and
 * closing braces are missing between the numbered lines below.
 * Per-channel OMAP1 interrupt handling: fetch CSR (from the shadow copy for
 * the 1510-mode virtual channels >= 6), log timeout/drop events, clear the
 * ACTIVE flag on block completion, and invoke the client callback.
 */
1964 static int omap1_dma_handle_ch(int ch)
/* In 1510 mode channels 6..8 have no own IRQ; their status was stashed in
 * saved_csr by the lower channel's handler (see csr >> 7 below). */
1968 if (enable_1510_mode && ch >= 6) {
1969 csr = dma_chan[ch].saved_csr;
1970 dma_chan[ch].saved_csr = 0;
1972 csr = dma_read(CSR, ch);
1973 if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1974 dma_chan[ch + 6].saved_csr = csr >> 7;
/* Low 6 CSR bits are the event bits; nothing pending means nothing to do. */
1977 if ((csr & 0x3f) == 0)
1979 if (unlikely(dma_chan[ch].dev_id == -1)) {
1980 printk(KERN_WARNING "Spurious interrupt from DMA channel "
1981 "%d (CSR %04x)\n", ch, csr);
1984 if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
1985 printk(KERN_WARNING "DMA timeout with device %d\n",
1986 dma_chan[ch].dev_id);
1987 if (unlikely(csr & OMAP_DMA_DROP_IRQ))
1988 printk(KERN_WARNING "DMA synchronization event drop occurred "
1989 "with device %d\n", dma_chan[ch].dev_id);
1990 if (likely(csr & OMAP_DMA_BLOCK_IRQ))
1991 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1992 if (likely(dma_chan[ch].callback != NULL))
1993 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
/*
 * NOTE(review): elided numbered listing — the loop construct around
 * handled_now and the handler's closing brace are not shown.
 * OMAP1 IRQ entry point: dev_id encodes (channel + 1) to dodge
 * request_irq()'s dislike of a zero dev_id (see omap_init_dma); also
 * services the paired 1510-mode shadow channel ch+6 when it has status.
 */
1998 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
2000 int ch = ((int) dev_id) - 1;
2004 int handled_now = 0;
2006 handled_now += omap1_dma_handle_ch(ch);
2007 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
2008 handled_now += omap1_dma_handle_ch(ch + 6);
2011 handled += handled_now;
2014 return handled ? IRQ_HANDLED : IRQ_NONE;
/* Non-OMAP1 builds: stub the handler out so omap_init_dma still links. */
2018 #define omap1_dma_irq_handler NULL
2021 #ifdef CONFIG_ARCH_OMAP2PLUS
/*
 * NOTE(review): elided numbered listing — guard conditions, early returns
 * and closing braces are missing between the numbered lines below.
 * Per-channel OMAP2+ interrupt handling: report spurious/error conditions,
 * apply the errata-i378 manual channel disable on transaction errors,
 * acknowledge CSR and IRQSTATUS_L0, advance chain bookkeeping for chained
 * channels, and invoke the client callback.
 */
2023 static int omap2_dma_handle_ch(int ch)
2025 u32 status = dma_read(CSR, ch)
2028 if (printk_ratelimit())
2029 printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
2031 dma_write(1 << ch, IRQSTATUS_L0, ch);
2034 if (unlikely(dma_chan[ch].dev_id == -1)) {
2035 if (printk_ratelimit())
2036 printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
2037 "channel %d\n", status, ch);
2040 if (unlikely(status & OMAP_DMA_DROP_IRQ))
2042 "DMA synchronization event drop occurred with device "
2043 "%d\n", dma_chan[ch].dev_id);
2044 if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
2045 printk(KERN_INFO "DMA transaction error with device %d\n",
2046 dma_chan[ch].dev_id);
/* Errata i378: the hardware does not disable the channel after a
 * transaction error, so clear CCR_EN by hand. */
2047 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
2050 ccr = dma_read(CCR, ch);
2051 ccr &= ~OMAP_DMA_CCR_EN;
2052 dma_write(ccr, CCR, ch);
2053 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
2056 if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
2057 printk(KERN_INFO "DMA secure error with device %d\n",
2058 dma_chan[ch].dev_id);
2059 if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
2060 printk(KERN_INFO "DMA misaligned error with device %d\n",
2061 dma_chan[ch].dev_id);
/* Acknowledge all handled CSR bits, then the L0 line for this channel. */
2063 dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
2064 dma_write(1 << ch, IRQSTATUS_L0, ch);
2065 /* read back the register to flush the write */
2066 dma_read(IRQSTATUS_L0, ch);
2068 /* If the ch is not chained then chain_id will be -1 */
2069 if (dma_chan[ch].chain_id != -1) {
2070 int chain_id = dma_chan[ch].chain_id;
2071 dma_chan[ch].state = DMA_CH_NOTSTARTED;
/* CLNK_CTRL bit 15 set means this channel was linked onward to the next. */
2072 if (dma_read(CLNK_CTRL, ch) & (1 << 15))
2073 dma_chan[dma_chan[ch].next_linked_ch].state =
2075 if (dma_linked_lch[chain_id].chain_mode ==
2076 OMAP_DMA_DYNAMIC_CHAIN)
2079 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
2080 OMAP_DMA_CHAIN_INCQHEAD(chain_id);
2082 status = dma_read(CSR, ch);
2085 dma_write(status, CSR, ch);
2087 if (likely(dma_chan[ch].callback != NULL))
2088 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
2093 /* STATUS register count is from 1-32 while ours is 0-31 */
/*
 * NOTE(review): elided numbered listing — the spurious-IRQ guard, the
 * per-bit test/shift inside the loop, and the return are not shown.
 * Top-level OMAP2+ IRQ handler: reads IRQSTATUS_L0, masks it with
 * IRQENABLE_L0, and dispatches each pending channel to
 * omap2_dma_handle_ch().
 */
2094 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
2096 u32 val, enable_reg;
2099 val = dma_read(IRQSTATUS_L0, 0);
2101 if (printk_ratelimit())
2102 printk(KERN_WARNING "Spurious DMA IRQ\n");
2105 enable_reg = dma_read(IRQENABLE_L0, 0);
2106 val &= enable_reg; /* Dispatch only relevant interrupts */
2107 for (i = 0; i < dma_lch_count && val != 0; i++) {
2109 omap2_dma_handle_ch(i);
/*
 * irqaction for the single shared OMAP2+ sDMA interrupt line; registered
 * via setup_irq() in omap_init_dma(). The second (empty) definition is the
 * non-OMAP2PLUS stub — the surrounding #else/#endif are elided here.
 */
2116 static struct irqaction omap24xx_dma_irq = {
2118 .handler = omap2_dma_irq_handler,
2119 .flags = IRQF_DISABLED
2123 static struct irqaction omap24xx_dma_irq;
2126 /*----------------------------------------------------------------------------*/
/*
 * Saves the sDMA controller's global registers (IRQENABLE_L0,
 * OCP_SYSCONFIG, GCR) into omap_dma_global_context for restore after a
 * power-state transition. (Closing brace elided in this listing.)
 */
2128 void omap_dma_global_context_save(void)
2130 omap_dma_global_context.dma_irqenable_l0 =
2131 dma_read(IRQENABLE_L0, 0);
2132 omap_dma_global_context.dma_ocp_sysconfig =
2133 dma_read(OCP_SYSCONFIG, 0);
2134 omap_dma_global_context.dma_gcr = dma_read(GCR, 0);
/*
 * NOTE(review): elided numbered listing — the register arguments of the
 * two continued dma_write() calls and the loop body are not shown.
 * Restores the global registers saved by omap_dma_global_context_save(),
 * clears the ROM-code-bug leftover IRQ status for channels 0/1 on affected
 * secure devices, and walks all channels to clear stale per-channel state.
 */
2137 void omap_dma_global_context_restore(void)
2141 dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
2142 dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2144 dma_write(omap_dma_global_context.dma_irqenable_l0,
/* ROM code leaves IRQ status bits 0 and 1 set after secure SRAM
 * save/restore — clear them manually (see DMA_ROMCODE_BUG erratum). */
2147 if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
2148 dma_write(0x3 , IRQSTATUS_L0, 0);
2150 for (ch = 0; ch < dma_chan_count; ch++)
2151 if (dma_chan[ch].dev_id != -1)
/*
 * Sets the per-SoC erratum flags consulted elsewhere via IS_DMA_ERRATA().
 * Each workaround below is documented with the affected silicon revision.
 * (Elided listing: the closing brace is not shown.)
 */
2155 static void configure_dma_errata(void)
2159 * Errata applicable for OMAP2430ES1.0 and all omap2420
2162 * Erratum ID: Not Available
2163 * Inter Frame DMA buffering issue DMA will wrongly
2164 * buffer elements if packing and bursting is enabled. This might
2165 * result in data gets stalled in FIFO at the end of the block.
2166 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
2167 * guarantee no data will stay in the DMA FIFO in case inter frame
2171 * Erratum ID: Not Available
2172 * DMA may hang when several channels are used in parallel
2173 * In the following configuration, DMA channel hanging can occur:
2174 * a. Channel i, hardware synchronized, is enabled
2175 * b. Another channel (Channel x), software synchronized, is enabled.
2176 * c. Channel i is disabled before end of transfer
2177 * d. Channel i is reenabled.
2178 * e. Steps 1 to 4 are repeated a certain number of times.
2179 * f. A third channel (Channel y), software synchronized, is enabled.
2180 * Channel x and Channel y may hang immediately after step 'f'.
2182 * For any channel used - make sure NextLCH_ID is set to the value j.
2184 if (cpu_is_omap2420() || (cpu_is_omap2430() &&
2185 (omap_type() == OMAP2430_REV_ES1_0))) {
2186 SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
2187 SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
2191 * Erratum ID: i378: OMAP2plus: sDMA Channel is not disabled
2192 * after a transaction error.
2193 * Workaround: SW should explicitly disable the channel.
2195 if (cpu_class_is_omap2())
2196 SET_DMA_ERRATA(DMA_ERRATA_i378);
2199 * Erratum ID: i541: sDMA FIFO draining does not finish
2200 * If sDMA channel is disabled on the fly, sDMA enters standby even
2201 * though FIFO Drain is still in progress
2202 * Workaround: Put sDMA in NoStandby mode before a logical channel is
2203 * disabled, then put it back to SmartStandby right after the channel
2204 * finishes FIFO draining.
2206 if (cpu_is_omap34xx())
2207 SET_DMA_ERRATA(DMA_ERRATA_i541);
2210 * Erratum ID: i88 : Special programming model needed to disable DMA
2211 * before end of block.
2212 * Workaround: software must ensure that the DMA is configured in No
2213 * Standby mode(DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
2215 if (cpu_is_omap34xx() && (omap_type() == OMAP3430_REV_ES1_0))
2216 SET_DMA_ERRATA(DMA_ERRATA_i88);
2219 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
2220 * read before the DMA controller finished disabling the channel.
2222 if (!cpu_is_omap15xx())
2223 SET_DMA_ERRATA(DMA_ERRATA_3_3);
2226 * Erratum ID: Not Available
2227 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
2228 * after secure sram context save and restore.
2229 * Work around: Hence we need to manually clear those IRQs to avoid
2230 * spurious interrupts. This affects only secure devices.
2232 if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2233 SET_DMA_ERRATA(DMA_ROMCODE_BUG);
/*
 * NOTE(review): elided numbered listing — error-handling branches,
 * allocation-failure cleanup and several statements are missing between
 * the numbered lines below.
 * Driver init: pick the register base and channel count per SoC, ioremap,
 * select the OMAP1/OMAP2 register map, allocate per-channel (and, on
 * OMAP2+, per-chain) state, register interrupt handlers, configure global
 * parameters / idle modes, and record applicable errata.
 */
2238 static int __init omap_init_dma(void)
2243 if (cpu_class_is_omap1()) {
2244 base = OMAP1_DMA_BASE;
2245 dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
2246 } else if (cpu_is_omap24xx()) {
2247 base = OMAP24XX_DMA4_BASE;
2248 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2249 } else if (cpu_is_omap34xx()) {
2250 base = OMAP34XX_DMA4_BASE;
2251 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2252 } else if (cpu_is_omap44xx()) {
2253 base = OMAP44XX_DMA4_BASE;
2254 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2256 pr_err("DMA init failed for unsupported omap\n");
2260 omap_dma_base = ioremap(base, SZ_4K);
2261 BUG_ON(!omap_dma_base);
/* Choose the register-offset table and the span of per-channel registers
 * that dma_write()/context code iterates over. */
2263 if (cpu_class_is_omap1()) {
2265 reg_map = reg_map_omap1;
2266 dma_common_ch_start = CPC;
2267 dma_common_ch_end = COLOR;
2270 reg_map = reg_map_omap2;
2271 dma_common_ch_start = CSDP;
2272 if (cpu_is_omap3630() || cpu_is_omap4430())
2273 dma_common_ch_end = CCDN;
2275 dma_common_ch_end = CCFN;
/* Honor the "omap_dma_reserve_ch=" boot argument on OMAP2+ only. */
2278 if (cpu_class_is_omap2() && omap_dma_reserve_channels
2279 && (omap_dma_reserve_channels <= dma_lch_count))
2280 dma_lch_count = omap_dma_reserve_channels;
2282 dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
2289 if (cpu_class_is_omap2()) {
2290 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2291 dma_lch_count, GFP_KERNEL);
2292 if (!dma_linked_lch) {
2298 if (cpu_is_omap15xx()) {
2299 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
2301 enable_1510_mode = 1;
2302 } else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
2303 printk(KERN_INFO "OMAP DMA hardware version %d\n",
2304 dma_read(HW_ID, 0));
2305 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
2306 dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
2307 dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
2308 dma_read(CAPS_4, 0));
2309 if (!enable_1510_mode) {
2312 /* Disable OMAP 3.0/3.1 compatibility mode. */
2313 w = dma_read(GSCR, 0);
2315 dma_write(w, GSCR, 0);
2316 dma_chan_count = 16;
2319 } else if (cpu_class_is_omap2()) {
2320 u8 revision = dma_read(REVISION, 0) & 0xff;
2321 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
2322 revision >> 4, revision & 0xf);
2323 dma_chan_count = dma_lch_count;
2329 spin_lock_init(&dma_chan_lock);
2331 for (ch = 0; ch < dma_chan_count; ch++) {
2333 if (cpu_class_is_omap2())
2334 omap2_disable_irq_lch(ch);
2336 dma_chan[ch].dev_id = -1;
2337 dma_chan[ch].next_lch = -1;
/* 1510 mode: channels >= 6 are virtual shadows with no own IRQ line. */
2339 if (ch >= 6 && enable_1510_mode)
2342 if (cpu_class_is_omap1()) {
2344 * request_irq() doesn't like dev_id (ie. ch) being
2345 * zero, so we have to kludge around this.
2347 r = request_irq(omap1_dma_irq[ch],
2348 omap1_dma_irq_handler, 0, "DMA",
/* On failure, roll back the IRQs already requested for lower channels. */
2353 printk(KERN_ERR "unable to request IRQ %d "
2354 "for DMA (error %d)\n",
2355 omap1_dma_irq[ch], r);
2356 for (i = 0; i < ch; i++)
2357 free_irq(omap1_dma_irq[i],
2364 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
2365 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2366 DMA_DEFAULT_FIFO_DEPTH, 0);
2368 if (cpu_class_is_omap2()) {
2370 if (cpu_is_omap44xx())
2371 irq = OMAP44XX_IRQ_SDMA_0;
2373 irq = INT_24XX_SDMA_IRQ0;
2374 setup_irq(irq, &omap24xx_dma_irq);
2377 if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
2378 /* Enable smartidle idlemodes and autoidle */
2379 u32 v = dma_read(OCP_SYSCONFIG, 0);
2380 v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
2381 DMA_SYSCONFIG_SIDLEMODE_MASK |
2382 DMA_SYSCONFIG_AUTOIDLE);
2383 v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2384 DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2385 DMA_SYSCONFIG_AUTOIDLE);
2386 dma_write(v , OCP_SYSCONFIG, 0);
2387 /* reserve dma channels 0 and 1 in high security devices */
2388 if (cpu_is_omap34xx() &&
2389 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2390 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2392 dma_chan[0].dev_id = 0;
2393 dma_chan[1].dev_id = 1;
2396 configure_dma_errata();
2404 iounmap(omap_dma_base);
2409 arch_initcall(omap_init_dma);
/*
 * Parses the "omap_dma_reserve_ch=" boot argument into
 * omap_dma_reserve_channels; on parse failure the value is reset to 0 so
 * omap_init_dma() falls back to the full channel count.
 * (Elided listing: the return statement is not shown.)
 */
2412 * Reserve the omap SDMA channels using cmdline bootarg
2413 * "omap_dma_reserve_ch=". The valid range is 1 to 32
2415 static int __init omap_dma_cmdline_reserve_ch(char *str)
2417 if (get_option(&str, &omap_dma_reserve_channels) != 1)
2418 omap_dma_reserve_channels = 0;
2422 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);