]> Pileus Git - ~andy/linux/blob - arch/arm/plat-omap/dma.c
OMAP: DMA: Replace read/write macros with functions
[~andy/linux] / arch / arm / plat-omap / dma.c
1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 - 2008 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7  * Graphics DMA and LCD DMA graphics tranformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Copyright (C) 2009 Texas Instruments
14  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15  *
16  * Support functions for the OMAP internal DMA channels.
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License version 2 as
20  * published by the Free Software Foundation.
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/init.h>
26 #include <linux/sched.h>
27 #include <linux/spinlock.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/io.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34
35 #include <asm/system.h>
36 #include <mach/hardware.h>
37 #include <plat/dma.h>
38
39 #include <plat/tc.h>
40
41 #undef DEBUG
42
/*
 * OMAP1 register offset table, indexed by the common register IDs from
 * <plat/dma.h>.  Values are byte offsets from the DMA module base;
 * channel-specific registers are additionally offset by
 * (dma_stride * lch) inside dma_read()/dma_write().
 */
static u16 reg_map_omap1[] = {
	[GCR]		= 0x400,
	[GSCR]		= 0x404,
	[GRST1]		= 0x408,
	[HW_ID]		= 0x442,
	[PCH2_ID]	= 0x444,
	[PCH0_ID]	= 0x446,
	[PCH1_ID]	= 0x448,
	[PCHG_ID]	= 0x44a,
	[PCHD_ID]	= 0x44c,
	[CAPS_0]	= 0x44e,
	[CAPS_1]	= 0x452,
	[CAPS_2]	= 0x456,
	[CAPS_3]	= 0x458,
	[CAPS_4]	= 0x45a,
	[PCH2_SR]	= 0x460,
	[PCH0_SR]	= 0x480,
	[PCH1_SR]	= 0x482,
	[PCHD_SR]	= 0x4c0,

	/* Common Registers */
	[CSDP]		= 0x00,
	[CCR]		= 0x02,
	[CICR]		= 0x04,
	[CSR]		= 0x06,
	[CEN]		= 0x10,
	[CFN]		= 0x12,
	[CSFI]		= 0x14,
	[CSEI]		= 0x16,
	[CPC]		= 0x18,	/* 15xx only */
	[CSAC]		= 0x18,
	[CDAC]		= 0x1a,
	[CDEI]		= 0x1c,
	[CDFI]		= 0x1e,
	[CLNK_CTRL]	= 0x28,

	/* Channel specific register offsets */
	[CSSA]		= 0x08,
	[CDSA]		= 0x0c,
	[COLOR]		= 0x20,
	[CCR2]		= 0x24,
	[LCH_CTRL]	= 0x2a,
};
86
/*
 * OMAP2+ register offset table, indexed by the common register IDs from
 * <plat/dma.h>.  Same usage as reg_map_omap1; the OMAP4-only entries at
 * the end exist only on that generation.
 */
static u16 reg_map_omap2[] = {
	[REVISION]		= 0x00,
	[GCR]			= 0x78,
	[IRQSTATUS_L0]		= 0x08,
	[IRQSTATUS_L1]		= 0x0c,
	[IRQSTATUS_L2]		= 0x10,
	[IRQSTATUS_L3]		= 0x14,
	[IRQENABLE_L0]		= 0x18,
	[IRQENABLE_L1]		= 0x1c,
	[IRQENABLE_L2]		= 0x20,
	[IRQENABLE_L3]		= 0x24,
	[SYSSTATUS]		= 0x28,
	[OCP_SYSCONFIG]		= 0x2c,
	[CAPS_0]		= 0x64,
	[CAPS_2]		= 0x6c,
	[CAPS_3]		= 0x70,
	[CAPS_4]		= 0x74,

	/* Common register offsets */
	[CCR]			= 0x80,
	[CLNK_CTRL]		= 0x84,
	[CICR]			= 0x88,
	[CSR]			= 0x8c,
	[CSDP]			= 0x90,
	[CEN]			= 0x94,
	[CFN]			= 0x98,
	[CSEI]			= 0xa4,
	[CSFI]			= 0xa8,
	[CDEI]			= 0xac,
	[CDFI]			= 0xb0,
	[CSAC]			= 0xb4,
	[CDAC]			= 0xb8,

	/* Channel specific register offsets */
	[CSSA]			= 0x9c,
	[CDSA]			= 0xa0,
	[CCEN]			= 0xbc,
	[CCFN]			= 0xc0,
	[COLOR]			= 0xc4,

	/* OMAP4 specific registers */
	[CDP]			= 0xd0,
	[CNDP]			= 0xd4,
	[CCDN]			= 0xd8,
};
132
#ifndef CONFIG_ARCH_OMAP1
/* Per-channel states used by the OMAP2+ dynamic chaining support */
enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
	DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
};

/* Whole-chain states */
enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
#endif

/* Channel-busy marker kept in struct omap_dma_lch::flags */
#define OMAP_DMA_ACTIVE			0x01
/* CSR bits written back to clear channel status on OMAP2+ */
#define OMAP2_DMA_CSR_CLEAR_MASK	0xffe

/* OMAP1 FUNC_MUX_ARM config area; used by {get,set}_gdma_dev() below */
#define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)

/* Non-zero when the DMA module runs in OMAP1510-compatible mode */
static int enable_1510_mode;

/* Global (non per-channel) registers saved across context loss */
static struct omap_dma_global_context_registers {
	u32 dma_irqenable_l0;
	u32 dma_ocp_sysconfig;
	u32 dma_gcr;
} omap_dma_global_context;
153
/* Book-keeping for one logical DMA channel */
struct omap_dma_lch {
	int next_lch;		/* next channel in a hw link, -1 = none */
	int dev_id;		/* owning device id; -1 = channel is free */
	u16 saved_csr;
	u16 enabled_irqs;	/* CICR mask applied on channel enable */
	const char *dev_name;	/* owner name, for diagnostics */
	void (*callback)(int lch, u16 ch_status, void *data);
	void *data;		/* opaque cookie handed to callback */

#ifndef CONFIG_ARCH_OMAP1
	/* required for Dynamic chaining */
	int prev_linked_ch;
	int next_linked_ch;
	int state;		/* DMA_CH_* value */
	int chain_id;		/* chain this channel belongs to, -1 = none */

	int status;
#endif
	long flags;		/* OMAP_DMA_ACTIVE etc. */
};

/* State of one chain of linked channels (OMAP2+ chaining API) */
struct dma_link_info {
	int *linked_dmach_q;	/* channels forming the chain ... */
	int no_of_lchs_linked;	/* ... and how many there are */

	/* circular queue of channels queued for transfer */
	int q_count;
	int q_tail;
	int q_head;

	int chain_state;	/* DMA_CHAIN_STARTED / DMA_CHAIN_NOTSTARTED */
	int chain_mode;

};

/* One entry per possible chain; allocated elsewhere on OMAP2+ */
static struct dma_link_info *dma_linked_lch;
189
#ifndef CONFIG_ARCH_OMAP1

/* Chain handling macros */

/* Reset the circular queue of a chain to empty */
#define OMAP_DMA_CHAIN_QINIT(chain_id)					\
	do {								\
		dma_linked_lch[chain_id].q_head =			\
		dma_linked_lch[chain_id].q_tail =			\
		dma_linked_lch[chain_id].q_count = 0;			\
	} while (0)
/* True when every linked channel of the chain is queued */
#define OMAP_DMA_CHAIN_QFULL(chain_id)					\
		(dma_linked_lch[chain_id].no_of_lchs_linked ==		\
		dma_linked_lch[chain_id].q_count)
/*
 * True when exactly one free slot remains.  Like QFULL/QEMPTY this is a
 * predicate and must expand to an expression; the previous
 * do { } while (0) wrapper produced a malformed statement, so any use
 * of the macro failed to compile.
 */
#define OMAP_DMA_CHAIN_QLAST(chain_id)					\
		((dma_linked_lch[chain_id].no_of_lchs_linked - 1) ==	\
		dma_linked_lch[chain_id].q_count)
#define OMAP_DMA_CHAIN_QEMPTY(chain_id)					\
		(0 == dma_linked_lch[chain_id].q_count)
/*
 * Advance a queue index, wrapping at the chain length.  Relies on a
 * variable named chain_id being in scope at the call site.
 */
#define __OMAP_DMA_CHAIN_INCQ(end)					\
	((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
/* Consume one queued channel */
#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
		dma_linked_lch[chain_id].q_count--;			\
	} while (0)

/* Queue one more channel */
#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
		dma_linked_lch[chain_id].q_count++;			\
	} while (0)
#endif
223
static int dma_lch_count;		/* total logical channels on this SoC */
static int dma_chan_count;		/* channels actually managed here */
static int omap_dma_reserve_channels;	/* optional cap on channels used;
					 * presumably set at init — not
					 * visible in this file section */

static spinlock_t dma_chan_lock;	/* protects dma_chan[] alloc state */
static struct omap_dma_lch *dma_chan;	/* per-channel book-keeping array */
static void __iomem *omap_dma_base;	/* virtual base of DMA registers */
static u16 *reg_map;			/* points at one reg_map_* table */
static u8 dma_stride;			/* per-channel register stride */
/* Registers in [dma_common_ch_start, dma_common_ch_end] are per-channel */
static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;

/* OMAP1: interrupt line for each logical channel */
static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
	INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
	INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
	INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
	INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
	INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
};

static inline void disable_lnk(int lch);
static void omap_disable_channel_irq(int lch);
static inline void omap_enable_channel_irq(int lch);

/* NOTE(review): macro carries a trailing ';' — callers add another one */
#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
						__func__);
249
250 static inline void dma_write(u32 val, int reg, int lch)
251 {
252         u8  stride;
253         u32 offset;
254
255         stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
256         offset = reg_map[reg] + (stride * lch);
257
258         if (dma_stride  == 0x40) {
259                 __raw_writew(val, omap_dma_base + offset);
260                 if ((reg > CLNK_CTRL && reg < CCEN) ||
261                                 (reg > PCHD_ID && reg < CAPS_2)) {
262                         u32 offset2 = reg_map[reg] + 2 + (stride * lch);
263                         __raw_writew(val >> 16, omap_dma_base + offset2);
264                 }
265         } else {
266                 __raw_writel(val, omap_dma_base + offset);
267         }
268 }
269
270 static inline u32 dma_read(int reg, int lch)
271 {
272         u8 stride;
273         u32 offset, val;
274
275         stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
276         offset = reg_map[reg] + (stride * lch);
277
278         if (dma_stride  == 0x40) {
279                 val = __raw_readw(omap_dma_base + offset);
280                 if ((reg > CLNK_CTRL && reg < CCEN) ||
281                                 (reg > PCHD_ID && reg < CAPS_2)) {
282                         u16 upper;
283                         u32 offset2 = reg_map[reg] + 2 + (stride * lch);
284                         upper = __raw_readw(omap_dma_base + offset2);
285                         val |= (upper << 16);
286                 }
287         } else {
288                 val = __raw_readl(omap_dma_base + offset);
289         }
290         return val;
291 }
292
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
#else
/* Only 15xx-capable kernels can ever be in 1510 mode */
#define omap_dma_in_1510_mode()		0
#endif
302
#ifdef CONFIG_ARCH_OMAP1
/*
 * OMAP1 global DMA request muxing: each 32-bit FUNC_MUX_ARM word packs
 * five 6-bit device selectors, one per DMA request line.  Values are
 * stored biased by -1.
 */
static inline int get_gdma_dev(int req)
{
	u32 addr = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;

	return ((omap_readl(addr) >> shift) & 0x3f) + 1;
}

static inline void set_gdma_dev(int req, int dev)
{
	u32 addr = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;
	u32 val = omap_readl(addr);

	val &= ~(0x3f << shift);
	val |= (dev - 1) << shift;
	omap_writel(val, addr);
}
#else
#define set_gdma_dev(req, dev)	do {} while (0)
#endif
326
327 /* Omap1 only */
328 static void clear_lch_regs(int lch)
329 {
330         int i = dma_common_ch_start;
331
332         for (; i <= dma_common_ch_end; i += 1)
333                 dma_write(0, i, lch);
334 }
335
336 void omap_set_dma_priority(int lch, int dst_port, int priority)
337 {
338         unsigned long reg;
339         u32 l;
340
341         if (cpu_class_is_omap1()) {
342                 switch (dst_port) {
343                 case OMAP_DMA_PORT_OCP_T1:      /* FFFECC00 */
344                         reg = OMAP_TC_OCPT1_PRIOR;
345                         break;
346                 case OMAP_DMA_PORT_OCP_T2:      /* FFFECCD0 */
347                         reg = OMAP_TC_OCPT2_PRIOR;
348                         break;
349                 case OMAP_DMA_PORT_EMIFF:       /* FFFECC08 */
350                         reg = OMAP_TC_EMIFF_PRIOR;
351                         break;
352                 case OMAP_DMA_PORT_EMIFS:       /* FFFECC04 */
353                         reg = OMAP_TC_EMIFS_PRIOR;
354                         break;
355                 default:
356                         BUG();
357                         return;
358                 }
359                 l = omap_readl(reg);
360                 l &= ~(0xf << 8);
361                 l |= (priority & 0xf) << 8;
362                 omap_writel(l, reg);
363         }
364
365         if (cpu_class_is_omap2()) {
366                 u32 ccr;
367
368                 ccr = dma_read(CCR, lch);
369                 if (priority)
370                         ccr |= (1 << 6);
371                 else
372                         ccr &= ~(1 << 6);
373                 dma_write(ccr, CCR, lch);
374         }
375 }
376 EXPORT_SYMBOL(omap_set_dma_priority);
377
/*
 * Program the transfer shape of @lch: element size (@data_type goes to
 * CSDP[1:0]), @elem_count elements per frame (CEN), @frame_count frames
 * (CFN), plus the hardware synchronization mode and trigger line.
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	l = dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;
	dma_write(l, CSDP, lch);

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* CCR[5]: frame sync */
		ccr = dma_read(CCR, lch);
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		dma_write(ccr, CCR, lch);

		/* CCR2[2]: block sync */
		ccr = dma_read(CCR2, lch);
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		dma_write(ccr, CCR2, lch);
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = dma_read(CCR, lch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		/* request-line bits 6:5 land in CCR[20:19] (<<14 = <<19-5) */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;	/* low request bits: CCR[4:0] */

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		dma_write(val, CCR, lch);
	}

	dma_write(elem_count, CEN, lch);
	dma_write(frame_count, CFN, lch);
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
440
/*
 * Configure constant-fill / transparent-copy on @lch.  Not available in
 * OMAP1510-compatible mode.  @color is the fill or transparency value
 * (truncated to 24 bits on OMAP2+).
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		/* CCR2[1:0]: 01 = constant fill, 10 = transparent copy */
		w = dma_read(CCR2, lch);
		w &= ~0x03;

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(w, CCR2, lch);

		w = dma_read(LCH_CTRL, lch);
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			dma_write(color, COLOR, lch);
			w |= 1;		/* Channel type G */
		}
		dma_write(w, LCH_CTRL, lch);
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		/* CCR[17]: transparent copy, CCR[16]: constant fill */
		val = dma_read(CCR, lch);
		val &= ~((1 << 17) | (1 << 16));

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(val, CCR, lch);

		color &= 0xffffff;
		dma_write(color, COLOR, lch);
	}
}
EXPORT_SYMBOL(omap_set_dma_color_mode);
500
501 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
502 {
503         if (cpu_class_is_omap2()) {
504                 u32 csdp;
505
506                 csdp = dma_read(CSDP, lch);
507                 csdp &= ~(0x3 << 16);
508                 csdp |= (mode << 16);
509                 dma_write(csdp, CSDP, lch);
510         }
511 }
512 EXPORT_SYMBOL(omap_set_dma_write_mode);
513
514 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
515 {
516         if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
517                 u32 l;
518
519                 l = dma_read(LCH_CTRL, lch);
520                 l &= ~0x7;
521                 l |= mode;
522                 dma_write(l, LCH_CTRL, lch);
523         }
524 }
525 EXPORT_SYMBOL(omap_set_dma_channel_mode);
526
527 /* Note that src_port is only for omap1 */
528 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
529                              unsigned long src_start,
530                              int src_ei, int src_fi)
531 {
532         u32 l;
533
534         if (cpu_class_is_omap1()) {
535                 u16 w;
536
537                 w = dma_read(CSDP, lch);
538                 w &= ~(0x1f << 2);
539                 w |= src_port << 2;
540                 dma_write(w, CSDP, lch);
541         }
542
543         l = dma_read(CCR, lch);
544         l &= ~(0x03 << 12);
545         l |= src_amode << 12;
546         dma_write(l, CCR, lch);
547
548         dma_write(src_start, CSSA, lch);
549
550         dma_write(src_ei, CSEI, lch);
551         dma_write(src_fi, CSFI, lch);
552 }
553 EXPORT_SYMBOL(omap_set_dma_src_params);
554
/*
 * Convenience wrapper: program transfer shape, source and destination of
 * @lch from @params, then per-channel priorities if either is non-zero.
 */
void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
{
	omap_set_dma_transfer_params(lch, params->data_type,
				     params->elem_count, params->frame_count,
				     params->sync_mode, params->trigger,
				     params->src_or_dst_synch);
	omap_set_dma_src_params(lch, params->src_port,
				params->src_amode, params->src_start,
				params->src_ei, params->src_fi);

	omap_set_dma_dest_params(lch, params->dst_port,
				 params->dst_amode, params->dst_start,
				 params->dst_ei, params->dst_fi);
	if (params->read_prio || params->write_prio)
		omap_dma_set_prio_lch(lch, params->read_prio,
				      params->write_prio);
}
EXPORT_SYMBOL(omap_set_dma_params);
573
574 void omap_set_dma_src_index(int lch, int eidx, int fidx)
575 {
576         if (cpu_class_is_omap2())
577                 return;
578
579         dma_write(eidx, CSEI, lch);
580         dma_write(fidx, CSFI, lch);
581 }
582 EXPORT_SYMBOL(omap_set_dma_src_index);
583
584 void omap_set_dma_src_data_pack(int lch, int enable)
585 {
586         u32 l;
587
588         l = dma_read(CSDP, lch);
589         l &= ~(1 << 6);
590         if (enable)
591                 l |= (1 << 6);
592         dma_write(l, CSDP, lch);
593 }
594 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
595
596 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
597 {
598         unsigned int burst = 0;
599         u32 l;
600
601         l = dma_read(CSDP, lch);
602         l &= ~(0x03 << 7);
603
604         switch (burst_mode) {
605         case OMAP_DMA_DATA_BURST_DIS:
606                 break;
607         case OMAP_DMA_DATA_BURST_4:
608                 if (cpu_class_is_omap2())
609                         burst = 0x1;
610                 else
611                         burst = 0x2;
612                 break;
613         case OMAP_DMA_DATA_BURST_8:
614                 if (cpu_class_is_omap2()) {
615                         burst = 0x2;
616                         break;
617                 }
618                 /*
619                  * not supported by current hardware on OMAP1
620                  * w |= (0x03 << 7);
621                  * fall through
622                  */
623         case OMAP_DMA_DATA_BURST_16:
624                 if (cpu_class_is_omap2()) {
625                         burst = 0x3;
626                         break;
627                 }
628                 /*
629                  * OMAP1 don't support burst 16
630                  * fall through
631                  */
632         default:
633                 BUG();
634         }
635
636         l |= (burst << 7);
637         dma_write(l, CSDP, lch);
638 }
639 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
640
641 /* Note that dest_port is only for OMAP1 */
642 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
643                               unsigned long dest_start,
644                               int dst_ei, int dst_fi)
645 {
646         u32 l;
647
648         if (cpu_class_is_omap1()) {
649                 l = dma_read(CSDP, lch);
650                 l &= ~(0x1f << 9);
651                 l |= dest_port << 9;
652                 dma_write(l, CSDP, lch);
653         }
654
655         l = dma_read(CCR, lch);
656         l &= ~(0x03 << 14);
657         l |= dest_amode << 14;
658         dma_write(l, CCR, lch);
659
660         dma_write(dest_start, CDSA, lch);
661
662         dma_write(dst_ei, CDEI, lch);
663         dma_write(dst_fi, CDFI, lch);
664 }
665 EXPORT_SYMBOL(omap_set_dma_dest_params);
666
667 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
668 {
669         if (cpu_class_is_omap2())
670                 return;
671
672         dma_write(eidx, CDEI, lch);
673         dma_write(fidx, CDFI, lch);
674 }
675 EXPORT_SYMBOL(omap_set_dma_dest_index);
676
677 void omap_set_dma_dest_data_pack(int lch, int enable)
678 {
679         u32 l;
680
681         l = dma_read(CSDP, lch);
682         l &= ~(1 << 13);
683         if (enable)
684                 l |= 1 << 13;
685         dma_write(l, CSDP, lch);
686 }
687 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
688
/*
 * Set the destination burst size for @lch (CSDP[15:14]).  Burst
 * encodings differ between OMAP1 and OMAP2+; burst-16 is OMAP2+ only.
 */
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP, lch);
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
730
/*
 * Clear any stale channel status, then program the channel's interrupt
 * enables from the cached enabled_irqs mask.
 */
static inline void omap_enable_channel_irq(int lch)
{
	u32 status;

	/* Clear CSR */
	if (cpu_class_is_omap1())
		status = dma_read(CSR, lch); /* value unused: the read
						itself clears the status */
	else if (cpu_class_is_omap2())
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);

	/* Enable some nice interrupts. */
	dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}
744
/* Mask all interrupts of @lch (OMAP2+ only; OMAP1 is a no-op here) */
static void omap_disable_channel_irq(int lch)
{
	if (cpu_class_is_omap2())
		dma_write(0, CICR, lch);
}
750
/*
 * Add @bits to the cached CICR mask; applied to hardware the next time
 * omap_enable_channel_irq() runs for @lch.
 */
void omap_enable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs |= bits;
}
EXPORT_SYMBOL(omap_enable_dma_irq);

/* Remove @bits from the cached CICR mask (see omap_enable_dma_irq) */
void omap_disable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);
762
/*
 * Hardware-link @lch to its successor: program CLNK_CTRL with the next
 * channel number plus the ENABLE_LNK bit (bit 15) so the controller
 * chains into the next transfer automatically.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL, lch);

	/* Clear the OMAP1-only STOP_LNK bit */
	if (cpu_class_is_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	/* OMAP2+ chaining tracks the successor in next_linked_ch */
	if (cpu_class_is_omap2())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	dma_write(l, CLNK_CTRL, lch);
}
784
/*
 * Break the hardware link on @lch and mark the channel inactive.
 * Interrupts are masked first so the stopped channel cannot fire.
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL, lch);

	/* Disable interrupts */
	if (cpu_class_is_omap1()) {
		dma_write(0, CICR, lch);
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		omap_disable_channel_irq(lch);
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	dma_write(l, CLNK_CTRL, lch);
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
807
808 static inline void omap2_enable_irq_lch(int lch)
809 {
810         u32 val;
811         unsigned long flags;
812
813         if (!cpu_class_is_omap2())
814                 return;
815
816         spin_lock_irqsave(&dma_chan_lock, flags);
817         val = dma_read(IRQENABLE_L0, lch);
818         val |= 1 << lch;
819         dma_write(val, IRQENABLE_L0, lch);
820         spin_unlock_irqrestore(&dma_chan_lock, flags);
821 }
822
823 static inline void omap2_disable_irq_lch(int lch)
824 {
825         u32 val;
826         unsigned long flags;
827
828         if (!cpu_class_is_omap2())
829                 return;
830
831         spin_lock_irqsave(&dma_chan_lock, flags);
832         val = dma_read(IRQENABLE_L0, lch);
833         val &= ~(1 << lch);
834         dma_write(val, IRQENABLE_L0, lch);
835         spin_unlock_irqrestore(&dma_chan_lock, flags);
836 }
837
/*
 * omap_request_dma - allocate a free logical DMA channel
 * @dev_id: hardware sync device id; 0 means no sync device (sw trigger)
 * @dev_name: owner name, kept for diagnostics
 * @callback: transfer-complete/error handler (may be NULL)
 * @data: opaque cookie passed back to @callback
 * @dma_ch_out: on success, receives the allocated channel number
 *
 * Returns 0 on success, -EBUSY when no channel is free.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	/* First-fit scan; with no sync device any free channel will do */
	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			if (dev_id == 0)
				break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	/* Claim the channel while still holding the lock */
	chan = dma_chan + free_ch;
	chan->dev_id = dev_id;

	if (cpu_class_is_omap1())
		clear_lch_regs(free_ch);

	if (cpu_class_is_omap2())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	/* Default interrupt set; per-class error interrupts added below */
	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (cpu_class_is_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (cpu_class_is_omap2())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (cpu_is_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		dma_write(dev_id | (1 << 10), CCR, free_ch);
	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
		dma_write(dev_id, CCR, free_ch);
	}

	if (cpu_class_is_omap2()) {
		omap2_enable_irq_lch(free_ch);
		omap_enable_channel_irq(free_ch);
		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
		dma_write(1 << free_ch, IRQSTATUS_L0, 0);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
917
/*
 * Release @lch: mask and clear its interrupts, stop any transfer in
 * flight and return the channel to the free pool.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	if (cpu_class_is_omap1()) {
		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR, lch);
		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR, lch);
	}

	if (cpu_class_is_omap2()) {
		omap2_disable_irq_lch(lch);

		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
		/* IRQSTATUS_L0 is a global register, so the lch argument is
		 * ignored by dma_write() here (omap_request_dma passes 0) */
		dma_write(1 << lch, IRQSTATUS_L0, lch);

		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR, lch);

		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR, lch);
		omap_clear_dma(lch);
	}

	/* Mark the channel free under the allocation lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
957
958 /**
959  * @brief omap_dma_set_global_params : Set global priority settings for dma
960  *
961  * @param arb_rate
962  * @param max_fifo_depth
963  * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
964  *                                                 DMA_THREAD_RESERVE_ONET
965  *                                                 DMA_THREAD_RESERVE_TWOT
966  *                                                 DMA_THREAD_RESERVE_THREET
967  */
968 void
969 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
970 {
971         u32 reg;
972
973         if (!cpu_class_is_omap2()) {
974                 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
975                 return;
976         }
977
978         if (max_fifo_depth == 0)
979                 max_fifo_depth = 1;
980         if (arb_rate == 0)
981                 arb_rate = 1;
982
983         reg = 0xff & max_fifo_depth;
984         reg |= (0x3 & tparams) << 12;
985         reg |= (arb_rate & 0xff) << 16;
986
987         dma_write(reg, GCR, 0);
988 }
989 EXPORT_SYMBOL(omap_dma_set_global_params);
990
991 /**
992  * @brief omap_dma_set_prio_lch : Set channel wise priority settings
993  *
994  * @param lch
995  * @param read_prio - Read priority
996  * @param write_prio - Write priority
997  * Both of the above can be set with one of the following values :
998  *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
999  */
1000 int
1001 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
1002                       unsigned char write_prio)
1003 {
1004         u32 l;
1005
1006         if (unlikely((lch < 0 || lch >= dma_lch_count))) {
1007                 printk(KERN_ERR "Invalid channel id\n");
1008                 return -EINVAL;
1009         }
1010         l = dma_read(CCR, lch);
1011         l &= ~((1 << 6) | (1 << 26));
1012         if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
1013                 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
1014         else
1015                 l |= ((read_prio & 0x1) << 6);
1016
1017         dma_write(l, CCR, lch);
1018
1019         return 0;
1020 }
1021 EXPORT_SYMBOL(omap_dma_set_prio_lch);
1022
1023 /*
1024  * Clears any DMA state so the DMA engine is ready to restart with new buffers
1025  * through omap_start_dma(). Any buffers in flight are discarded.
1026  */
1027 void omap_clear_dma(int lch)
1028 {
1029         unsigned long flags;
1030
1031         local_irq_save(flags);
1032
1033         if (cpu_class_is_omap1()) {
1034                 u32 l;
1035
1036                 l = dma_read(CCR, lch);
1037                 l &= ~OMAP_DMA_CCR_EN;
1038                 dma_write(l, CCR, lch);
1039
1040                 /* Clear pending interrupts */
1041                 l = dma_read(CSR, lch);
1042         }
1043
1044         if (cpu_class_is_omap2()) {
1045                 int i = dma_common_ch_start;
1046                 for (; i <= dma_common_ch_end; i += 1)
1047                         dma_write(0, i, lch);
1048         }
1049
1050         local_irq_restore(flags);
1051 }
1052 EXPORT_SYMBOL(omap_clear_dma);
1053
1054 void omap_start_dma(int lch)
1055 {
1056         u32 l;
1057
1058         /*
1059          * The CPC/CDAC register needs to be initialized to zero
1060          * before starting dma transfer.
1061          */
1062         if (cpu_is_omap15xx())
1063                 dma_write(0, CPC, lch);
1064         else
1065                 dma_write(0, CDAC, lch);
1066
1067         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1068                 int next_lch, cur_lch;
1069                 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
1070
1071                 dma_chan_link_map[lch] = 1;
1072                 /* Set the link register of the first channel */
1073                 enable_lnk(lch);
1074
1075                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1076                 cur_lch = dma_chan[lch].next_lch;
1077                 do {
1078                         next_lch = dma_chan[cur_lch].next_lch;
1079
1080                         /* The loop case: we've been here already */
1081                         if (dma_chan_link_map[cur_lch])
1082                                 break;
1083                         /* Mark the current channel */
1084                         dma_chan_link_map[cur_lch] = 1;
1085
1086                         enable_lnk(cur_lch);
1087                         omap_enable_channel_irq(cur_lch);
1088
1089                         cur_lch = next_lch;
1090                 } while (next_lch != -1);
1091         } else if (cpu_is_omap242x() ||
1092                 (cpu_is_omap243x() &&  omap_type() <= OMAP2430_REV_ES1_0)) {
1093
1094                 /* Errata: Need to write lch even if not using chaining */
1095                 dma_write(lch, CLNK_CTRL, lch);
1096         }
1097
1098         omap_enable_channel_irq(lch);
1099
1100         l = dma_read(CCR, lch);
1101
1102         /*
1103          * Errata: Inter Frame DMA buffering issue (All OMAP2420 and
1104          * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and
1105          * bursting is enabled. This might result in data gets stalled in
1106          * FIFO at the end of the block.
1107          * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
1108          * guarantee no data will stay in the DMA FIFO in case inter frame
1109          * buffering occurs.
1110          */
1111         if (cpu_is_omap2420() ||
1112             (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0)))
1113                 l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1114
1115         l |= OMAP_DMA_CCR_EN;
1116         dma_write(l, CCR, lch);
1117
1118         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1119 }
1120 EXPORT_SYMBOL(omap_start_dma);
1121
/*
 * omap_stop_dma - stop a transfer on a logical channel and tear down any
 * hardware links on its software chain.  On OMAP3 source/dest-synchronized
 * channels this applies the errata i541 FIFO-drain workaround, which may
 * busy-wait for up to ~500us (100 x udelay(5)).
 */
void omap_stop_dma(int lch)
{
	u32 l;

	/* Disable all interrupts on the channel */
	if (cpu_class_is_omap1())
		dma_write(0, CICR, lch);

	l = dma_read(CCR, lch);
	/* OMAP3 Errata i541: sDMA FIFO draining does not finish */
	if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
		int i = 0;
		u32 sys_cf;

		/*
		 * Configure No-Standby so the DMA module cannot idle while
		 * the FIFO is still draining; original value is restored
		 * below once the drain completes or times out.
		 */
		l = dma_read(OCP_SYSCONFIG, lch);
		sys_cf = l;
		l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		dma_write(l , OCP_SYSCONFIG, 0);

		/* Clear the enable bit; the FIFO keeps draining after this. */
		l = dma_read(CCR, lch);
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR, lch);

		/* Wait for sDMA FIFO drain */
		l = dma_read(CCR, lch);
		while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
					OMAP_DMA_CCR_WR_ACTIVE))) {
			udelay(5);
			i++;
			l = dma_read(CCR, lch);
		}
		if (i >= 100)
			printk(KERN_ERR "DMA drain did not complete on "
					"lch %d\n", lch);
		/*
		 * Restore OCP_SYSCONFIG.  NOTE(review): the write above
		 * passes channel 0 while this one passes lch; presumably
		 * OCP_SYSCONFIG is a global register so the channel argument
		 * is ignored either way — confirm against dma_write().
		 */
		dma_write(sys_cf, OCP_SYSCONFIG, lch);
	} else {
		/* Normal case: just drop the enable bit. */
		l &= ~OMAP_DMA_CCR_EN;
		dma_write(l, CCR, lch);
	}

	if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
		int next_lch, cur_lch = lch;
		char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];

		/* Walk the chain once, disabling each channel's link bit. */
		memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
		do {
			/* The loop case: we've been here already */
			if (dma_chan_link_map[cur_lch])
				break;
			/* Mark the current channel */
			dma_chan_link_map[cur_lch] = 1;

			disable_lnk(cur_lch);

			next_lch = dma_chan[cur_lch].next_lch;
			cur_lch = next_lch;
		} while (next_lch != -1);
	}

	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);
1187
1188 /*
1189  * Allows changing the DMA callback function or data. This may be needed if
1190  * the driver shares a single DMA channel for multiple dma triggers.
1191  */
1192 int omap_set_dma_callback(int lch,
1193                           void (*callback)(int lch, u16 ch_status, void *data),
1194                           void *data)
1195 {
1196         unsigned long flags;
1197
1198         if (lch < 0)
1199                 return -ENODEV;
1200
1201         spin_lock_irqsave(&dma_chan_lock, flags);
1202         if (dma_chan[lch].dev_id == -1) {
1203                 printk(KERN_ERR "DMA callback for not set for free channel\n");
1204                 spin_unlock_irqrestore(&dma_chan_lock, flags);
1205                 return -EINVAL;
1206         }
1207         dma_chan[lch].callback = callback;
1208         dma_chan[lch].data = data;
1209         spin_unlock_irqrestore(&dma_chan_lock, flags);
1210
1211         return 0;
1212 }
1213 EXPORT_SYMBOL(omap_set_dma_callback);
1214
1215 /*
1216  * Returns current physical source address for the given DMA channel.
1217  * If the channel is running the caller must disable interrupts prior calling
1218  * this function and process the returned value before re-enabling interrupt to
1219  * prevent races with the interrupt handler. Note that in continuous mode there
1220  * is a chance for CSSA_L register overflow inbetween the two reads resulting
1221  * in incorrect return value.
1222  */
1223 dma_addr_t omap_get_dma_src_pos(int lch)
1224 {
1225         dma_addr_t offset = 0;
1226
1227         if (cpu_is_omap15xx())
1228                 offset = dma_read(CPC, lch);
1229         else
1230                 offset = dma_read(CSAC, lch);
1231
1232         /*
1233          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1234          * read before the DMA controller finished disabling the channel.
1235          */
1236         if (!cpu_is_omap15xx() && offset == 0)
1237                 offset = dma_read(CSAC, lch);
1238
1239         if (cpu_class_is_omap1())
1240                 offset |= (dma_read(CSSA, lch) & 0xFFFF0000);
1241
1242         return offset;
1243 }
1244 EXPORT_SYMBOL(omap_get_dma_src_pos);
1245
1246 /*
1247  * Returns current physical destination address for the given DMA channel.
1248  * If the channel is running the caller must disable interrupts prior calling
1249  * this function and process the returned value before re-enabling interrupt to
1250  * prevent races with the interrupt handler. Note that in continuous mode there
1251  * is a chance for CDSA_L register overflow inbetween the two reads resulting
1252  * in incorrect return value.
1253  */
1254 dma_addr_t omap_get_dma_dst_pos(int lch)
1255 {
1256         dma_addr_t offset = 0;
1257
1258         if (cpu_is_omap15xx())
1259                 offset = dma_read(CPC, lch);
1260         else
1261                 offset = dma_read(CDAC, lch);
1262
1263         /*
1264          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1265          * read before the DMA controller finished disabling the channel.
1266          */
1267         if (!cpu_is_omap15xx() && offset == 0)
1268                 offset = dma_read(CDAC, lch);
1269
1270         if (cpu_class_is_omap1())
1271                 offset |= (dma_read(CDSA, lch) & 0xFFFF0000);
1272
1273         return offset;
1274 }
1275 EXPORT_SYMBOL(omap_get_dma_dst_pos);
1276
1277 int omap_get_dma_active_status(int lch)
1278 {
1279         return (dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1280 }
1281 EXPORT_SYMBOL(omap_get_dma_active_status);
1282
1283 int omap_dma_running(void)
1284 {
1285         int lch;
1286
1287         if (cpu_class_is_omap1())
1288                 if (omap_lcd_dma_running())
1289                         return 1;
1290
1291         for (lch = 0; lch < dma_chan_count; lch++)
1292                 if (dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1293                         return 1;
1294
1295         return 0;
1296 }
1297
1298 /*
1299  * lch_queue DMA will start right after lch_head one is finished.
1300  * For this DMA link to start, you still need to start (see omap_start_dma)
1301  * the first one. That will fire up the entire queue.
1302  */
1303 void omap_dma_link_lch(int lch_head, int lch_queue)
1304 {
1305         if (omap_dma_in_1510_mode()) {
1306                 if (lch_head == lch_queue) {
1307                         dma_write(dma_read(CCR, lch_head) | (3 << 8),
1308                                                                 CCR, lch_head);
1309                         return;
1310                 }
1311                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1312                 BUG();
1313                 return;
1314         }
1315
1316         if ((dma_chan[lch_head].dev_id == -1) ||
1317             (dma_chan[lch_queue].dev_id == -1)) {
1318                 printk(KERN_ERR "omap_dma: trying to link "
1319                        "non requested channels\n");
1320                 dump_stack();
1321         }
1322
1323         dma_chan[lch_head].next_lch = lch_queue;
1324 }
1325 EXPORT_SYMBOL(omap_dma_link_lch);
1326
1327 /*
1328  * Once the DMA queue is stopped, we can destroy it.
1329  */
1330 void omap_dma_unlink_lch(int lch_head, int lch_queue)
1331 {
1332         if (omap_dma_in_1510_mode()) {
1333                 if (lch_head == lch_queue) {
1334                         dma_write(dma_read(CCR, lch_head) & ~(3 << 8),
1335                                                                 CCR, lch_head);
1336                         return;
1337                 }
1338                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1339                 BUG();
1340                 return;
1341         }
1342
1343         if (dma_chan[lch_head].next_lch != lch_queue ||
1344             dma_chan[lch_head].next_lch == -1) {
1345                 printk(KERN_ERR "omap_dma: trying to unlink "
1346                        "non linked channels\n");
1347                 dump_stack();
1348         }
1349
1350         if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1351             (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1352                 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1353                        "before unlinking\n");
1354                 dump_stack();
1355         }
1356
1357         dma_chan[lch_head].next_lch = -1;
1358 }
1359 EXPORT_SYMBOL(omap_dma_unlink_lch);
1360
1361 /*----------------------------------------------------------------------------*/
1362
1363 #ifndef CONFIG_ARCH_OMAP1
/*
 * Create a chain of DMA channels: insert lch_queue into the circular,
 * doubly-linked software chain headed by lch_head, then program the
 * hardware CLNK_CTRL link fields of both channels to match.
 */
static void create_dma_lch_chain(int lch_head, int lch_queue)
{
	u32 l;

	/* Check if this is the first link in chain */
	if (dma_chan[lch_head].next_linked_ch == -1) {
		/* Two-element circular list: each points at the other. */
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[lch_head].prev_linked_ch = lch_queue;
		dma_chan[lch_queue].next_linked_ch = lch_head;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
	}

	/* a link exists, link the new channel in circular chain */
	else {
		/* Splice lch_queue in immediately after lch_head. */
		dma_chan[lch_queue].next_linked_ch =
					dma_chan[lch_head].next_linked_ch;
		dma_chan[lch_queue].prev_linked_ch = lch_head;
		dma_chan[lch_head].next_linked_ch = lch_queue;
		dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
					lch_queue;
	}

	/* Point the head's hardware link at the new channel (low 5 bits). */
	l = dma_read(CLNK_CTRL, lch_head);
	l &= ~(0x1f);
	l |= lch_queue;
	dma_write(l, CLNK_CTRL, lch_head);

	/* And point the new channel's hardware link at its successor. */
	l = dma_read(CLNK_CTRL, lch_queue);
	l &= ~(0x1f);
	l |= (dma_chan[lch_queue].next_linked_ch);
	dma_write(l, CLNK_CTRL, lch_queue);
}
1397
1398 /**
1399  * @brief omap_request_dma_chain : Request a chain of DMA channels
1400  *
1401  * @param dev_id - Device id using the dma channel
1402  * @param dev_name - Device name
1403  * @param callback - Call back function
1404  * @chain_id -
1405  * @no_of_chans - Number of channels requested
1406  * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1407  *                                            OMAP_DMA_DYNAMIC_CHAIN
1408  * @params - Channel parameters
1409  *
1410  * @return - Success : 0
1411  *           Failure: -EINVAL/-ENOMEM
1412  */
1413 int omap_request_dma_chain(int dev_id, const char *dev_name,
1414                            void (*callback) (int lch, u16 ch_status,
1415                                              void *data),
1416                            int *chain_id, int no_of_chans, int chain_mode,
1417                            struct omap_dma_channel_params params)
1418 {
1419         int *channels;
1420         int i, err;
1421
1422         /* Is the chain mode valid ? */
1423         if (chain_mode != OMAP_DMA_STATIC_CHAIN
1424                         && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1425                 printk(KERN_ERR "Invalid chain mode requested\n");
1426                 return -EINVAL;
1427         }
1428
1429         if (unlikely((no_of_chans < 1
1430                         || no_of_chans > dma_lch_count))) {
1431                 printk(KERN_ERR "Invalid Number of channels requested\n");
1432                 return -EINVAL;
1433         }
1434
1435         /*
1436          * Allocate a queue to maintain the status of the channels
1437          * in the chain
1438          */
1439         channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1440         if (channels == NULL) {
1441                 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1442                 return -ENOMEM;
1443         }
1444
1445         /* request and reserve DMA channels for the chain */
1446         for (i = 0; i < no_of_chans; i++) {
1447                 err = omap_request_dma(dev_id, dev_name,
1448                                         callback, NULL, &channels[i]);
1449                 if (err < 0) {
1450                         int j;
1451                         for (j = 0; j < i; j++)
1452                                 omap_free_dma(channels[j]);
1453                         kfree(channels);
1454                         printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1455                         return err;
1456                 }
1457                 dma_chan[channels[i]].prev_linked_ch = -1;
1458                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1459
1460                 /*
1461                  * Allowing client drivers to set common parameters now,
1462                  * so that later only relevant (src_start, dest_start
1463                  * and element count) can be set
1464                  */
1465                 omap_set_dma_params(channels[i], &params);
1466         }
1467
1468         *chain_id = channels[0];
1469         dma_linked_lch[*chain_id].linked_dmach_q = channels;
1470         dma_linked_lch[*chain_id].chain_mode = chain_mode;
1471         dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1472         dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1473
1474         for (i = 0; i < no_of_chans; i++)
1475                 dma_chan[channels[i]].chain_id = *chain_id;
1476
1477         /* Reset the Queue pointers */
1478         OMAP_DMA_CHAIN_QINIT(*chain_id);
1479
1480         /* Set up the chain */
1481         if (no_of_chans == 1)
1482                 create_dma_lch_chain(channels[0], channels[0]);
1483         else {
1484                 for (i = 0; i < (no_of_chans - 1); i++)
1485                         create_dma_lch_chain(channels[i], channels[i + 1]);
1486         }
1487
1488         return 0;
1489 }
1490 EXPORT_SYMBOL(omap_request_dma_chain);
1491
1492 /**
1493  * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
1494  * params after setting it. Dont do this while dma is running!!
1495  *
1496  * @param chain_id - Chained logical channel id.
1497  * @param params
1498  *
1499  * @return - Success : 0
1500  *           Failure : -EINVAL
1501  */
1502 int omap_modify_dma_chain_params(int chain_id,
1503                                 struct omap_dma_channel_params params)
1504 {
1505         int *channels;
1506         u32 i;
1507
1508         /* Check for input params */
1509         if (unlikely((chain_id < 0
1510                         || chain_id >= dma_lch_count))) {
1511                 printk(KERN_ERR "Invalid chain id\n");
1512                 return -EINVAL;
1513         }
1514
1515         /* Check if the chain exists */
1516         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1517                 printk(KERN_ERR "Chain doesn't exists\n");
1518                 return -EINVAL;
1519         }
1520         channels = dma_linked_lch[chain_id].linked_dmach_q;
1521
1522         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1523                 /*
1524                  * Allowing client drivers to set common parameters now,
1525                  * so that later only relevant (src_start, dest_start
1526                  * and element count) can be set
1527                  */
1528                 omap_set_dma_params(channels[i], &params);
1529         }
1530
1531         return 0;
1532 }
1533 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1534
1535 /**
1536  * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1537  *
1538  * @param chain_id
1539  *
1540  * @return - Success : 0
1541  *           Failure : -EINVAL
1542  */
1543 int omap_free_dma_chain(int chain_id)
1544 {
1545         int *channels;
1546         u32 i;
1547
1548         /* Check for input params */
1549         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1550                 printk(KERN_ERR "Invalid chain id\n");
1551                 return -EINVAL;
1552         }
1553
1554         /* Check if the chain exists */
1555         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1556                 printk(KERN_ERR "Chain doesn't exists\n");
1557                 return -EINVAL;
1558         }
1559
1560         channels = dma_linked_lch[chain_id].linked_dmach_q;
1561         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1562                 dma_chan[channels[i]].next_linked_ch = -1;
1563                 dma_chan[channels[i]].prev_linked_ch = -1;
1564                 dma_chan[channels[i]].chain_id = -1;
1565                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1566                 omap_free_dma(channels[i]);
1567         }
1568
1569         kfree(channels);
1570
1571         dma_linked_lch[chain_id].linked_dmach_q = NULL;
1572         dma_linked_lch[chain_id].chain_mode = -1;
1573         dma_linked_lch[chain_id].chain_state = -1;
1574
1575         return (0);
1576 }
1577 EXPORT_SYMBOL(omap_free_dma_chain);
1578
1579 /**
1580  * @brief omap_dma_chain_status - Check if the chain is in
1581  * active / inactive state.
1582  * @param chain_id
1583  *
1584  * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1585  *           Failure : -EINVAL
1586  */
1587 int omap_dma_chain_status(int chain_id)
1588 {
1589         /* Check for input params */
1590         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1591                 printk(KERN_ERR "Invalid chain id\n");
1592                 return -EINVAL;
1593         }
1594
1595         /* Check if the chain exists */
1596         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1597                 printk(KERN_ERR "Chain doesn't exists\n");
1598                 return -EINVAL;
1599         }
1600         pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1601                         dma_linked_lch[chain_id].q_count);
1602
1603         if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1604                 return OMAP_DMA_CHAIN_INACTIVE;
1605
1606         return OMAP_DMA_CHAIN_ACTIVE;
1607 }
1608 EXPORT_SYMBOL(omap_dma_chain_status);
1609
1610 /**
1611  * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1612  * set the params and start the transfer.
1613  *
1614  * @param chain_id
1615  * @param src_start - buffer start address
1616  * @param dest_start - Dest address
1617  * @param elem_count
1618  * @param frame_count
1619  * @param callbk_data - channel callback parameter data.
1620  *
1621  * @return  - Success : 0
1622  *            Failure: -EINVAL/-EBUSY
1623  */
1624 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1625                         int elem_count, int frame_count, void *callbk_data)
1626 {
1627         int *channels;
1628         u32 l, lch;
1629         int start_dma = 0;
1630
1631         /*
1632          * if buffer size is less than 1 then there is
1633          * no use of starting the chain
1634          */
1635         if (elem_count < 1) {
1636                 printk(KERN_ERR "Invalid buffer size\n");
1637                 return -EINVAL;
1638         }
1639
1640         /* Check for input params */
1641         if (unlikely((chain_id < 0
1642                         || chain_id >= dma_lch_count))) {
1643                 printk(KERN_ERR "Invalid chain id\n");
1644                 return -EINVAL;
1645         }
1646
1647         /* Check if the chain exists */
1648         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1649                 printk(KERN_ERR "Chain doesn't exist\n");
1650                 return -EINVAL;
1651         }
1652
1653         /* Check if all the channels in chain are in use */
1654         if (OMAP_DMA_CHAIN_QFULL(chain_id))
1655                 return -EBUSY;
1656
1657         /* Frame count may be negative in case of indexed transfers */
1658         channels = dma_linked_lch[chain_id].linked_dmach_q;
1659
1660         /* Get a free channel */
1661         lch = channels[dma_linked_lch[chain_id].q_tail];
1662
1663         /* Store the callback data */
1664         dma_chan[lch].data = callbk_data;
1665
1666         /* Increment the q_tail */
1667         OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1668
1669         /* Set the params to the free channel */
1670         if (src_start != 0)
1671                 dma_write(src_start, CSSA, lch);
1672         if (dest_start != 0)
1673                 dma_write(dest_start, CDSA, lch);
1674
1675         /* Write the buffer size */
1676         dma_write(elem_count, CEN, lch);
1677         dma_write(frame_count, CFN, lch);
1678
1679         /*
1680          * If the chain is dynamically linked,
1681          * then we may have to start the chain if its not active
1682          */
1683         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1684
1685                 /*
1686                  * In Dynamic chain, if the chain is not started,
1687                  * queue the channel
1688                  */
1689                 if (dma_linked_lch[chain_id].chain_state ==
1690                                                 DMA_CHAIN_NOTSTARTED) {
1691                         /* Enable the link in previous channel */
1692                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1693                                                                 DMA_CH_QUEUED)
1694                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1695                         dma_chan[lch].state = DMA_CH_QUEUED;
1696                 }
1697
1698                 /*
1699                  * Chain is already started, make sure its active,
1700                  * if not then start the chain
1701                  */
1702                 else {
1703                         start_dma = 1;
1704
1705                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1706                                                         DMA_CH_STARTED) {
1707                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1708                                 dma_chan[lch].state = DMA_CH_QUEUED;
1709                                 start_dma = 0;
1710                                 if (0 == ((1 << 7) & dma_read(
1711                                         CCR, dma_chan[lch].prev_linked_ch))) {
1712                                         disable_lnk(dma_chan[lch].
1713                                                     prev_linked_ch);
1714                                         pr_debug("\n prev ch is stopped\n");
1715                                         start_dma = 1;
1716                                 }
1717                         }
1718
1719                         else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1720                                                         == DMA_CH_QUEUED) {
1721                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1722                                 dma_chan[lch].state = DMA_CH_QUEUED;
1723                                 start_dma = 0;
1724                         }
1725                         omap_enable_channel_irq(lch);
1726
1727                         l = dma_read(CCR, lch);
1728
1729                         if ((0 == (l & (1 << 24))))
1730                                 l &= ~(1 << 25);
1731                         else
1732                                 l |= (1 << 25);
1733                         if (start_dma == 1) {
1734                                 if (0 == (l & (1 << 7))) {
1735                                         l |= (1 << 7);
1736                                         dma_chan[lch].state = DMA_CH_STARTED;
1737                                         pr_debug("starting %d\n", lch);
1738                                         dma_write(l, CCR, lch);
1739                                 } else
1740                                         start_dma = 0;
1741                         } else {
1742                                 if (0 == (l & (1 << 7)))
1743                                         dma_write(l, CCR, lch);
1744                         }
1745                         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1746                 }
1747         }
1748
1749         return 0;
1750 }
1751 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1752
1753 /**
1754  * @brief omap_start_dma_chain_transfers - Start the chain
1755  *
1756  * @param chain_id
1757  *
1758  * @return - Success : 0
1759  *           Failure : -EINVAL/-EBUSY
1760  */
1761 int omap_start_dma_chain_transfers(int chain_id)
1762 {
1763         int *channels;
1764         u32 l, i;
1765
1766         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1767                 printk(KERN_ERR "Invalid chain id\n");
1768                 return -EINVAL;
1769         }
1770
1771         channels = dma_linked_lch[chain_id].linked_dmach_q;
1772
1773         if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1774                 printk(KERN_ERR "Chain is already started\n");
1775                 return -EBUSY;
1776         }
1777
1778         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1779                 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1780                                                                         i++) {
1781                         enable_lnk(channels[i]);
1782                         omap_enable_channel_irq(channels[i]);
1783                 }
1784         } else {
1785                 omap_enable_channel_irq(channels[0]);
1786         }
1787
1788         l = dma_read(CCR, channels[0]);
1789         l |= (1 << 7);
1790         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1791         dma_chan[channels[0]].state = DMA_CH_STARTED;
1792
1793         if ((0 == (l & (1 << 24))))
1794                 l &= ~(1 << 25);
1795         else
1796                 l |= (1 << 25);
1797         dma_write(l, CCR, channels[0]);
1798
1799         dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1800
1801         return 0;
1802 }
1803 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1804
/**
 * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
 *
 * Disables every channel in the chain and unlinks them, wrapped in the
 * OCP_SYSCONFIG errata sequence needed to stop a channel mid-block.
 *
 * @param chain_id
 *
 * @return - Success : 0
 *           Failure : EINVAL
 */
int omap_stop_dma_chain_transfers(int chain_id)
{
	int *channels;
	u32 l, i;
	u32 sys_cf;

	/* Check for input params */
	if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
		printk(KERN_ERR "Invalid chain id\n");
		return -EINVAL;
	}

	/* Check if the chain exists */
	if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
		printk(KERN_ERR "Chain doesn't exists\n");
		return -EINVAL;
	}
	channels = dma_linked_lch[chain_id].linked_dmach_q;

	/*
	 * DMA Errata:
	 * Special programming model needed to disable DMA before end of block
	 */
	sys_cf = dma_read(OCP_SYSCONFIG, 0);	/* saved; restored below */
	l = sys_cf;
	/* Middle mode reg set no Standby (clear MIDLEMODE bits 12..13) */
	l &= ~((1 << 12)|(1 << 13));
	dma_write(l, OCP_SYSCONFIG, 0);

	for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {

		/* Stop the Channel transmission (clear CCR enable bit 7) */
		l = dma_read(CCR, channels[i]);
		l &= ~(1 << 7);
		dma_write(l, CCR, channels[i]);

		/* Disable the link in all the channels */
		disable_lnk(channels[i]);
		dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;

	}
	dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;

	/* Reset the Queue pointers */
	OMAP_DMA_CHAIN_QINIT(chain_id);

	/* Errata - put in the old value */
	dma_write(sys_cf, OCP_SYSCONFIG, 0);

	return 0;
}
EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1865
1866 /* Get the index of the ongoing DMA in chain */
1867 /**
1868  * @brief omap_get_dma_chain_index - Get the element and frame index
1869  * of the ongoing DMA in chain
1870  *
1871  * @param chain_id
1872  * @param ei - Element index
1873  * @param fi - Frame index
1874  *
1875  * @return - Success : 0
1876  *           Failure : -EINVAL
1877  */
1878 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1879 {
1880         int lch;
1881         int *channels;
1882
1883         /* Check for input params */
1884         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1885                 printk(KERN_ERR "Invalid chain id\n");
1886                 return -EINVAL;
1887         }
1888
1889         /* Check if the chain exists */
1890         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1891                 printk(KERN_ERR "Chain doesn't exists\n");
1892                 return -EINVAL;
1893         }
1894         if ((!ei) || (!fi))
1895                 return -EINVAL;
1896
1897         channels = dma_linked_lch[chain_id].linked_dmach_q;
1898
1899         /* Get the current channel */
1900         lch = channels[dma_linked_lch[chain_id].q_head];
1901
1902         *ei = dma_read(CCEN, lch);
1903         *fi = dma_read(CCFN, lch);
1904
1905         return 0;
1906 }
1907 EXPORT_SYMBOL(omap_get_dma_chain_index);
1908
1909 /**
1910  * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1911  * ongoing DMA in chain
1912  *
1913  * @param chain_id
1914  *
1915  * @return - Success : Destination position
1916  *           Failure : -EINVAL
1917  */
1918 int omap_get_dma_chain_dst_pos(int chain_id)
1919 {
1920         int lch;
1921         int *channels;
1922
1923         /* Check for input params */
1924         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1925                 printk(KERN_ERR "Invalid chain id\n");
1926                 return -EINVAL;
1927         }
1928
1929         /* Check if the chain exists */
1930         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1931                 printk(KERN_ERR "Chain doesn't exists\n");
1932                 return -EINVAL;
1933         }
1934
1935         channels = dma_linked_lch[chain_id].linked_dmach_q;
1936
1937         /* Get the current channel */
1938         lch = channels[dma_linked_lch[chain_id].q_head];
1939
1940         return dma_read(CDAC, lch);
1941 }
1942 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1943
1944 /**
1945  * @brief omap_get_dma_chain_src_pos - Get the source position
1946  * of the ongoing DMA in chain
1947  * @param chain_id
1948  *
1949  * @return - Success : Destination position
1950  *           Failure : -EINVAL
1951  */
1952 int omap_get_dma_chain_src_pos(int chain_id)
1953 {
1954         int lch;
1955         int *channels;
1956
1957         /* Check for input params */
1958         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1959                 printk(KERN_ERR "Invalid chain id\n");
1960                 return -EINVAL;
1961         }
1962
1963         /* Check if the chain exists */
1964         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1965                 printk(KERN_ERR "Chain doesn't exists\n");
1966                 return -EINVAL;
1967         }
1968
1969         channels = dma_linked_lch[chain_id].linked_dmach_q;
1970
1971         /* Get the current channel */
1972         lch = channels[dma_linked_lch[chain_id].q_head];
1973
1974         return dma_read(CSAC, lch);
1975 }
1976 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1977 #endif  /* ifndef CONFIG_ARCH_OMAP1 */
1978
1979 /*----------------------------------------------------------------------------*/
1980
1981 #ifdef CONFIG_ARCH_OMAP1
1982
/*
 * Service one OMAP1 logical channel.  Returns 1 if an event was handled,
 * 0 if there was nothing pending (or the interrupt was spurious).
 */
static int omap1_dma_handle_ch(int ch)
{
	u32 csr;

	/*
	 * In 1510 mode channels 6+ are serviced via their partner channel
	 * (ch - 6): their status was parked in saved_csr by an earlier
	 * call on that partner (see below), so consume it instead of
	 * reading CSR.
	 */
	if (enable_1510_mode && ch >= 6) {
		csr = dma_chan[ch].saved_csr;
		dma_chan[ch].saved_csr = 0;
	} else
		csr = dma_read(CSR, ch);
	/*
	 * 1510 mode: bits 7+ of CSR on channels 0..2 carry the status of
	 * channel ch + 6 — stash it for a follow-up call on that channel.
	 */
	if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
		dma_chan[ch + 6].saved_csr = csr >> 7;
		csr &= 0x7f;
	}
	/* No event bits pending: nothing to do */
	if ((csr & 0x3f) == 0)
		return 0;
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		printk(KERN_WARNING "Spurious interrupt from DMA channel "
		       "%d (CSR %04x)\n", ch, csr);
		return 0;
	}
	if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
		printk(KERN_WARNING "DMA timeout with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(csr & OMAP_DMA_DROP_IRQ))
		printk(KERN_WARNING "DMA synchronization event drop occurred "
		       "with device %d\n", dma_chan[ch].dev_id);
	/* Block complete: the channel is no longer active */
	if (likely(csr & OMAP_DMA_BLOCK_IRQ))
		dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, csr, dma_chan[ch].data);

	return 1;
}
2016
2017 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
2018 {
2019         int ch = ((int) dev_id) - 1;
2020         int handled = 0;
2021
2022         for (;;) {
2023                 int handled_now = 0;
2024
2025                 handled_now += omap1_dma_handle_ch(ch);
2026                 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
2027                         handled_now += omap1_dma_handle_ch(ch + 6);
2028                 if (!handled_now)
2029                         break;
2030                 handled += handled_now;
2031         }
2032
2033         return handled ? IRQ_HANDLED : IRQ_NONE;
2034 }
2035
2036 #else
2037 #define omap1_dma_irq_handler   NULL
2038 #endif
2039
2040 #ifdef CONFIG_ARCH_OMAP2PLUS
2041
/*
 * Service one OMAP2+ logical channel: read CSR, log error conditions
 * (applying the transaction-error disable errata), clear the status,
 * update chain bookkeeping and invoke the client callback.
 * Always returns 0.
 */
static int omap2_dma_handle_ch(int ch)
{
	u32 status = dma_read(CSR, ch);

	/* No status bits: spurious — still clear our IRQSTATUS_L0 bit */
	if (!status) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
				ch);
		dma_write(1 << ch, IRQSTATUS_L0, ch);
		return 0;
	}
	if (unlikely(dma_chan[ch].dev_id == -1)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
					"channel %d\n", status, ch);
		return 0;
	}
	if (unlikely(status & OMAP_DMA_DROP_IRQ))
		printk(KERN_INFO
		       "DMA synchronization event drop occurred with device "
		       "%d\n", dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
		printk(KERN_INFO "DMA transaction error with device %d\n",
		       dma_chan[ch].dev_id);
		if (cpu_class_is_omap2()) {
			/*
			 * Errata: sDMA Channel is not disabled
			 * after a transaction error. So we explicitely
			 * disable the channel
			 */
			u32 ccr;

			ccr = dma_read(CCR, ch);
			ccr &= ~OMAP_DMA_CCR_EN;
			dma_write(ccr, CCR, ch);
			dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
		}
	}
	if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
		printk(KERN_INFO "DMA secure error with device %d\n",
		       dma_chan[ch].dev_id);
	if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
		printk(KERN_INFO "DMA misaligned error with device %d\n",
		       dma_chan[ch].dev_id);

	/* Acknowledge the channel status and this channel's line-0 bit */
	dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
	dma_write(1 << ch, IRQSTATUS_L0, ch);
	/* read back the register to flush the write */
	dma_read(IRQSTATUS_L0, ch);

	/* If the ch is not chained then chain_id will be -1 */
	if (dma_chan[ch].chain_id != -1) {
		int chain_id = dma_chan[ch].chain_id;
		dma_chan[ch].state = DMA_CH_NOTSTARTED;
		/*
		 * NOTE(review): bit 15 of CLNK_CTRL is presumably the
		 * link-enable flag (see enable_lnk()) — if set, the next
		 * linked channel has already taken over.
		 */
		if (dma_read(CLNK_CTRL, ch) & (1 << 15))
			dma_chan[dma_chan[ch].next_linked_ch].state =
							DMA_CH_STARTED;
		if (dma_linked_lch[chain_id].chain_mode ==
						OMAP_DMA_DYNAMIC_CHAIN)
			disable_lnk(ch);

		if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
			OMAP_DMA_CHAIN_INCQHEAD(chain_id);

		/* Re-read CSR so the callback sees the latest status */
		status = dma_read(CSR, ch);
	}

	dma_write(status, CSR, ch);

	if (likely(dma_chan[ch].callback != NULL))
		dma_chan[ch].callback(ch, status, dma_chan[ch].data);

	return 0;
}
2116
2117 /* STATUS register count is from 1-32 while our is 0-31 */
2118 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
2119 {
2120         u32 val, enable_reg;
2121         int i;
2122
2123         val = dma_read(IRQSTATUS_L0, 0);
2124         if (val == 0) {
2125                 if (printk_ratelimit())
2126                         printk(KERN_WARNING "Spurious DMA IRQ\n");
2127                 return IRQ_HANDLED;
2128         }
2129         enable_reg = dma_read(IRQENABLE_L0, 0);
2130         val &= enable_reg; /* Dispatch only relevant interrupts */
2131         for (i = 0; i < dma_lch_count && val != 0; i++) {
2132                 if (val & 1)
2133                         omap2_dma_handle_ch(i);
2134                 val >>= 1;
2135         }
2136
2137         return IRQ_HANDLED;
2138 }
2139
/* IRQ action for the OMAP2+ sDMA controller's line-0 interrupt */
static struct irqaction omap24xx_dma_irq = {
	.name = "DMA",
	.handler = omap2_dma_irq_handler,
	.flags = IRQF_DISABLED
};

#else
/* Empty stub so omap_init_dma() still compiles on OMAP1-only builds */
static struct irqaction omap24xx_dma_irq;
#endif
2149
2150 /*----------------------------------------------------------------------------*/
2151
/*
 * Save the global (non per-channel) sDMA registers — IRQ enables,
 * OCP sysconfig and GCR — so omap_dma_global_context_restore() can
 * re-apply them after the controller loses context.
 */
void omap_dma_global_context_save(void)
{
	omap_dma_global_context.dma_irqenable_l0 =
		dma_read(IRQENABLE_L0, 0);
	omap_dma_global_context.dma_ocp_sysconfig =
		dma_read(OCP_SYSCONFIG, 0);
	omap_dma_global_context.dma_gcr = dma_read(GCR, 0);
}
2160
/* Restore the global sDMA registers saved by omap_dma_global_context_save() */
void omap_dma_global_context_restore(void)
{
	int ch;

	dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
	dma_write(omap_dma_global_context.dma_ocp_sysconfig,
		OCP_SYSCONFIG, 0);
	dma_write(omap_dma_global_context.dma_irqenable_l0,
		IRQENABLE_L0, 0);

	/*
	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
	 * after secure sram context save and restore. Hence we need to
	 * manually clear those IRQs to avoid spurious interrupts. This
	 * affects only secure devices.
	 */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		dma_write(0x3 , IRQSTATUS_L0, 0);

	/* Reset every channel currently allocated to a client */
	for (ch = 0; ch < dma_chan_count; ch++)
		if (dma_chan[ch].dev_id != -1)
			omap_clear_dma(ch);
}
2184
2185 /*----------------------------------------------------------------------------*/
2186
/*
 * Map the DMA controller, size and allocate the channel bookkeeping
 * arrays, reset every channel and hook up the interrupt handlers.
 * Returns 0 on success or a negative errno.
 */
static int __init omap_init_dma(void)
{
	unsigned long base;
	int ch, r;

	/* Pick register base and logical channel count per SoC family */
	if (cpu_class_is_omap1()) {
		base = OMAP1_DMA_BASE;
		dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap24xx()) {
		base = OMAP24XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap34xx()) {
		base = OMAP34XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else if (cpu_is_omap44xx()) {
		base = OMAP44XX_DMA4_BASE;
		dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
	} else {
		pr_err("DMA init failed for unsupported omap\n");
		return -ENODEV;
	}

	omap_dma_base = ioremap(base, SZ_4K);
	BUG_ON(!omap_dma_base);

	/* Select the register layout used by dma_read()/dma_write() */
	if (cpu_class_is_omap1()) {
		dma_stride		= 0x40;
		reg_map			= reg_map_omap1;
		dma_common_ch_start	= CPC;
		dma_common_ch_end	= COLOR;
	} else {
		dma_stride		= 0x60;
		reg_map			= reg_map_omap2;
		dma_common_ch_start	= CSDP;
		if (cpu_is_omap3630() || cpu_is_omap4430())
			dma_common_ch_end = CCDN;
		else
			dma_common_ch_end = CCFN;
	}

	/* Honour the "omap_dma_reserve_ch=" boot argument when valid */
	if (cpu_class_is_omap2() && omap_dma_reserve_channels
			&& (omap_dma_reserve_channels <= dma_lch_count))
		dma_lch_count = omap_dma_reserve_channels;

	dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
				GFP_KERNEL);
	if (!dma_chan) {
		r = -ENOMEM;
		goto out_unmap;
	}

	/* Chaining bookkeeping is only needed on OMAP2+ */
	if (cpu_class_is_omap2()) {
		dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
						dma_lch_count, GFP_KERNEL);
		if (!dma_linked_lch) {
			r = -ENOMEM;
			goto out_free;
		}
	}

	if (cpu_is_omap15xx()) {
		printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
		dma_chan_count = 9;
		enable_1510_mode = 1;
	} else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
		printk(KERN_INFO "OMAP DMA hardware version %d\n",
		       dma_read(HW_ID, 0));
		printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
		       dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
		       dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
		       dma_read(CAPS_4, 0));
		if (!enable_1510_mode) {
			u16 w;

			/* Disable OMAP 3.0/3.1 compatibility mode. */
			w = dma_read(GSCR, 0);
			w |= 1 << 3;
			dma_write(w, GSCR, 0);
			dma_chan_count = 16;
		} else
			dma_chan_count = 9;
	} else if (cpu_class_is_omap2()) {
		u8 revision = dma_read(REVISION, 0) & 0xff;
		printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
		       revision >> 4, revision & 0xf);
		dma_chan_count = dma_lch_count;
	} else {
		/*
		 * NOTE(review): this path returns without freeing dma_chan;
		 * it appears unreachable given the family checks above —
		 * confirm before relying on it.
		 */
		dma_chan_count = 0;
		return 0;
	}

	spin_lock_init(&dma_chan_lock);

	for (ch = 0; ch < dma_chan_count; ch++) {
		omap_clear_dma(ch);
		if (cpu_class_is_omap2())
			omap2_disable_irq_lch(ch);

		dma_chan[ch].dev_id = -1;
		dma_chan[ch].next_lch = -1;

		/*
		 * 1510 mode: channels 6+ are serviced via their partner
		 * channel's IRQ (see omap1_dma_handle_ch), so no request.
		 */
		if (ch >= 6 && enable_1510_mode)
			continue;

		if (cpu_class_is_omap1()) {
			/*
			 * request_irq() doesn't like dev_id (ie. ch) being
			 * zero, so we have to kludge around this.
			 */
			r = request_irq(omap1_dma_irq[ch],
					omap1_dma_irq_handler, 0, "DMA",
					(void *) (ch + 1));
			if (r != 0) {
				int i;

				printk(KERN_ERR "unable to request IRQ %d "
				       "for DMA (error %d)\n",
				       omap1_dma_irq[ch], r);
				/* Roll back the IRQs requested so far */
				for (i = 0; i < ch; i++)
					free_irq(omap1_dma_irq[i],
						 (void *) (i + 1));
				goto out_free;
			}
		}
	}

	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
		omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
				DMA_DEFAULT_FIFO_DEPTH, 0);

	/* OMAP2+ uses a single shared controller interrupt line */
	if (cpu_class_is_omap2()) {
		int irq;
		if (cpu_is_omap44xx())
			irq = OMAP44XX_IRQ_SDMA_0;
		else
			irq = INT_24XX_SDMA_IRQ0;
		setup_irq(irq, &omap24xx_dma_irq);
	}

	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
		/* Enable smartidle idlemodes and autoidle */
		u32 v = dma_read(OCP_SYSCONFIG, 0);
		v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
				DMA_SYSCONFIG_SIDLEMODE_MASK |
				DMA_SYSCONFIG_AUTOIDLE);
		v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
			DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
			DMA_SYSCONFIG_AUTOIDLE);
		dma_write(v , OCP_SYSCONFIG, 0);
		/* reserve dma channels 0 and 1 in high security devices */
		if (cpu_is_omap34xx() &&
			(omap_type() != OMAP2_DEVICE_TYPE_GP)) {
			printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
					"HS ROM code\n");
			dma_chan[0].dev_id = 0;
			dma_chan[1].dev_id = 1;
		}
	}

	return 0;

out_free:
	kfree(dma_chan);

out_unmap:
	iounmap(omap_dma_base);

	return r;
}
2356
2357 arch_initcall(omap_init_dma);
2358
2359 /*
2360  * Reserve the omap SDMA channels using cmdline bootarg
2361  * "omap_dma_reserve_ch=". The valid range is 1 to 32
2362  */
2363 static int __init omap_dma_cmdline_reserve_ch(char *str)
2364 {
2365         if (get_option(&str, &omap_dma_reserve_channels) != 1)
2366                 omap_dma_reserve_channels = 0;
2367         return 1;
2368 }
2369
2370 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
2371
2372