1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 - 2008 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7  * Graphics DMA and LCD DMA graphics tranformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Copyright (C) 2009 Texas Instruments
14  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15  *
16  * Support functions for the OMAP internal DMA channels.
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License version 2 as
20  * published by the Free Software Foundation.
21  *
22  */
23
24 #include <linux/module.h>
25 #include <linux/init.h>
26 #include <linux/sched.h>
27 #include <linux/spinlock.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/irq.h>
31 #include <linux/io.h>
32 #include <linux/slab.h>
33 #include <linux/delay.h>
34
35 #include <asm/system.h>
36 #include <mach/hardware.h>
37 #include <plat/dma.h>
38
39 #include <plat/tc.h>
40
41 #undef DEBUG
42
/*
 * OMAP1 system DMA register offset table, indexed by the shared
 * enum omap_reg_offsets (presumably from plat/dma.h — confirm).
 * Entries above the "Common Registers" marker are global controller
 * registers; common and channel-specific entries are per logical
 * channel and are additionally scaled by dma_stride * lch in
 * dma_read()/dma_write().
 */
static u16 reg_map_omap1[] = {
	[GCR]		= 0x400,
	[GSCR]		= 0x404,
	[GRST1]		= 0x408,
	[HW_ID]		= 0x442,
	[PCH2_ID]	= 0x444,
	[PCH0_ID]	= 0x446,
	[PCH1_ID]	= 0x448,
	[PCHG_ID]	= 0x44a,
	[PCHD_ID]	= 0x44c,
	[CAPS_0]	= 0x44e,
	[CAPS_1]	= 0x452,
	[CAPS_2]	= 0x456,
	[CAPS_3]	= 0x458,
	[CAPS_4]	= 0x45a,
	[PCH2_SR]	= 0x460,
	[PCH0_SR]	= 0x480,
	[PCH1_SR]	= 0x482,
	[PCHD_SR]	= 0x4c0,

	/* Common Registers */
	[CSDP]		= 0x00,
	[CCR]		= 0x02,
	[CICR]		= 0x04,
	[CSR]		= 0x06,
	[CEN]		= 0x10,
	[CFN]		= 0x12,
	[CSFI]		= 0x14,
	[CSEI]		= 0x16,
	[CPC]		= 0x18,	/* 15xx only; shares the offset with CSAC */
	[CSAC]		= 0x18,
	[CDAC]		= 0x1a,
	[CDEI]		= 0x1c,
	[CDFI]		= 0x1e,
	[CLNK_CTRL]	= 0x28,

	/* Channel specific register offsets */
	[CSSA]		= 0x08,
	[CDSA]		= 0x0c,
	[COLOR]		= 0x20,
	[CCR2]		= 0x24,
	[LCH_CTRL]	= 0x2a,
};
86
/*
 * OMAP2+ system DMA register offset table, indexed by the same
 * enum omap_reg_offsets as reg_map_omap1[].  Global registers come
 * first; common and channel-specific entries are scaled per channel
 * by dma_stride in dma_read()/dma_write().  The CDP/CNDP/CCDN
 * entries exist only on OMAP4-class controllers.
 */
static u16 reg_map_omap2[] = {
	[REVISION]		= 0x00,
	[GCR]			= 0x78,
	[IRQSTATUS_L0]		= 0x08,
	[IRQSTATUS_L1]		= 0x0c,
	[IRQSTATUS_L2]		= 0x10,
	[IRQSTATUS_L3]		= 0x14,
	[IRQENABLE_L0]		= 0x18,
	[IRQENABLE_L1]		= 0x1c,
	[IRQENABLE_L2]		= 0x20,
	[IRQENABLE_L3]		= 0x24,
	[SYSSTATUS]		= 0x28,
	[OCP_SYSCONFIG]		= 0x2c,
	[CAPS_0]		= 0x64,
	[CAPS_2]		= 0x6c,
	[CAPS_3]		= 0x70,
	[CAPS_4]		= 0x74,

	/* Common register offsets */
	[CCR]			= 0x80,
	[CLNK_CTRL]		= 0x84,
	[CICR]			= 0x88,
	[CSR]			= 0x8c,
	[CSDP]			= 0x90,
	[CEN]			= 0x94,
	[CFN]			= 0x98,
	[CSEI]			= 0xa4,
	[CSFI]			= 0xa8,
	[CDEI]			= 0xac,
	[CDFI]			= 0xb0,
	[CSAC]			= 0xb4,
	[CDAC]			= 0xb8,

	/* Channel specific register offsets */
	[CSSA]			= 0x9c,
	[CDSA]			= 0xa0,
	[CCEN]			= 0xbc,
	[CCFN]			= 0xc0,
	[COLOR]			= 0xc4,

	/* OMAP4 specific registers */
	[CDP]			= 0xd0,
	[CNDP]			= 0xd4,
	[CCDN]			= 0xd8,
};
132
#ifndef CONFIG_ARCH_OMAP1
/* Per-channel software state for OMAP2+ chaining (omap_dma_lch.state) */
enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
	DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
};

/* Whole-chain run state (dma_link_info.chain_state) */
enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
#endif
140
/* dma_chan[].flags bit: a transfer is in flight on this channel */
#define OMAP_DMA_ACTIVE			0x01
/* Write-one-to-clear mask covering the OMAP2+ CSR event bits */
#define OMAP2_DMA_CSR_CLEAR_MASK	0xffe

#define OMAP_FUNC_MUX_ARM_BASE		(0xfffe1000 + 0xec)

static int enable_1510_mode;
/* Bitmask of active errata workarounds (errata handling feature) */
static u32 errata;

/* Global registers saved/restored across a context-loss cycle */
static struct omap_dma_global_context_registers {
	u32 dma_irqenable_l0;
	u32 dma_ocp_sysconfig;
	u32 dma_gcr;
} omap_dma_global_context;
154
/* Software bookkeeping for one logical DMA channel */
struct omap_dma_lch {
	int next_lch;		/* next channel when linked, -1 if none */
	int dev_id;		/* sync device id; -1 when channel is free */
	u16 saved_csr;
	u16 enabled_irqs;	/* CICR bits programmed on channel enable */
	const char *dev_name;
	void (*callback)(int lch, u16 ch_status, void *data);
	void *data;		/* opaque cookie passed back to callback */

#ifndef CONFIG_ARCH_OMAP1
	/* required for Dynamic chaining */
	int prev_linked_ch;
	int next_linked_ch;
	int state;		/* DMA_CH_* state above */
	int chain_id;		/* owning chain; -1 when not chained */

	int status;
#endif
	long flags;		/* OMAP_DMA_ACTIVE etc. */
};
175
/* Bookkeeping for one channel chain (OMAP2+ chaining API) */
struct dma_link_info {
	int *linked_dmach_q;	/* channels linked into this chain */
	int no_of_lchs_linked;	/* entries in linked_dmach_q */

	int q_count;		/* transfers currently queued */
	int q_tail;
	int q_head;

	int chain_state;	/* DMA_CHAIN_* state above */
	int chain_mode;

};

/* One entry per chain id; allocated elsewhere (not visible here) */
static struct dma_link_info *dma_linked_lch;
190
#ifndef CONFIG_ARCH_OMAP1

/* Chain handling macros */

/* Reset the chain's queue indices and count */
#define OMAP_DMA_CHAIN_QINIT(chain_id)					\
	do {								\
		dma_linked_lch[chain_id].q_head =			\
		dma_linked_lch[chain_id].q_tail =			\
		dma_linked_lch[chain_id].q_count = 0;			\
	} while (0)

/* True when every linked channel of the chain is queued */
#define OMAP_DMA_CHAIN_QFULL(chain_id)					\
		(dma_linked_lch[chain_id].no_of_lchs_linked ==		\
		dma_linked_lch[chain_id].q_count)

/*
 * True when exactly one free slot remains in the chain's queue.
 * The previous definition wrapped this comparison in do { } while (0),
 * which made it unusable as an expression (and the inner statement
 * lacked a semicolon, so any expansion would not compile).  Define it
 * as a plain expression, matching QFULL/QEMPTY.
 */
#define OMAP_DMA_CHAIN_QLAST(chain_id)					\
		((dma_linked_lch[chain_id].no_of_lchs_linked - 1) ==	\
		dma_linked_lch[chain_id].q_count)

/* True when no transfer is queued on the chain */
#define OMAP_DMA_CHAIN_QEMPTY(chain_id)					\
		(0 == dma_linked_lch[chain_id].q_count)

/* Advance a queue index, wrapping at the number of linked channels */
#define __OMAP_DMA_CHAIN_INCQ(end)					\
	((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)

/* Consume one entry from the head of the chain's queue */
#define OMAP_DMA_CHAIN_INCQHEAD(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
		dma_linked_lch[chain_id].q_count--;			\
	} while (0)

/* Append one entry at the tail of the chain's queue */
#define OMAP_DMA_CHAIN_INCQTAIL(chain_id)				\
	do {								\
		__OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
		dma_linked_lch[chain_id].q_count++;			\
	} while (0)
#endif
224
static int dma_lch_count;
static int dma_chan_count;
static int omap_dma_reserve_channels;

static spinlock_t dma_chan_lock;	/* guards dma_chan[] alloc state */
static struct omap_dma_lch *dma_chan;
static void __iomem *omap_dma_base;	/* mapped controller base address */
static u16 *reg_map;			/* reg_map_omap1 or reg_map_omap2 */
static u8 dma_stride;			/* per-channel register bank stride */
static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end;

/* IRQ line for each OMAP1 logical channel, indexed by channel number */
static const u8 omap1_dma_irq[OMAP1_LOGICAL_DMA_CH_COUNT] = {
	INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
	INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
	INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
	INT_1610_DMA_CH11, INT_1610_DMA_CH12, INT_1610_DMA_CH13,
	INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
};
243
/* Forward declarations; definitions are later in this file */
static inline void disable_lnk(int lch);
static void omap_disable_channel_irq(int lch);
static inline void omap_enable_channel_irq(int lch);

/*
 * NOTE(review): the expansion already ends in ';', so invocations
 * written as REVISIT_24XX(); yield a harmless extra empty statement.
 */
#define REVISIT_24XX()		printk(KERN_ERR "FIXME: no %s on 24xx\n", \
						__func__);
250
/*
 * Write a DMA controller register.  Registers at or above
 * dma_common_ch_start are per-channel and offset by dma_stride * lch.
 * When dma_stride is 0x40 (OMAP1 register layout) accesses are 16-bit
 * wide, so the 32-bit registers in the (CLNK_CTRL, CCEN) and
 * (PCHD_ID, CAPS_2) ranges are written as two halves, low half first.
 */
static inline void dma_write(u32 val, int reg, int lch)
{
	u8  stride;
	u32 offset;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);

	if (dma_stride  == 0x40) {
		__raw_writew(val, omap_dma_base + offset);
		if ((reg > CLNK_CTRL && reg < CCEN) ||
				(reg > PCHD_ID && reg < CAPS_2)) {
			/* upper 16 bits live at the next halfword */
			u32 offset2 = reg_map[reg] + 2 + (stride * lch);
			__raw_writew(val >> 16, omap_dma_base + offset2);
		}
	} else {
		__raw_writel(val, omap_dma_base + offset);
	}
}
270
/*
 * Read a DMA controller register; mirror of dma_write().  With the
 * 0x40 stride (OMAP1 layout) accesses are 16-bit, and the 32-bit
 * registers in the (CLNK_CTRL, CCEN) and (PCHD_ID, CAPS_2) ranges are
 * assembled from two consecutive 16-bit reads.
 */
static inline u32 dma_read(int reg, int lch)
{
	u8 stride;
	u32 offset, val;

	stride = (reg >= dma_common_ch_start) ? dma_stride : 0;
	offset = reg_map[reg] + (stride * lch);

	if (dma_stride  == 0x40) {
		val = __raw_readw(omap_dma_base + offset);
		if ((reg > CLNK_CTRL && reg < CCEN) ||
				(reg > PCHD_ID && reg < CAPS_2)) {
			u16 upper;
			u32 offset2 = reg_map[reg] + 2 + (stride * lch);
			upper = __raw_readw(omap_dma_base + offset2);
			val |= (upper << 16);
		}
	} else {
		val = __raw_readl(omap_dma_base + offset);
	}
	return val;
}
293
#ifdef CONFIG_ARCH_OMAP15XX
/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
static int omap_dma_in_1510_mode(void)
{
	return enable_1510_mode;
}
#else
/* Compatibility mode only exists on 15xx hardware */
#define omap_dma_in_1510_mode()		0
#endif
303
#ifdef CONFIG_ARCH_OMAP1
/*
 * OMAP1 FUNC_MUX GDMA routing: five 6-bit request mappings are packed
 * per 32-bit word starting at OMAP_FUNC_MUX_ARM_BASE; req is 1-based.
 */
static inline int get_gdma_dev(int req)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;

	return ((omap_readl(reg) >> shift) & 0x3f) + 1;
}

/* Route hardware request line 'req' to sync device 'dev' (both 1-based) */
static inline void set_gdma_dev(int req, int dev)
{
	u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
	int shift = ((req - 1) % 5) * 6;
	u32 l;

	l = omap_readl(reg);
	l &= ~(0x3f << shift);
	l |= (dev - 1) << shift;
	omap_writel(l, reg);
}
#else
/* No GDMA routing on OMAP2+; compile to nothing */
#define set_gdma_dev(req, dev)	do {} while (0)
#endif
327
328 /* Omap1 only */
329 static void clear_lch_regs(int lch)
330 {
331         int i = dma_common_ch_start;
332
333         for (; i <= dma_common_ch_end; i += 1)
334                 dma_write(0, i, lch);
335 }
336
337 void omap_set_dma_priority(int lch, int dst_port, int priority)
338 {
339         unsigned long reg;
340         u32 l;
341
342         if (cpu_class_is_omap1()) {
343                 switch (dst_port) {
344                 case OMAP_DMA_PORT_OCP_T1:      /* FFFECC00 */
345                         reg = OMAP_TC_OCPT1_PRIOR;
346                         break;
347                 case OMAP_DMA_PORT_OCP_T2:      /* FFFECCD0 */
348                         reg = OMAP_TC_OCPT2_PRIOR;
349                         break;
350                 case OMAP_DMA_PORT_EMIFF:       /* FFFECC08 */
351                         reg = OMAP_TC_EMIFF_PRIOR;
352                         break;
353                 case OMAP_DMA_PORT_EMIFS:       /* FFFECC04 */
354                         reg = OMAP_TC_EMIFS_PRIOR;
355                         break;
356                 default:
357                         BUG();
358                         return;
359                 }
360                 l = omap_readl(reg);
361                 l &= ~(0xf << 8);
362                 l |= (priority & 0xf) << 8;
363                 omap_writel(l, reg);
364         }
365
366         if (cpu_class_is_omap2()) {
367                 u32 ccr;
368
369                 ccr = dma_read(CCR, lch);
370                 if (priority)
371                         ccr |= (1 << 6);
372                 else
373                         ccr &= ~(1 << 6);
374                 dma_write(ccr, CCR, lch);
375         }
376 }
377 EXPORT_SYMBOL(omap_set_dma_priority);
378
/*
 * Program a channel's transfer geometry and synchronisation: element
 * data type (CSDP[1:0]), element/frame counts (CEN/CFN) and the
 * frame/block sync bits.  dma_trigger and src_or_dst_synch only take
 * effect on OMAP2+; OMAP1 uses CCR bit 5 and CCR2 bit 2 instead.
 */
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
				  int frame_count, int sync_mode,
				  int dma_trigger, int src_or_dst_synch)
{
	u32 l;

	/* CSDP[1:0]: element data type */
	l = dma_read(CSDP, lch);
	l &= ~0x03;
	l |= data_type;
	dma_write(l, CSDP, lch);

	if (cpu_class_is_omap1()) {
		u16 ccr;

		/* CCR bit 5: frame synchronisation */
		ccr = dma_read(CCR, lch);
		ccr &= ~(1 << 5);
		if (sync_mode == OMAP_DMA_SYNC_FRAME)
			ccr |= 1 << 5;
		dma_write(ccr, CCR, lch);

		/* CCR2 bit 2: block synchronisation */
		ccr = dma_read(CCR2, lch);
		ccr &= ~(1 << 2);
		if (sync_mode == OMAP_DMA_SYNC_BLOCK)
			ccr |= 1 << 2;
		dma_write(ccr, CCR2, lch);
	}

	if (cpu_class_is_omap2() && dma_trigger) {
		u32 val;

		val = dma_read(CCR, lch);

		/* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
		val &= ~((1 << 23) | (3 << 19) | 0x1f);
		/* trigger bits 6.. go to CCR[20:19], low 5 bits to CCR[4:0] */
		val |= (dma_trigger & ~0x1f) << 14;
		val |= dma_trigger & 0x1f;

		if (sync_mode & OMAP_DMA_SYNC_FRAME)
			val |= 1 << 5;
		else
			val &= ~(1 << 5);

		if (sync_mode & OMAP_DMA_SYNC_BLOCK)
			val |= 1 << 18;
		else
			val &= ~(1 << 18);

		if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
			val &= ~(1 << 24);	/* dest synch */
			val |= (1 << 23);	/* Prefetch */
		} else if (src_or_dst_synch) {
			val |= 1 << 24;		/* source synch */
		} else {
			val &= ~(1 << 24);	/* dest synch */
		}
		dma_write(val, CCR, lch);
	}

	dma_write(elem_count, CEN, lch);
	dma_write(frame_count, CFN, lch);
}
EXPORT_SYMBOL(omap_set_dma_transfer_params);
441
/*
 * Configure constant-fill / transparent-copy for a channel and program
 * the fill/key colour.  Not available in OMAP1510-compatible mode.
 * OMAP1 uses CCR2[1:0] plus LCH_CTRL; OMAP2+ uses CCR bits 17:16 and
 * truncates the colour to 24 bits.
 */
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
	BUG_ON(omap_dma_in_1510_mode());

	if (cpu_class_is_omap1()) {
		u16 w;

		w = dma_read(CCR2, lch);
		w &= ~0x03;

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			w |= 0x01;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			w |= 0x02;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(w, CCR2, lch);

		w = dma_read(LCH_CTRL, lch);
		w &= ~0x0f;
		/* Default is channel type 2D */
		if (mode) {
			dma_write(color, COLOR, lch);
			w |= 1;		/* Channel type G */
		}
		dma_write(w, LCH_CTRL, lch);
	}

	if (cpu_class_is_omap2()) {
		u32 val;

		val = dma_read(CCR, lch);
		val &= ~((1 << 17) | (1 << 16));

		switch (mode) {
		case OMAP_DMA_CONSTANT_FILL:
			val |= 1 << 16;
			break;
		case OMAP_DMA_TRANSPARENT_COPY:
			val |= 1 << 17;
			break;
		case OMAP_DMA_COLOR_DIS:
			break;
		default:
			BUG();
		}
		dma_write(val, CCR, lch);

		/* COLOR register is 24 bits wide here */
		color &= 0xffffff;
		dma_write(color, COLOR, lch);
	}
}
EXPORT_SYMBOL(omap_set_dma_color_mode);
501
502 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
503 {
504         if (cpu_class_is_omap2()) {
505                 u32 csdp;
506
507                 csdp = dma_read(CSDP, lch);
508                 csdp &= ~(0x3 << 16);
509                 csdp |= (mode << 16);
510                 dma_write(csdp, CSDP, lch);
511         }
512 }
513 EXPORT_SYMBOL(omap_set_dma_write_mode);
514
515 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
516 {
517         if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
518                 u32 l;
519
520                 l = dma_read(LCH_CTRL, lch);
521                 l &= ~0x7;
522                 l |= mode;
523                 dma_write(l, LCH_CTRL, lch);
524         }
525 }
526 EXPORT_SYMBOL(omap_set_dma_channel_mode);
527
528 /* Note that src_port is only for omap1 */
529 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
530                              unsigned long src_start,
531                              int src_ei, int src_fi)
532 {
533         u32 l;
534
535         if (cpu_class_is_omap1()) {
536                 u16 w;
537
538                 w = dma_read(CSDP, lch);
539                 w &= ~(0x1f << 2);
540                 w |= src_port << 2;
541                 dma_write(w, CSDP, lch);
542         }
543
544         l = dma_read(CCR, lch);
545         l &= ~(0x03 << 12);
546         l |= src_amode << 12;
547         dma_write(l, CCR, lch);
548
549         dma_write(src_start, CSSA, lch);
550
551         dma_write(src_ei, CSEI, lch);
552         dma_write(src_fi, CSFI, lch);
553 }
554 EXPORT_SYMBOL(omap_set_dma_src_params);
555
/* Program every channel parameter from a single descriptor struct */
void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
{
	omap_set_dma_transfer_params(lch, params->data_type,
				     params->elem_count, params->frame_count,
				     params->sync_mode, params->trigger,
				     params->src_or_dst_synch);
	omap_set_dma_src_params(lch, params->src_port,
				params->src_amode, params->src_start,
				params->src_ei, params->src_fi);

	omap_set_dma_dest_params(lch, params->dst_port,
				 params->dst_amode, params->dst_start,
				 params->dst_ei, params->dst_fi);
	/* Only touch channel priority when one was actually requested */
	if (params->read_prio || params->write_prio)
		omap_dma_set_prio_lch(lch, params->read_prio,
				      params->write_prio);
}
EXPORT_SYMBOL(omap_set_dma_params);
574
575 void omap_set_dma_src_index(int lch, int eidx, int fidx)
576 {
577         if (cpu_class_is_omap2())
578                 return;
579
580         dma_write(eidx, CSEI, lch);
581         dma_write(fidx, CSFI, lch);
582 }
583 EXPORT_SYMBOL(omap_set_dma_src_index);
584
585 void omap_set_dma_src_data_pack(int lch, int enable)
586 {
587         u32 l;
588
589         l = dma_read(CSDP, lch);
590         l &= ~(1 << 6);
591         if (enable)
592                 l |= (1 << 6);
593         dma_write(l, CSDP, lch);
594 }
595 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
596
/*
 * Program the source burst size (CSDP bits 8:7).  Hardware encodings
 * differ between OMAP1 and OMAP2+; the missing breaks below are
 * deliberate fall-throughs so that combinations unsupported on OMAP1
 * reach BUG().
 */
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP, lch);
	l &= ~(0x03 << 7);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2()) {
			burst = 0x2;
			break;
		}
		/*
		 * not supported by current hardware on OMAP1
		 * w |= (0x03 << 7);
		 * fall through
		 */
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		BUG();
	}

	l |= (burst << 7);
	dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
641
642 /* Note that dest_port is only for OMAP1 */
643 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
644                               unsigned long dest_start,
645                               int dst_ei, int dst_fi)
646 {
647         u32 l;
648
649         if (cpu_class_is_omap1()) {
650                 l = dma_read(CSDP, lch);
651                 l &= ~(0x1f << 9);
652                 l |= dest_port << 9;
653                 dma_write(l, CSDP, lch);
654         }
655
656         l = dma_read(CCR, lch);
657         l &= ~(0x03 << 14);
658         l |= dest_amode << 14;
659         dma_write(l, CCR, lch);
660
661         dma_write(dest_start, CDSA, lch);
662
663         dma_write(dst_ei, CDEI, lch);
664         dma_write(dst_fi, CDFI, lch);
665 }
666 EXPORT_SYMBOL(omap_set_dma_dest_params);
667
668 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
669 {
670         if (cpu_class_is_omap2())
671                 return;
672
673         dma_write(eidx, CDEI, lch);
674         dma_write(fidx, CDFI, lch);
675 }
676 EXPORT_SYMBOL(omap_set_dma_dest_index);
677
678 void omap_set_dma_dest_data_pack(int lch, int enable)
679 {
680         u32 l;
681
682         l = dma_read(CSDP, lch);
683         l &= ~(1 << 13);
684         if (enable)
685                 l |= 1 << 13;
686         dma_write(l, CSDP, lch);
687 }
688 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
689
/*
 * Program the destination burst size (CSDP bits 15:14).  Encodings
 * differ between OMAP1 and OMAP2+; burst-16 on OMAP1 falls through
 * to the error path on purpose.
 */
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
	unsigned int burst = 0;
	u32 l;

	l = dma_read(CSDP, lch);
	l &= ~(0x03 << 14);

	switch (burst_mode) {
	case OMAP_DMA_DATA_BURST_DIS:
		break;
	case OMAP_DMA_DATA_BURST_4:
		if (cpu_class_is_omap2())
			burst = 0x1;
		else
			burst = 0x2;
		break;
	case OMAP_DMA_DATA_BURST_8:
		if (cpu_class_is_omap2())
			burst = 0x2;
		else
			burst = 0x3;
		break;
	case OMAP_DMA_DATA_BURST_16:
		if (cpu_class_is_omap2()) {
			burst = 0x3;
			break;
		}
		/*
		 * OMAP1 don't support burst 16
		 * fall through
		 */
	default:
		printk(KERN_ERR "Invalid DMA burst mode\n");
		BUG();
		return;
	}
	l |= (burst << 14);
	dma_write(l, CSDP, lch);
}
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
731
/*
 * Clear any pending channel status and program the channel's enabled
 * interrupt set into CICR.  On OMAP1 the CSR read value is deliberately
 * discarded (presumably read-to-clear hardware — TODO confirm); on
 * OMAP2+ the status bits are write-one-to-clear.
 */
static inline void omap_enable_channel_irq(int lch)
{
	u32 status;

	/* Clear CSR */
	if (cpu_class_is_omap1())
		status = dma_read(CSR, lch);
	else if (cpu_class_is_omap2())
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);

	/* Enable some nice interrupts. */
	dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}
745
/*
 * Mask all channel interrupts on OMAP2+.  Deliberately a no-op on
 * OMAP1, where callers (e.g. disable_lnk()) write CICR directly.
 */
static void omap_disable_channel_irq(int lch)
{
	if (cpu_class_is_omap2())
		dma_write(0, CICR, lch);
}
751
/*
 * Add bits to the channel's interrupt set.  Takes effect the next time
 * CICR is programmed (omap_enable_channel_irq()), not immediately.
 */
void omap_enable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs |= bits;
}
EXPORT_SYMBOL(omap_enable_dma_irq);
757
/*
 * Remove bits from the channel's interrupt set.  Like
 * omap_enable_dma_irq(), only applied on the next CICR programming.
 */
void omap_disable_dma_irq(int lch, u16 bits)
{
	dma_chan[lch].enabled_irqs &= ~bits;
}
EXPORT_SYMBOL(omap_disable_dma_irq);
763
/*
 * Link this channel to its successor: write the next channel number
 * together with the ENABLE_LNK bit (bit 15) into CLNK_CTRL.  On OMAP1
 * the STOP_LNK bit (bit 14) is cleared first.  OMAP2+ chained channels
 * use next_linked_ch instead of next_lch.
 */
static inline void enable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL, lch);

	if (cpu_class_is_omap1())
		l &= ~(1 << 14);

	/* Set the ENABLE_LNK bits */
	if (dma_chan[lch].next_lch != -1)
		l = dma_chan[lch].next_lch | (1 << 15);

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2())
		if (dma_chan[lch].next_linked_ch != -1)
			l = dma_chan[lch].next_linked_ch | (1 << 15);
#endif

	dma_write(l, CLNK_CTRL, lch);
}
785
/*
 * Break the channel out of its link: mask its interrupts, then on
 * OMAP1 set STOP_LNK (bit 14) and on OMAP2+ clear ENABLE_LNK (bit 15)
 * in CLNK_CTRL.  Also marks the channel software-inactive.
 */
static inline void disable_lnk(int lch)
{
	u32 l;

	l = dma_read(CLNK_CTRL, lch);

	/* Disable interrupts */
	if (cpu_class_is_omap1()) {
		dma_write(0, CICR, lch);
		/* Set the STOP_LNK bit */
		l |= 1 << 14;
	}

	if (cpu_class_is_omap2()) {
		omap_disable_channel_irq(lch);
		/* Clear the ENABLE_LNK bit */
		l &= ~(1 << 15);
	}

	dma_write(l, CLNK_CTRL, lch);
	dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
808
809 static inline void omap2_enable_irq_lch(int lch)
810 {
811         u32 val;
812         unsigned long flags;
813
814         if (!cpu_class_is_omap2())
815                 return;
816
817         spin_lock_irqsave(&dma_chan_lock, flags);
818         val = dma_read(IRQENABLE_L0, lch);
819         val |= 1 << lch;
820         dma_write(val, IRQENABLE_L0, lch);
821         spin_unlock_irqrestore(&dma_chan_lock, flags);
822 }
823
824 static inline void omap2_disable_irq_lch(int lch)
825 {
826         u32 val;
827         unsigned long flags;
828
829         if (!cpu_class_is_omap2())
830                 return;
831
832         spin_lock_irqsave(&dma_chan_lock, flags);
833         val = dma_read(IRQENABLE_L0, lch);
834         val &= ~(1 << lch);
835         dma_write(val, IRQENABLE_L0, lch);
836         spin_unlock_irqrestore(&dma_chan_lock, flags);
837 }
838
/*
 * Allocate a free logical DMA channel for exclusive use.
 *
 * dev_id selects the hardware sync device (0 = software-triggered);
 * callback, if non-NULL, is invoked with 'data' from the DMA interrupt
 * path.  On success the channel number is stored in *dma_ch_out and 0
 * is returned; -EBUSY when no channel is free.
 */
int omap_request_dma(int dev_id, const char *dev_name,
		     void (*callback)(int lch, u16 ch_status, void *data),
		     void *data, int *dma_ch_out)
{
	int ch, free_ch = -1;
	unsigned long flags;
	struct omap_dma_lch *chan;

	/* Claim the first free channel under the allocation lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	for (ch = 0; ch < dma_chan_count; ch++) {
		if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
			free_ch = ch;
			if (dev_id == 0)
				break;
		}
	}
	if (free_ch == -1) {
		spin_unlock_irqrestore(&dma_chan_lock, flags);
		return -EBUSY;
	}
	chan = dma_chan + free_ch;
	chan->dev_id = dev_id;

	/* Put the channel's registers into a known clean state */
	if (cpu_class_is_omap1())
		clear_lch_regs(free_ch);

	if (cpu_class_is_omap2())
		omap_clear_dma(free_ch);

	spin_unlock_irqrestore(&dma_chan_lock, flags);

	chan->dev_name = dev_name;
	chan->callback = callback;
	chan->data = data;
	chan->flags = 0;

#ifndef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap2()) {
		chan->chain_id = -1;
		chan->next_linked_ch = -1;
	}
#endif

	/* Default IRQ set; drivers can adjust via omap_enable_dma_irq() */
	chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;

	if (cpu_class_is_omap1())
		chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
	else if (cpu_class_is_omap2())
		chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
			OMAP2_DMA_TRANS_ERR_IRQ;

	if (cpu_is_omap16xx()) {
		/* If the sync device is set, configure it dynamically. */
		if (dev_id != 0) {
			set_gdma_dev(free_ch + 1, dev_id);
			dev_id = free_ch + 1;
		}
		/*
		 * Disable the 1510 compatibility mode and set the sync device
		 * id.
		 */
		dma_write(dev_id | (1 << 10), CCR, free_ch);
	} else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
		dma_write(dev_id, CCR, free_ch);
	}

	if (cpu_class_is_omap2()) {
		omap2_enable_irq_lch(free_ch);
		omap_enable_channel_irq(free_ch);
		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
		dma_write(1 << free_ch, IRQSTATUS_L0, 0);
	}

	*dma_ch_out = free_ch;

	return 0;
}
EXPORT_SYMBOL(omap_request_dma);
918
/*
 * Release a channel obtained via omap_request_dma(): stop any transfer
 * in flight, mask and clear its interrupts, and mark it free again.
 */
void omap_free_dma(int lch)
{
	unsigned long flags;

	if (dma_chan[lch].dev_id == -1) {
		pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
		       lch);
		return;
	}

	if (cpu_class_is_omap1()) {
		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR, lch);
		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR, lch);
	}

	if (cpu_class_is_omap2()) {
		omap2_disable_irq_lch(lch);

		/* Clear the CSR register and IRQ status register */
		dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
		dma_write(1 << lch, IRQSTATUS_L0, lch);

		/* Disable all DMA interrupts for the channel. */
		dma_write(0, CICR, lch);

		/* Make sure the DMA transfer is stopped. */
		dma_write(0, CCR, lch);
		omap_clear_dma(lch);
	}

	/* Return the channel to the free pool under the allocation lock */
	spin_lock_irqsave(&dma_chan_lock, flags);
	dma_chan[lch].dev_id = -1;
	dma_chan[lch].next_lch = -1;
	dma_chan[lch].callback = NULL;
	spin_unlock_irqrestore(&dma_chan_lock, flags);
}
EXPORT_SYMBOL(omap_free_dma);
958
959 /**
960  * @brief omap_dma_set_global_params : Set global priority settings for dma
961  *
962  * @param arb_rate
963  * @param max_fifo_depth
964  * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
965  *                                                 DMA_THREAD_RESERVE_ONET
966  *                                                 DMA_THREAD_RESERVE_TWOT
967  *                                                 DMA_THREAD_RESERVE_THREET
968  */
969 void
970 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
971 {
972         u32 reg;
973
974         if (!cpu_class_is_omap2()) {
975                 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
976                 return;
977         }
978
979         if (max_fifo_depth == 0)
980                 max_fifo_depth = 1;
981         if (arb_rate == 0)
982                 arb_rate = 1;
983
984         reg = 0xff & max_fifo_depth;
985         reg |= (0x3 & tparams) << 12;
986         reg |= (arb_rate & 0xff) << 16;
987
988         dma_write(reg, GCR, 0);
989 }
990 EXPORT_SYMBOL(omap_dma_set_global_params);
991
992 /**
993  * @brief omap_dma_set_prio_lch : Set channel wise priority settings
994  *
995  * @param lch
996  * @param read_prio - Read priority
997  * @param write_prio - Write priority
998  * Both of the above can be set with one of the following values :
999  *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
1000  */
1001 int
1002 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
1003                       unsigned char write_prio)
1004 {
1005         u32 l;
1006
1007         if (unlikely((lch < 0 || lch >= dma_lch_count))) {
1008                 printk(KERN_ERR "Invalid channel id\n");
1009                 return -EINVAL;
1010         }
1011         l = dma_read(CCR, lch);
1012         l &= ~((1 << 6) | (1 << 26));
1013         if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
1014                 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
1015         else
1016                 l |= ((read_prio & 0x1) << 6);
1017
1018         dma_write(l, CCR, lch);
1019
1020         return 0;
1021 }
1022 EXPORT_SYMBOL(omap_dma_set_prio_lch);
1023
1024 /*
1025  * Clears any DMA state so the DMA engine is ready to restart with new buffers
1026  * through omap_start_dma(). Any buffers in flight are discarded.
1027  */
1028 void omap_clear_dma(int lch)
1029 {
1030         unsigned long flags;
1031
1032         local_irq_save(flags);
1033
1034         if (cpu_class_is_omap1()) {
1035                 u32 l;
1036
1037                 l = dma_read(CCR, lch);
1038                 l &= ~OMAP_DMA_CCR_EN;
1039                 dma_write(l, CCR, lch);
1040
1041                 /* Clear pending interrupts */
1042                 l = dma_read(CSR, lch);
1043         }
1044
1045         if (cpu_class_is_omap2()) {
1046                 int i = dma_common_ch_start;
1047                 for (; i <= dma_common_ch_end; i += 1)
1048                         dma_write(0, i, lch);
1049         }
1050
1051         local_irq_restore(flags);
1052 }
1053 EXPORT_SYMBOL(omap_clear_dma);
1054
1055 void omap_start_dma(int lch)
1056 {
1057         u32 l;
1058
1059         /*
1060          * The CPC/CDAC register needs to be initialized to zero
1061          * before starting dma transfer.
1062          */
1063         if (cpu_is_omap15xx())
1064                 dma_write(0, CPC, lch);
1065         else
1066                 dma_write(0, CDAC, lch);
1067
1068         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
1069                 int next_lch, cur_lch;
1070                 char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];
1071
1072                 dma_chan_link_map[lch] = 1;
1073                 /* Set the link register of the first channel */
1074                 enable_lnk(lch);
1075
1076                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
1077                 cur_lch = dma_chan[lch].next_lch;
1078                 do {
1079                         next_lch = dma_chan[cur_lch].next_lch;
1080
1081                         /* The loop case: we've been here already */
1082                         if (dma_chan_link_map[cur_lch])
1083                                 break;
1084                         /* Mark the current channel */
1085                         dma_chan_link_map[cur_lch] = 1;
1086
1087                         enable_lnk(cur_lch);
1088                         omap_enable_channel_irq(cur_lch);
1089
1090                         cur_lch = next_lch;
1091                 } while (next_lch != -1);
1092         } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
1093                 dma_write(lch, CLNK_CTRL, lch);
1094
1095         omap_enable_channel_irq(lch);
1096
1097         l = dma_read(CCR, lch);
1098
1099         if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
1100                         l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
1101         l |= OMAP_DMA_CCR_EN;
1102
1103         dma_write(l, CCR, lch);
1104
1105         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1106 }
1107 EXPORT_SYMBOL(omap_start_dma);
1108
/*
 * omap_stop_dma - stop a transfer on a logical channel.
 *
 * On parts with errata i541, a source/destination-synchronized channel must
 * be drained with the sDMA module forced out of standby before the enable
 * bit is cleared; otherwise the enable bit is simply cleared. Any software
 * chain hanging off the channel has its hardware links disabled as well.
 */
void omap_stop_dma(int lch)
{
        u32 l;

        /* Disable all interrupts on the channel */
        if (cpu_class_is_omap1())
                dma_write(0, CICR, lch);

        l = dma_read(CCR, lch);
        if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
                        (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
                int i = 0;
                u32 sys_cf;

                /* Configure No-Standby */
                l = dma_read(OCP_SYSCONFIG, lch);
                sys_cf = l;
                l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
                l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
                /*
                 * NOTE(review): OCP_SYSCONFIG is read with index 'lch' but
                 * written with index 0; presumably it is a module-global
                 * register for which the channel index is ignored — confirm.
                 */
                dma_write(l , OCP_SYSCONFIG, 0);

                /* Clear the enable bit while the module cannot idle. */
                l = dma_read(CCR, lch);
                l &= ~OMAP_DMA_CCR_EN;
                dma_write(l, CCR, lch);

                /* Wait for sDMA FIFO drain */
                l = dma_read(CCR, lch);
                /* Poll RD/WR-active for at most 100 * 5us = 500us. */
                while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
                                        OMAP_DMA_CCR_WR_ACTIVE))) {
                        udelay(5);
                        i++;
                        l = dma_read(CCR, lch);
                }
                if (i >= 100)
                        printk(KERN_ERR "DMA drain did not complete on "
                                        "lch %d\n", lch);
                /* Restore OCP_SYSCONFIG */
                dma_write(sys_cf, OCP_SYSCONFIG, lch);
        } else {
                /* No erratum: just clear the channel enable bit. */
                l &= ~OMAP_DMA_CCR_EN;
                dma_write(l, CCR, lch);
        }

        if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
                int next_lch, cur_lch = lch;
                char dma_chan_link_map[OMAP_DMA4_LOGICAL_DMA_CH_COUNT];

                /* Walk the chain, disabling each link exactly once. */
                memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
                do {
                        /* The loop case: we've been here already */
                        if (dma_chan_link_map[cur_lch])
                                break;
                        /* Mark the current channel */
                        dma_chan_link_map[cur_lch] = 1;

                        disable_lnk(cur_lch);

                        next_lch = dma_chan[cur_lch].next_lch;
                        cur_lch = next_lch;
                } while (next_lch != -1);
        }

        dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
EXPORT_SYMBOL(omap_stop_dma);
1174
1175 /*
1176  * Allows changing the DMA callback function or data. This may be needed if
1177  * the driver shares a single DMA channel for multiple dma triggers.
1178  */
1179 int omap_set_dma_callback(int lch,
1180                           void (*callback)(int lch, u16 ch_status, void *data),
1181                           void *data)
1182 {
1183         unsigned long flags;
1184
1185         if (lch < 0)
1186                 return -ENODEV;
1187
1188         spin_lock_irqsave(&dma_chan_lock, flags);
1189         if (dma_chan[lch].dev_id == -1) {
1190                 printk(KERN_ERR "DMA callback for not set for free channel\n");
1191                 spin_unlock_irqrestore(&dma_chan_lock, flags);
1192                 return -EINVAL;
1193         }
1194         dma_chan[lch].callback = callback;
1195         dma_chan[lch].data = data;
1196         spin_unlock_irqrestore(&dma_chan_lock, flags);
1197
1198         return 0;
1199 }
1200 EXPORT_SYMBOL(omap_set_dma_callback);
1201
1202 /*
1203  * Returns current physical source address for the given DMA channel.
1204  * If the channel is running the caller must disable interrupts prior calling
1205  * this function and process the returned value before re-enabling interrupt to
1206  * prevent races with the interrupt handler. Note that in continuous mode there
1207  * is a chance for CSSA_L register overflow inbetween the two reads resulting
1208  * in incorrect return value.
1209  */
1210 dma_addr_t omap_get_dma_src_pos(int lch)
1211 {
1212         dma_addr_t offset = 0;
1213
1214         if (cpu_is_omap15xx())
1215                 offset = dma_read(CPC, lch);
1216         else
1217                 offset = dma_read(CSAC, lch);
1218
1219         if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1220                 offset = dma_read(CSAC, lch);
1221
1222         if (cpu_class_is_omap1())
1223                 offset |= (dma_read(CSSA, lch) & 0xFFFF0000);
1224
1225         return offset;
1226 }
1227 EXPORT_SYMBOL(omap_get_dma_src_pos);
1228
1229 /*
1230  * Returns current physical destination address for the given DMA channel.
1231  * If the channel is running the caller must disable interrupts prior calling
1232  * this function and process the returned value before re-enabling interrupt to
1233  * prevent races with the interrupt handler. Note that in continuous mode there
1234  * is a chance for CDSA_L register overflow inbetween the two reads resulting
1235  * in incorrect return value.
1236  */
1237 dma_addr_t omap_get_dma_dst_pos(int lch)
1238 {
1239         dma_addr_t offset = 0;
1240
1241         if (cpu_is_omap15xx())
1242                 offset = dma_read(CPC, lch);
1243         else
1244                 offset = dma_read(CDAC, lch);
1245
1246         /*
1247          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1248          * read before the DMA controller finished disabling the channel.
1249          */
1250         if (!cpu_is_omap15xx() && offset == 0)
1251                 offset = dma_read(CDAC, lch);
1252
1253         if (cpu_class_is_omap1())
1254                 offset |= (dma_read(CDSA, lch) & 0xFFFF0000);
1255
1256         return offset;
1257 }
1258 EXPORT_SYMBOL(omap_get_dma_dst_pos);
1259
1260 int omap_get_dma_active_status(int lch)
1261 {
1262         return (dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1263 }
1264 EXPORT_SYMBOL(omap_get_dma_active_status);
1265
1266 int omap_dma_running(void)
1267 {
1268         int lch;
1269
1270         if (cpu_class_is_omap1())
1271                 if (omap_lcd_dma_running())
1272                         return 1;
1273
1274         for (lch = 0; lch < dma_chan_count; lch++)
1275                 if (dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1276                         return 1;
1277
1278         return 0;
1279 }
1280
1281 /*
1282  * lch_queue DMA will start right after lch_head one is finished.
1283  * For this DMA link to start, you still need to start (see omap_start_dma)
1284  * the first one. That will fire up the entire queue.
1285  */
1286 void omap_dma_link_lch(int lch_head, int lch_queue)
1287 {
1288         if (omap_dma_in_1510_mode()) {
1289                 if (lch_head == lch_queue) {
1290                         dma_write(dma_read(CCR, lch_head) | (3 << 8),
1291                                                                 CCR, lch_head);
1292                         return;
1293                 }
1294                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1295                 BUG();
1296                 return;
1297         }
1298
1299         if ((dma_chan[lch_head].dev_id == -1) ||
1300             (dma_chan[lch_queue].dev_id == -1)) {
1301                 printk(KERN_ERR "omap_dma: trying to link "
1302                        "non requested channels\n");
1303                 dump_stack();
1304         }
1305
1306         dma_chan[lch_head].next_lch = lch_queue;
1307 }
1308 EXPORT_SYMBOL(omap_dma_link_lch);
1309
1310 /*
1311  * Once the DMA queue is stopped, we can destroy it.
1312  */
1313 void omap_dma_unlink_lch(int lch_head, int lch_queue)
1314 {
1315         if (omap_dma_in_1510_mode()) {
1316                 if (lch_head == lch_queue) {
1317                         dma_write(dma_read(CCR, lch_head) & ~(3 << 8),
1318                                                                 CCR, lch_head);
1319                         return;
1320                 }
1321                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1322                 BUG();
1323                 return;
1324         }
1325
1326         if (dma_chan[lch_head].next_lch != lch_queue ||
1327             dma_chan[lch_head].next_lch == -1) {
1328                 printk(KERN_ERR "omap_dma: trying to unlink "
1329                        "non linked channels\n");
1330                 dump_stack();
1331         }
1332
1333         if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1334             (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1335                 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1336                        "before unlinking\n");
1337                 dump_stack();
1338         }
1339
1340         dma_chan[lch_head].next_lch = -1;
1341 }
1342 EXPORT_SYMBOL(omap_dma_unlink_lch);
1343
1344 /*----------------------------------------------------------------------------*/
1345
#ifndef CONFIG_ARCH_OMAP1
/*
 * Create a chain of DMA channels: insert lch_queue after lch_head in the
 * circular doubly-linked list of chained channels, then program both
 * channels' hardware link registers (CLNK_CTRL) to match.
 */
static void create_dma_lch_chain(int lch_head, int lch_queue)
{
        u32 l;

        /* Check if this is the first link in chain */
        if (dma_chan[lch_head].next_linked_ch == -1) {
                /* Two-node circular list: head and queue point at each other */
                dma_chan[lch_head].next_linked_ch = lch_queue;
                dma_chan[lch_head].prev_linked_ch = lch_queue;
                dma_chan[lch_queue].next_linked_ch = lch_head;
                dma_chan[lch_queue].prev_linked_ch = lch_head;
        }

        /* a link exists, link the new channel in circular chain */
        else {
                /* Splice lch_queue between lch_head and head's old next */
                dma_chan[lch_queue].next_linked_ch =
                                        dma_chan[lch_head].next_linked_ch;
                dma_chan[lch_queue].prev_linked_ch = lch_head;
                dma_chan[lch_head].next_linked_ch = lch_queue;
                dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
                                        lch_queue;
        }

        /* Hardware link: low 5 bits of CLNK_CTRL hold the next channel id */
        l = dma_read(CLNK_CTRL, lch_head);
        l &= ~(0x1f);
        l |= lch_queue;
        dma_write(l, CLNK_CTRL, lch_head);

        l = dma_read(CLNK_CTRL, lch_queue);
        l &= ~(0x1f);
        l |= (dma_chan[lch_queue].next_linked_ch);
        dma_write(l, CLNK_CTRL, lch_queue);
}
1380
1381 /**
1382  * @brief omap_request_dma_chain : Request a chain of DMA channels
1383  *
1384  * @param dev_id - Device id using the dma channel
1385  * @param dev_name - Device name
1386  * @param callback - Call back function
1387  * @chain_id -
1388  * @no_of_chans - Number of channels requested
1389  * @chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1390  *                                            OMAP_DMA_DYNAMIC_CHAIN
1391  * @params - Channel parameters
1392  *
1393  * @return - Success : 0
1394  *           Failure: -EINVAL/-ENOMEM
1395  */
1396 int omap_request_dma_chain(int dev_id, const char *dev_name,
1397                            void (*callback) (int lch, u16 ch_status,
1398                                              void *data),
1399                            int *chain_id, int no_of_chans, int chain_mode,
1400                            struct omap_dma_channel_params params)
1401 {
1402         int *channels;
1403         int i, err;
1404
1405         /* Is the chain mode valid ? */
1406         if (chain_mode != OMAP_DMA_STATIC_CHAIN
1407                         && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1408                 printk(KERN_ERR "Invalid chain mode requested\n");
1409                 return -EINVAL;
1410         }
1411
1412         if (unlikely((no_of_chans < 1
1413                         || no_of_chans > dma_lch_count))) {
1414                 printk(KERN_ERR "Invalid Number of channels requested\n");
1415                 return -EINVAL;
1416         }
1417
1418         /*
1419          * Allocate a queue to maintain the status of the channels
1420          * in the chain
1421          */
1422         channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1423         if (channels == NULL) {
1424                 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1425                 return -ENOMEM;
1426         }
1427
1428         /* request and reserve DMA channels for the chain */
1429         for (i = 0; i < no_of_chans; i++) {
1430                 err = omap_request_dma(dev_id, dev_name,
1431                                         callback, NULL, &channels[i]);
1432                 if (err < 0) {
1433                         int j;
1434                         for (j = 0; j < i; j++)
1435                                 omap_free_dma(channels[j]);
1436                         kfree(channels);
1437                         printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1438                         return err;
1439                 }
1440                 dma_chan[channels[i]].prev_linked_ch = -1;
1441                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1442
1443                 /*
1444                  * Allowing client drivers to set common parameters now,
1445                  * so that later only relevant (src_start, dest_start
1446                  * and element count) can be set
1447                  */
1448                 omap_set_dma_params(channels[i], &params);
1449         }
1450
1451         *chain_id = channels[0];
1452         dma_linked_lch[*chain_id].linked_dmach_q = channels;
1453         dma_linked_lch[*chain_id].chain_mode = chain_mode;
1454         dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1455         dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1456
1457         for (i = 0; i < no_of_chans; i++)
1458                 dma_chan[channels[i]].chain_id = *chain_id;
1459
1460         /* Reset the Queue pointers */
1461         OMAP_DMA_CHAIN_QINIT(*chain_id);
1462
1463         /* Set up the chain */
1464         if (no_of_chans == 1)
1465                 create_dma_lch_chain(channels[0], channels[0]);
1466         else {
1467                 for (i = 0; i < (no_of_chans - 1); i++)
1468                         create_dma_lch_chain(channels[i], channels[i + 1]);
1469         }
1470
1471         return 0;
1472 }
1473 EXPORT_SYMBOL(omap_request_dma_chain);
1474
1475 /**
1476  * @brief omap_modify_dma_chain_param : Modify the chain's params - Modify the
1477  * params after setting it. Dont do this while dma is running!!
1478  *
1479  * @param chain_id - Chained logical channel id.
1480  * @param params
1481  *
1482  * @return - Success : 0
1483  *           Failure : -EINVAL
1484  */
1485 int omap_modify_dma_chain_params(int chain_id,
1486                                 struct omap_dma_channel_params params)
1487 {
1488         int *channels;
1489         u32 i;
1490
1491         /* Check for input params */
1492         if (unlikely((chain_id < 0
1493                         || chain_id >= dma_lch_count))) {
1494                 printk(KERN_ERR "Invalid chain id\n");
1495                 return -EINVAL;
1496         }
1497
1498         /* Check if the chain exists */
1499         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1500                 printk(KERN_ERR "Chain doesn't exists\n");
1501                 return -EINVAL;
1502         }
1503         channels = dma_linked_lch[chain_id].linked_dmach_q;
1504
1505         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1506                 /*
1507                  * Allowing client drivers to set common parameters now,
1508                  * so that later only relevant (src_start, dest_start
1509                  * and element count) can be set
1510                  */
1511                 omap_set_dma_params(channels[i], &params);
1512         }
1513
1514         return 0;
1515 }
1516 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1517
1518 /**
1519  * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1520  *
1521  * @param chain_id
1522  *
1523  * @return - Success : 0
1524  *           Failure : -EINVAL
1525  */
1526 int omap_free_dma_chain(int chain_id)
1527 {
1528         int *channels;
1529         u32 i;
1530
1531         /* Check for input params */
1532         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1533                 printk(KERN_ERR "Invalid chain id\n");
1534                 return -EINVAL;
1535         }
1536
1537         /* Check if the chain exists */
1538         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1539                 printk(KERN_ERR "Chain doesn't exists\n");
1540                 return -EINVAL;
1541         }
1542
1543         channels = dma_linked_lch[chain_id].linked_dmach_q;
1544         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1545                 dma_chan[channels[i]].next_linked_ch = -1;
1546                 dma_chan[channels[i]].prev_linked_ch = -1;
1547                 dma_chan[channels[i]].chain_id = -1;
1548                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1549                 omap_free_dma(channels[i]);
1550         }
1551
1552         kfree(channels);
1553
1554         dma_linked_lch[chain_id].linked_dmach_q = NULL;
1555         dma_linked_lch[chain_id].chain_mode = -1;
1556         dma_linked_lch[chain_id].chain_state = -1;
1557
1558         return (0);
1559 }
1560 EXPORT_SYMBOL(omap_free_dma_chain);
1561
1562 /**
1563  * @brief omap_dma_chain_status - Check if the chain is in
1564  * active / inactive state.
1565  * @param chain_id
1566  *
1567  * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1568  *           Failure : -EINVAL
1569  */
1570 int omap_dma_chain_status(int chain_id)
1571 {
1572         /* Check for input params */
1573         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1574                 printk(KERN_ERR "Invalid chain id\n");
1575                 return -EINVAL;
1576         }
1577
1578         /* Check if the chain exists */
1579         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1580                 printk(KERN_ERR "Chain doesn't exists\n");
1581                 return -EINVAL;
1582         }
1583         pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1584                         dma_linked_lch[chain_id].q_count);
1585
1586         if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1587                 return OMAP_DMA_CHAIN_INACTIVE;
1588
1589         return OMAP_DMA_CHAIN_ACTIVE;
1590 }
1591 EXPORT_SYMBOL(omap_dma_chain_status);
1592
1593 /**
1594  * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1595  * set the params and start the transfer.
1596  *
1597  * @param chain_id
1598  * @param src_start - buffer start address
1599  * @param dest_start - Dest address
1600  * @param elem_count
1601  * @param frame_count
1602  * @param callbk_data - channel callback parameter data.
1603  *
1604  * @return  - Success : 0
1605  *            Failure: -EINVAL/-EBUSY
1606  */
1607 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1608                         int elem_count, int frame_count, void *callbk_data)
1609 {
1610         int *channels;
1611         u32 l, lch;
1612         int start_dma = 0;
1613
1614         /*
1615          * if buffer size is less than 1 then there is
1616          * no use of starting the chain
1617          */
1618         if (elem_count < 1) {
1619                 printk(KERN_ERR "Invalid buffer size\n");
1620                 return -EINVAL;
1621         }
1622
1623         /* Check for input params */
1624         if (unlikely((chain_id < 0
1625                         || chain_id >= dma_lch_count))) {
1626                 printk(KERN_ERR "Invalid chain id\n");
1627                 return -EINVAL;
1628         }
1629
1630         /* Check if the chain exists */
1631         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1632                 printk(KERN_ERR "Chain doesn't exist\n");
1633                 return -EINVAL;
1634         }
1635
1636         /* Check if all the channels in chain are in use */
1637         if (OMAP_DMA_CHAIN_QFULL(chain_id))
1638                 return -EBUSY;
1639
1640         /* Frame count may be negative in case of indexed transfers */
1641         channels = dma_linked_lch[chain_id].linked_dmach_q;
1642
1643         /* Get a free channel */
1644         lch = channels[dma_linked_lch[chain_id].q_tail];
1645
1646         /* Store the callback data */
1647         dma_chan[lch].data = callbk_data;
1648
1649         /* Increment the q_tail */
1650         OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1651
1652         /* Set the params to the free channel */
1653         if (src_start != 0)
1654                 dma_write(src_start, CSSA, lch);
1655         if (dest_start != 0)
1656                 dma_write(dest_start, CDSA, lch);
1657
1658         /* Write the buffer size */
1659         dma_write(elem_count, CEN, lch);
1660         dma_write(frame_count, CFN, lch);
1661
1662         /*
1663          * If the chain is dynamically linked,
1664          * then we may have to start the chain if its not active
1665          */
1666         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1667
1668                 /*
1669                  * In Dynamic chain, if the chain is not started,
1670                  * queue the channel
1671                  */
1672                 if (dma_linked_lch[chain_id].chain_state ==
1673                                                 DMA_CHAIN_NOTSTARTED) {
1674                         /* Enable the link in previous channel */
1675                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1676                                                                 DMA_CH_QUEUED)
1677                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1678                         dma_chan[lch].state = DMA_CH_QUEUED;
1679                 }
1680
1681                 /*
1682                  * Chain is already started, make sure its active,
1683                  * if not then start the chain
1684                  */
1685                 else {
1686                         start_dma = 1;
1687
1688                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1689                                                         DMA_CH_STARTED) {
1690                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1691                                 dma_chan[lch].state = DMA_CH_QUEUED;
1692                                 start_dma = 0;
1693                                 if (0 == ((1 << 7) & dma_read(
1694                                         CCR, dma_chan[lch].prev_linked_ch))) {
1695                                         disable_lnk(dma_chan[lch].
1696                                                     prev_linked_ch);
1697                                         pr_debug("\n prev ch is stopped\n");
1698                                         start_dma = 1;
1699                                 }
1700                         }
1701
1702                         else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1703                                                         == DMA_CH_QUEUED) {
1704                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1705                                 dma_chan[lch].state = DMA_CH_QUEUED;
1706                                 start_dma = 0;
1707                         }
1708                         omap_enable_channel_irq(lch);
1709
1710                         l = dma_read(CCR, lch);
1711
1712                         if ((0 == (l & (1 << 24))))
1713                                 l &= ~(1 << 25);
1714                         else
1715                                 l |= (1 << 25);
1716                         if (start_dma == 1) {
1717                                 if (0 == (l & (1 << 7))) {
1718                                         l |= (1 << 7);
1719                                         dma_chan[lch].state = DMA_CH_STARTED;
1720                                         pr_debug("starting %d\n", lch);
1721                                         dma_write(l, CCR, lch);
1722                                 } else
1723                                         start_dma = 0;
1724                         } else {
1725                                 if (0 == (l & (1 << 7)))
1726                                         dma_write(l, CCR, lch);
1727                         }
1728                         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1729                 }
1730         }
1731
1732         return 0;
1733 }
1734 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1735
1736 /**
1737  * @brief omap_start_dma_chain_transfers - Start the chain
1738  *
1739  * @param chain_id
1740  *
1741  * @return - Success : 0
1742  *           Failure : -EINVAL/-EBUSY
1743  */
1744 int omap_start_dma_chain_transfers(int chain_id)
1745 {
1746         int *channels;
1747         u32 l, i;
1748
1749         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1750                 printk(KERN_ERR "Invalid chain id\n");
1751                 return -EINVAL;
1752         }
1753
1754         channels = dma_linked_lch[chain_id].linked_dmach_q;
1755
1756         if (dma_linked_lch[channels[0]].chain_state == DMA_CHAIN_STARTED) {
1757                 printk(KERN_ERR "Chain is already started\n");
1758                 return -EBUSY;
1759         }
1760
1761         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1762                 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1763                                                                         i++) {
1764                         enable_lnk(channels[i]);
1765                         omap_enable_channel_irq(channels[i]);
1766                 }
1767         } else {
1768                 omap_enable_channel_irq(channels[0]);
1769         }
1770
1771         l = dma_read(CCR, channels[0]);
1772         l |= (1 << 7);
1773         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1774         dma_chan[channels[0]].state = DMA_CH_STARTED;
1775
1776         if ((0 == (l & (1 << 24))))
1777                 l &= ~(1 << 25);
1778         else
1779                 l |= (1 << 25);
1780         dma_write(l, CCR, channels[0]);
1781
1782         dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1783
1784         return 0;
1785 }
1786 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
1787
1788 /**
1789  * @brief omap_stop_dma_chain_transfers - Stop the dma transfer of a chain.
1790  *
1791  * @param chain_id
1792  *
1793  * @return - Success : 0
1794  *           Failure : EINVAL
1795  */
1796 int omap_stop_dma_chain_transfers(int chain_id)
1797 {
1798         int *channels;
1799         u32 l, i;
1800         u32 sys_cf = 0;
1801
1802         /* Check for input params */
1803         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1804                 printk(KERN_ERR "Invalid chain id\n");
1805                 return -EINVAL;
1806         }
1807
1808         /* Check if the chain exists */
1809         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1810                 printk(KERN_ERR "Chain doesn't exists\n");
1811                 return -EINVAL;
1812         }
1813         channels = dma_linked_lch[chain_id].linked_dmach_q;
1814
1815         if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1816                 sys_cf = dma_read(OCP_SYSCONFIG, 0);
1817                 l = sys_cf;
1818                 /* Middle mode reg set no Standby */
1819                 l &= ~((1 << 12)|(1 << 13));
1820                 dma_write(l, OCP_SYSCONFIG, 0);
1821         }
1822
1823         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1824
1825                 /* Stop the Channel transmission */
1826                 l = dma_read(CCR, channels[i]);
1827                 l &= ~(1 << 7);
1828                 dma_write(l, CCR, channels[i]);
1829
1830                 /* Disable the link in all the channels */
1831                 disable_lnk(channels[i]);
1832                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1833
1834         }
1835         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1836
1837         /* Reset the Queue pointers */
1838         OMAP_DMA_CHAIN_QINIT(chain_id);
1839
1840         if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1841                 dma_write(sys_cf, OCP_SYSCONFIG, 0);
1842
1843         return 0;
1844 }
1845 EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1846
1847 /* Get the index of the ongoing DMA in chain */
1848 /**
1849  * @brief omap_get_dma_chain_index - Get the element and frame index
1850  * of the ongoing DMA in chain
1851  *
1852  * @param chain_id
1853  * @param ei - Element index
1854  * @param fi - Frame index
1855  *
1856  * @return - Success : 0
1857  *           Failure : -EINVAL
1858  */
1859 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1860 {
1861         int lch;
1862         int *channels;
1863
1864         /* Check for input params */
1865         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1866                 printk(KERN_ERR "Invalid chain id\n");
1867                 return -EINVAL;
1868         }
1869
1870         /* Check if the chain exists */
1871         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1872                 printk(KERN_ERR "Chain doesn't exists\n");
1873                 return -EINVAL;
1874         }
1875         if ((!ei) || (!fi))
1876                 return -EINVAL;
1877
1878         channels = dma_linked_lch[chain_id].linked_dmach_q;
1879
1880         /* Get the current channel */
1881         lch = channels[dma_linked_lch[chain_id].q_head];
1882
1883         *ei = dma_read(CCEN, lch);
1884         *fi = dma_read(CCFN, lch);
1885
1886         return 0;
1887 }
1888 EXPORT_SYMBOL(omap_get_dma_chain_index);
1889
1890 /**
1891  * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1892  * ongoing DMA in chain
1893  *
1894  * @param chain_id
1895  *
1896  * @return - Success : Destination position
1897  *           Failure : -EINVAL
1898  */
1899 int omap_get_dma_chain_dst_pos(int chain_id)
1900 {
1901         int lch;
1902         int *channels;
1903
1904         /* Check for input params */
1905         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1906                 printk(KERN_ERR "Invalid chain id\n");
1907                 return -EINVAL;
1908         }
1909
1910         /* Check if the chain exists */
1911         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1912                 printk(KERN_ERR "Chain doesn't exists\n");
1913                 return -EINVAL;
1914         }
1915
1916         channels = dma_linked_lch[chain_id].linked_dmach_q;
1917
1918         /* Get the current channel */
1919         lch = channels[dma_linked_lch[chain_id].q_head];
1920
1921         return dma_read(CDAC, lch);
1922 }
1923 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1924
1925 /**
1926  * @brief omap_get_dma_chain_src_pos - Get the source position
1927  * of the ongoing DMA in chain
1928  * @param chain_id
1929  *
1930  * @return - Success : Destination position
1931  *           Failure : -EINVAL
1932  */
1933 int omap_get_dma_chain_src_pos(int chain_id)
1934 {
1935         int lch;
1936         int *channels;
1937
1938         /* Check for input params */
1939         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1940                 printk(KERN_ERR "Invalid chain id\n");
1941                 return -EINVAL;
1942         }
1943
1944         /* Check if the chain exists */
1945         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1946                 printk(KERN_ERR "Chain doesn't exists\n");
1947                 return -EINVAL;
1948         }
1949
1950         channels = dma_linked_lch[chain_id].linked_dmach_q;
1951
1952         /* Get the current channel */
1953         lch = channels[dma_linked_lch[chain_id].q_head];
1954
1955         return dma_read(CSAC, lch);
1956 }
1957 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
1958 #endif  /* ifndef CONFIG_ARCH_OMAP1 */
1959
1960 /*----------------------------------------------------------------------------*/
1961
1962 #ifdef CONFIG_ARCH_OMAP1
1963
1964 static int omap1_dma_handle_ch(int ch)
1965 {
1966         u32 csr;
1967
1968         if (enable_1510_mode && ch >= 6) {
1969                 csr = dma_chan[ch].saved_csr;
1970                 dma_chan[ch].saved_csr = 0;
1971         } else
1972                 csr = dma_read(CSR, ch);
1973         if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1974                 dma_chan[ch + 6].saved_csr = csr >> 7;
1975                 csr &= 0x7f;
1976         }
1977         if ((csr & 0x3f) == 0)
1978                 return 0;
1979         if (unlikely(dma_chan[ch].dev_id == -1)) {
1980                 printk(KERN_WARNING "Spurious interrupt from DMA channel "
1981                        "%d (CSR %04x)\n", ch, csr);
1982                 return 0;
1983         }
1984         if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
1985                 printk(KERN_WARNING "DMA timeout with device %d\n",
1986                        dma_chan[ch].dev_id);
1987         if (unlikely(csr & OMAP_DMA_DROP_IRQ))
1988                 printk(KERN_WARNING "DMA synchronization event drop occurred "
1989                        "with device %d\n", dma_chan[ch].dev_id);
1990         if (likely(csr & OMAP_DMA_BLOCK_IRQ))
1991                 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1992         if (likely(dma_chan[ch].callback != NULL))
1993                 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
1994
1995         return 1;
1996 }
1997
1998 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1999 {
2000         int ch = ((int) dev_id) - 1;
2001         int handled = 0;
2002
2003         for (;;) {
2004                 int handled_now = 0;
2005
2006                 handled_now += omap1_dma_handle_ch(ch);
2007                 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
2008                         handled_now += omap1_dma_handle_ch(ch + 6);
2009                 if (!handled_now)
2010                         break;
2011                 handled += handled_now;
2012         }
2013
2014         return handled ? IRQ_HANDLED : IRQ_NONE;
2015 }
2016
2017 #else
2018 #define omap1_dma_irq_handler   NULL
2019 #endif
2020
2021 #ifdef CONFIG_ARCH_OMAP2PLUS
2022
2023 static int omap2_dma_handle_ch(int ch)
2024 {
2025         u32 status = dma_read(CSR, ch);
2026
2027         if (!status) {
2028                 if (printk_ratelimit())
2029                         printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
2030                                 ch);
2031                 dma_write(1 << ch, IRQSTATUS_L0, ch);
2032                 return 0;
2033         }
2034         if (unlikely(dma_chan[ch].dev_id == -1)) {
2035                 if (printk_ratelimit())
2036                         printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
2037                                         "channel %d\n", status, ch);
2038                 return 0;
2039         }
2040         if (unlikely(status & OMAP_DMA_DROP_IRQ))
2041                 printk(KERN_INFO
2042                        "DMA synchronization event drop occurred with device "
2043                        "%d\n", dma_chan[ch].dev_id);
2044         if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
2045                 printk(KERN_INFO "DMA transaction error with device %d\n",
2046                        dma_chan[ch].dev_id);
2047                 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
2048                         u32 ccr;
2049
2050                         ccr = dma_read(CCR, ch);
2051                         ccr &= ~OMAP_DMA_CCR_EN;
2052                         dma_write(ccr, CCR, ch);
2053                         dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
2054                 }
2055         }
2056         if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
2057                 printk(KERN_INFO "DMA secure error with device %d\n",
2058                        dma_chan[ch].dev_id);
2059         if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
2060                 printk(KERN_INFO "DMA misaligned error with device %d\n",
2061                        dma_chan[ch].dev_id);
2062
2063         dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, ch);
2064         dma_write(1 << ch, IRQSTATUS_L0, ch);
2065         /* read back the register to flush the write */
2066         dma_read(IRQSTATUS_L0, ch);
2067
2068         /* If the ch is not chained then chain_id will be -1 */
2069         if (dma_chan[ch].chain_id != -1) {
2070                 int chain_id = dma_chan[ch].chain_id;
2071                 dma_chan[ch].state = DMA_CH_NOTSTARTED;
2072                 if (dma_read(CLNK_CTRL, ch) & (1 << 15))
2073                         dma_chan[dma_chan[ch].next_linked_ch].state =
2074                                                         DMA_CH_STARTED;
2075                 if (dma_linked_lch[chain_id].chain_mode ==
2076                                                 OMAP_DMA_DYNAMIC_CHAIN)
2077                         disable_lnk(ch);
2078
2079                 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
2080                         OMAP_DMA_CHAIN_INCQHEAD(chain_id);
2081
2082                 status = dma_read(CSR, ch);
2083         }
2084
2085         dma_write(status, CSR, ch);
2086
2087         if (likely(dma_chan[ch].callback != NULL))
2088                 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
2089
2090         return 0;
2091 }
2092
2093 /* STATUS register count is from 1-32 while our is 0-31 */
2094 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
2095 {
2096         u32 val, enable_reg;
2097         int i;
2098
2099         val = dma_read(IRQSTATUS_L0, 0);
2100         if (val == 0) {
2101                 if (printk_ratelimit())
2102                         printk(KERN_WARNING "Spurious DMA IRQ\n");
2103                 return IRQ_HANDLED;
2104         }
2105         enable_reg = dma_read(IRQENABLE_L0, 0);
2106         val &= enable_reg; /* Dispatch only relevant interrupts */
2107         for (i = 0; i < dma_lch_count && val != 0; i++) {
2108                 if (val & 1)
2109                         omap2_dma_handle_ch(i);
2110                 val >>= 1;
2111         }
2112
2113         return IRQ_HANDLED;
2114 }
2115
2116 static struct irqaction omap24xx_dma_irq = {
2117         .name = "DMA",
2118         .handler = omap2_dma_irq_handler,
2119         .flags = IRQF_DISABLED
2120 };
2121
2122 #else
2123 static struct irqaction omap24xx_dma_irq;
2124 #endif
2125
2126 /*----------------------------------------------------------------------------*/
2127
2128 void omap_dma_global_context_save(void)
2129 {
2130         omap_dma_global_context.dma_irqenable_l0 =
2131                 dma_read(IRQENABLE_L0, 0);
2132         omap_dma_global_context.dma_ocp_sysconfig =
2133                 dma_read(OCP_SYSCONFIG, 0);
2134         omap_dma_global_context.dma_gcr = dma_read(GCR, 0);
2135 }
2136
2137 void omap_dma_global_context_restore(void)
2138 {
2139         int ch;
2140
2141         dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
2142         dma_write(omap_dma_global_context.dma_ocp_sysconfig,
2143                 OCP_SYSCONFIG, 0);
2144         dma_write(omap_dma_global_context.dma_irqenable_l0,
2145                 IRQENABLE_L0, 0);
2146
2147         if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
2148                 dma_write(0x3 , IRQSTATUS_L0, 0);
2149
2150         for (ch = 0; ch < dma_chan_count; ch++)
2151                 if (dma_chan[ch].dev_id != -1)
2152                         omap_clear_dma(ch);
2153 }
2154
2155 static void configure_dma_errata(void)
2156 {
2157
2158         /*
2159          * Errata applicable for OMAP2430ES1.0 and all omap2420
2160          *
2161          * I.
2162          * Erratum ID: Not Available
2163          * Inter Frame DMA buffering issue DMA will wrongly
2164          * buffer elements if packing and bursting is enabled. This might
2165          * result in data gets stalled in FIFO at the end of the block.
2166          * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
2167          * guarantee no data will stay in the DMA FIFO in case inter frame
2168          * buffering occurs
2169          *
2170          * II.
2171          * Erratum ID: Not Available
2172          * DMA may hang when several channels are used in parallel
2173          * In the following configuration, DMA channel hanging can occur:
2174          * a. Channel i, hardware synchronized, is enabled
2175          * b. Another channel (Channel x), software synchronized, is enabled.
2176          * c. Channel i is disabled before end of transfer
2177          * d. Channel i is reenabled.
2178          * e. Steps 1 to 4 are repeated a certain number of times.
2179          * f. A third channel (Channel y), software synchronized, is enabled.
2180          * Channel x and Channel y may hang immediately after step 'f'.
2181          * Workaround:
2182          * For any channel used - make sure NextLCH_ID is set to the value j.
2183          */
2184         if (cpu_is_omap2420() || (cpu_is_omap2430() &&
2185                                 (omap_type() == OMAP2430_REV_ES1_0))) {
2186                 SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
2187                 SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
2188         }
2189
2190         /*
2191          * Erratum ID: i378: OMAP2plus: sDMA Channel is not disabled
2192          * after a transaction error.
2193          * Workaround: SW should explicitely disable the channel.
2194          */
2195         if (cpu_class_is_omap2())
2196                 SET_DMA_ERRATA(DMA_ERRATA_i378);
2197
2198         /*
2199          * Erratum ID: i541: sDMA FIFO draining does not finish
2200          * If sDMA channel is disabled on the fly, sDMA enters standby even
2201          * through FIFO Drain is still in progress
2202          * Workaround: Put sDMA in NoStandby more before a logical channel is
2203          * disabled, then put it back to SmartStandby right after the channel
2204          * finishes FIFO draining.
2205          */
2206         if (cpu_is_omap34xx())
2207                 SET_DMA_ERRATA(DMA_ERRATA_i541);
2208
2209         /*
2210          * Erratum ID: i88 : Special programming model needed to disable DMA
2211          * before end of block.
2212          * Workaround: software must ensure that the DMA is configured in No
2213          * Standby mode(DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
2214          */
2215         if (cpu_is_omap34xx() && (omap_type() == OMAP3430_REV_ES1_0))
2216                 SET_DMA_ERRATA(DMA_ERRATA_i88);
2217
2218         /*
2219          * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
2220          * read before the DMA controller finished disabling the channel.
2221          */
2222         if (!cpu_is_omap15xx())
2223                 SET_DMA_ERRATA(DMA_ERRATA_3_3);
2224
2225         /*
2226          * Erratum ID: Not Available
2227          * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
2228          * after secure sram context save and restore.
2229          * Work around: Hence we need to manually clear those IRQs to avoid
2230          * spurious interrupts. This affects only secure devices.
2231          */
2232         if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
2233                 SET_DMA_ERRATA(DMA_ROMCODE_BUG);
2234 }
2235
2236 /*----------------------------------------------------------------------------*/
2237
2238 static int __init omap_init_dma(void)
2239 {
2240         unsigned long base;
2241         int ch, r;
2242
2243         if (cpu_class_is_omap1()) {
2244                 base = OMAP1_DMA_BASE;
2245                 dma_lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
2246         } else if (cpu_is_omap24xx()) {
2247                 base = OMAP24XX_DMA4_BASE;
2248                 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2249         } else if (cpu_is_omap34xx()) {
2250                 base = OMAP34XX_DMA4_BASE;
2251                 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2252         } else if (cpu_is_omap44xx()) {
2253                 base = OMAP44XX_DMA4_BASE;
2254                 dma_lch_count = OMAP_DMA4_LOGICAL_DMA_CH_COUNT;
2255         } else {
2256                 pr_err("DMA init failed for unsupported omap\n");
2257                 return -ENODEV;
2258         }
2259
2260         omap_dma_base = ioremap(base, SZ_4K);
2261         BUG_ON(!omap_dma_base);
2262
2263         if (cpu_class_is_omap1()) {
2264                 dma_stride              = 0x40;
2265                 reg_map                 = reg_map_omap1;
2266                 dma_common_ch_start     = CPC;
2267                 dma_common_ch_end       = COLOR;
2268         } else {
2269                 dma_stride              = 0x60;
2270                 reg_map                 = reg_map_omap2;
2271                 dma_common_ch_start     = CSDP;
2272                 if (cpu_is_omap3630() || cpu_is_omap4430())
2273                         dma_common_ch_end = CCDN;
2274                 else
2275                         dma_common_ch_end = CCFN;
2276         }
2277
2278         if (cpu_class_is_omap2() && omap_dma_reserve_channels
2279                         && (omap_dma_reserve_channels <= dma_lch_count))
2280                 dma_lch_count = omap_dma_reserve_channels;
2281
2282         dma_chan = kzalloc(sizeof(struct omap_dma_lch) * dma_lch_count,
2283                                 GFP_KERNEL);
2284         if (!dma_chan) {
2285                 r = -ENOMEM;
2286                 goto out_unmap;
2287         }
2288
2289         if (cpu_class_is_omap2()) {
2290                 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2291                                                 dma_lch_count, GFP_KERNEL);
2292                 if (!dma_linked_lch) {
2293                         r = -ENOMEM;
2294                         goto out_free;
2295                 }
2296         }
2297
2298         if (cpu_is_omap15xx()) {
2299                 printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
2300                 dma_chan_count = 9;
2301                 enable_1510_mode = 1;
2302         } else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
2303                 printk(KERN_INFO "OMAP DMA hardware version %d\n",
2304                        dma_read(HW_ID, 0));
2305                 printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
2306                        dma_read(CAPS_0, 0), dma_read(CAPS_1, 0),
2307                        dma_read(CAPS_2, 0), dma_read(CAPS_3, 0),
2308                        dma_read(CAPS_4, 0));
2309                 if (!enable_1510_mode) {
2310                         u16 w;
2311
2312                         /* Disable OMAP 3.0/3.1 compatibility mode. */
2313                         w = dma_read(GSCR, 0);
2314                         w |= 1 << 3;
2315                         dma_write(w, GSCR, 0);
2316                         dma_chan_count = 16;
2317                 } else
2318                         dma_chan_count = 9;
2319         } else if (cpu_class_is_omap2()) {
2320                 u8 revision = dma_read(REVISION, 0) & 0xff;
2321                 printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
2322                        revision >> 4, revision & 0xf);
2323                 dma_chan_count = dma_lch_count;
2324         } else {
2325                 dma_chan_count = 0;
2326                 return 0;
2327         }
2328
2329         spin_lock_init(&dma_chan_lock);
2330
2331         for (ch = 0; ch < dma_chan_count; ch++) {
2332                 omap_clear_dma(ch);
2333                 if (cpu_class_is_omap2())
2334                         omap2_disable_irq_lch(ch);
2335
2336                 dma_chan[ch].dev_id = -1;
2337                 dma_chan[ch].next_lch = -1;
2338
2339                 if (ch >= 6 && enable_1510_mode)
2340                         continue;
2341
2342                 if (cpu_class_is_omap1()) {
2343                         /*
2344                          * request_irq() doesn't like dev_id (ie. ch) being
2345                          * zero, so we have to kludge around this.
2346                          */
2347                         r = request_irq(omap1_dma_irq[ch],
2348                                         omap1_dma_irq_handler, 0, "DMA",
2349                                         (void *) (ch + 1));
2350                         if (r != 0) {
2351                                 int i;
2352
2353                                 printk(KERN_ERR "unable to request IRQ %d "
2354                                        "for DMA (error %d)\n",
2355                                        omap1_dma_irq[ch], r);
2356                                 for (i = 0; i < ch; i++)
2357                                         free_irq(omap1_dma_irq[i],
2358                                                  (void *) (i + 1));
2359                                 goto out_free;
2360                         }
2361                 }
2362         }
2363
2364         if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx())
2365                 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2366                                 DMA_DEFAULT_FIFO_DEPTH, 0);
2367
2368         if (cpu_class_is_omap2()) {
2369                 int irq;
2370                 if (cpu_is_omap44xx())
2371                         irq = OMAP44XX_IRQ_SDMA_0;
2372                 else
2373                         irq = INT_24XX_SDMA_IRQ0;
2374                 setup_irq(irq, &omap24xx_dma_irq);
2375         }
2376
2377         if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
2378                 /* Enable smartidle idlemodes and autoidle */
2379                 u32 v = dma_read(OCP_SYSCONFIG, 0);
2380                 v &= ~(DMA_SYSCONFIG_MIDLEMODE_MASK |
2381                                 DMA_SYSCONFIG_SIDLEMODE_MASK |
2382                                 DMA_SYSCONFIG_AUTOIDLE);
2383                 v |= (DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2384                         DMA_SYSCONFIG_SIDLEMODE(DMA_IDLEMODE_SMARTIDLE) |
2385                         DMA_SYSCONFIG_AUTOIDLE);
2386                 dma_write(v , OCP_SYSCONFIG, 0);
2387                 /* reserve dma channels 0 and 1 in high security devices */
2388                 if (cpu_is_omap34xx() &&
2389                         (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2390                         printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2391                                         "HS ROM code\n");
2392                         dma_chan[0].dev_id = 0;
2393                         dma_chan[1].dev_id = 1;
2394                 }
2395         }
2396         configure_dma_errata();
2397
2398         return 0;
2399
2400 out_free:
2401         kfree(dma_chan);
2402
2403 out_unmap:
2404         iounmap(omap_dma_base);
2405
2406         return r;
2407 }
2408
2409 arch_initcall(omap_init_dma);
2410
2411 /*
2412  * Reserve the omap SDMA channels using cmdline bootarg
2413  * "omap_dma_reserve_ch=". The valid range is 1 to 32
2414  */
2415 static int __init omap_dma_cmdline_reserve_ch(char *str)
2416 {
2417         if (get_option(&str, &omap_dma_reserve_channels) != 1)
2418                 omap_dma_reserve_channels = 0;
2419         return 1;
2420 }
2421
2422 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
2423
2424