1 /*
2  * drivers/dma/imx-sdma.c
3  *
4  * This file contains a driver for the Freescale Smart DMA engine
5  *
6  * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
7  *
8  * Based on code from Freescale:
9  *
10  * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
11  *
12  * The code contained herein is licensed under the GNU General Public
13  * License. You may obtain a copy of the GNU General Public License
14  * Version 2 or later at the following locations:
15  *
16  * http://www.opensource.org/licenses/gpl-license.html
17  * http://www.gnu.org/copyleft/gpl.html
18  */
19
20 #include <linux/init.h>
21 #include <linux/module.h>
22 #include <linux/types.h>
23 #include <linux/bitops.h>
24 #include <linux/mm.h>
25 #include <linux/interrupt.h>
26 #include <linux/clk.h>
27 #include <linux/delay.h>
28 #include <linux/sched.h>
29 #include <linux/semaphore.h>
30 #include <linux/spinlock.h>
31 #include <linux/device.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/firmware.h>
34 #include <linux/slab.h>
35 #include <linux/platform_device.h>
36 #include <linux/dmaengine.h>
37 #include <linux/of.h>
38 #include <linux/of_device.h>
39
40 #include <asm/irq.h>
41 #include <mach/sdma.h>
42 #include <mach/dma.h>
43 #include <mach/hardware.h>
44
45 #include "dmaengine.h"
46
47 /* SDMA registers */
48 #define SDMA_H_C0PTR            0x000
49 #define SDMA_H_INTR             0x004
50 #define SDMA_H_STATSTOP         0x008
51 #define SDMA_H_START            0x00c
52 #define SDMA_H_EVTOVR           0x010
53 #define SDMA_H_DSPOVR           0x014
54 #define SDMA_H_HOSTOVR          0x018
55 #define SDMA_H_EVTPEND          0x01c
56 #define SDMA_H_DSPENBL          0x020
57 #define SDMA_H_RESET            0x024
58 #define SDMA_H_EVTERR           0x028
59 #define SDMA_H_INTRMSK          0x02c
60 #define SDMA_H_PSW              0x030
61 #define SDMA_H_EVTERRDBG        0x034
62 #define SDMA_H_CONFIG           0x038
63 #define SDMA_ONCE_ENB           0x040
64 #define SDMA_ONCE_DATA          0x044
65 #define SDMA_ONCE_INSTR         0x048
66 #define SDMA_ONCE_STAT          0x04c
67 #define SDMA_ONCE_CMD           0x050
68 #define SDMA_EVT_MIRROR         0x054
69 #define SDMA_ILLINSTADDR        0x058
70 #define SDMA_CHN0ADDR           0x05c
71 #define SDMA_ONCE_RTB           0x060
72 #define SDMA_XTRIG_CONF1        0x070
73 #define SDMA_XTRIG_CONF2        0x074
74 #define SDMA_CHNENBL0_IMX35     0x200
75 #define SDMA_CHNENBL0_IMX31     0x080
76 #define SDMA_CHNPRI_0           0x100
77
78 /*
79  * Buffer descriptor status values.
80  */
81 #define BD_DONE  0x01
82 #define BD_WRAP  0x02
83 #define BD_CONT  0x04
84 #define BD_INTR  0x08
85 #define BD_RROR  0x10
86 #define BD_LAST  0x20
87 #define BD_EXTD  0x80
88
89 /*
90  * Data Node descriptor status values.
91  */
92 #define DND_END_OF_FRAME  0x80
93 #define DND_END_OF_XFER   0x40
94 #define DND_DONE          0x20
95 #define DND_UNUSED        0x01
96
97 /*
98  * IPCV2 descriptor status values.
99  */
100 #define BD_IPCV2_END_OF_FRAME  0x40
101
102 #define IPCV2_MAX_NODES        50
103 /*
104  * Error bit set in the CCB status field by the SDMA,
105  * in setbd routine, in case of a transfer error
106  */
107 #define DATA_ERROR  0x10000000
108
109 /*
110  * Buffer descriptor commands.
111  */
112 #define C0_ADDR             0x01
113 #define C0_LOAD             0x02
114 #define C0_DUMP             0x03
115 #define C0_SETCTX           0x07
116 #define C0_GETCTX           0x03
117 #define C0_SETDM            0x01
118 #define C0_SETPM            0x04
119 #define C0_GETDM            0x02
120 #define C0_GETPM            0x08
121 /*
122  * Change endianness indicator in the BD command field
123  */
124 #define CHANGE_ENDIANNESS   0x80
125
126 /*
127  * Mode/Count of data node descriptors - IPCv2
128  */
129 struct sdma_mode_count {
130         u32 count   : 16; /* size of the buffer pointed by this BD */
131         u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
132         u32 command :  8; /* command mostly used for channel 0 */
133 };
134
135 /*
136  * Buffer descriptor
137  */
138 struct sdma_buffer_descriptor {
139         struct sdma_mode_count  mode;
140         u32 buffer_addr;        /* address of the buffer described */
141         u32 ext_buffer_addr;    /* extended buffer address */
142 } __attribute__ ((packed));
143
144 /**
145  * struct sdma_channel_control - Channel control Block
146  *
147  * @current_bd_ptr:     current buffer descriptor processed
148  * @base_bd_ptr:        first element of buffer descriptor array
149  * @unused:             padding. The SDMA engine expects an array of 16 byte
150  *                      control blocks
151  */
152 struct sdma_channel_control {
153         u32 current_bd_ptr;
154         u32 base_bd_ptr;
155         u32 unused[2];
156 } __attribute__ ((packed));
157
158 /**
159  * struct sdma_state_registers - SDMA context for a channel
160  *
161  * @pc:         program counter
162  * @t:          test bit: status of arithmetic & test instruction
163  * @rpc:        return program counter
164  * @sf:         source fault while loading data
165  * @spc:        loop start program counter
166  * @df:         destination fault while storing data
167  * @epc:        loop end program counter
168  * @lm:         loop mode
169  */
170 struct sdma_state_registers {
171         u32 pc     :14;
172         u32 unused1: 1;
173         u32 t      : 1;
174         u32 rpc    :14;
175         u32 unused0: 1;
176         u32 sf     : 1;
177         u32 spc    :14;
178         u32 unused2: 1;
179         u32 df     : 1;
180         u32 epc    :14;
181         u32 lm     : 2;
182 } __attribute__ ((packed));
183
184 /**
185  * struct sdma_context_data - sdma context specific to a channel
186  *
187  * @channel_state:      channel state bits
188  * @gReg:               general registers
189  * @mda:                burst dma destination address register
190  * @msa:                burst dma source address register
191  * @ms:                 burst dma status register
192  * @md:                 burst dma data register
193  * @pda:                peripheral dma destination address register
194  * @psa:                peripheral dma source address register
195  * @ps:                 peripheral dma status register
196  * @pd:                 peripheral dma data register
197  * @ca:                 CRC polynomial register
198  * @cs:                 CRC accumulator register
199  * @dda:                dedicated core destination address register
200  * @dsa:                dedicated core source address register
201  * @ds:                 dedicated core status register
202  * @dd:                 dedicated core data register
203  */
204 struct sdma_context_data {
205         struct sdma_state_registers  channel_state;
206         u32  gReg[8];
207         u32  mda;
208         u32  msa;
209         u32  ms;
210         u32  md;
211         u32  pda;
212         u32  psa;
213         u32  ps;
214         u32  pd;
215         u32  ca;
216         u32  cs;
217         u32  dda;
218         u32  dsa;
219         u32  ds;
220         u32  dd;
221         u32  scratch0;
222         u32  scratch1;
223         u32  scratch2;
224         u32  scratch3;
225         u32  scratch4;
226         u32  scratch5;
227         u32  scratch6;
228         u32  scratch7;
229 } __attribute__ ((packed));
230
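/*
 * One page of buffer descriptors is allocated per channel. With a typical
 * 4 KiB PAGE_SIZE and the 12 byte struct sdma_buffer_descriptor above this
 * allows up to 341 descriptors per channel.
 */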
231 #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
232
233 struct sdma_engine;
234
235 /**
236  * struct sdma_channel - housekeeping for a SDMA channel
237  *
238  * @sdma:               pointer to the SDMA engine for this channel
239  * @channel:            the channel number, matches dmaengine chan_id + 1
240  * @direction:          transfer type. Needed for setting SDMA script
241  * @peripheral_type:    Peripheral type. Needed for setting SDMA script
242  * @event_id0:          aka DMA request line
243  * @event_id1:          for channels that use 2 events
244  * @word_size:          peripheral access size
245  * @buf_tail:           ID of the buffer that was processed
246  * @done:               channel completion
247  * @num_bd:             number of descriptors currently in use (at most NUM_BD)
248  */
249 struct sdma_channel {
250         struct sdma_engine              *sdma;
251         unsigned int                    channel;
252         enum dma_transfer_direction             direction;
253         enum sdma_peripheral_type       peripheral_type;
254         unsigned int                    event_id0;
255         unsigned int                    event_id1;
256         enum dma_slave_buswidth         word_size;
257         unsigned int                    buf_tail;
258         struct completion               done;
259         unsigned int                    num_bd;
260         struct sdma_buffer_descriptor   *bd;
261         dma_addr_t                      bd_phys;
262         unsigned int                    pc_from_device, pc_to_device;
263         unsigned long                   flags;
264         dma_addr_t                      per_address;
265         unsigned long                   event_mask[2];
266         unsigned long                   watermark_level;
267         u32                             shp_addr, per_addr;
268         struct dma_chan                 chan;
269         spinlock_t                      lock;
270         struct dma_async_tx_descriptor  desc;
271         enum dma_status                 status;
272         unsigned int                    chn_count;
273         unsigned int                    chn_real_count;
274         struct tasklet_struct           tasklet;
275 };
276
277 #define IMX_DMA_SG_LOOP         BIT(0)
278
279 #define MAX_DMA_CHANNELS 32
280 #define MXC_SDMA_DEFAULT_PRIORITY 1
281 #define MXC_SDMA_MIN_PRIORITY 1
282 #define MXC_SDMA_MAX_PRIORITY 7
283
284 #define SDMA_FIRMWARE_MAGIC 0x414d4453
285
286 /**
287  * struct sdma_firmware_header - Layout of the firmware image
288  *
289  * @magic:              "SDMA"
290  * @version_major:      increased whenever layout of struct sdma_script_start_addrs
291  *                      changes.
292  * @version_minor:      firmware minor version (for binary compatible changes)
293  * @script_addrs_start: offset of struct sdma_script_start_addrs in this image
294  * @num_script_addrs:   Number of script addresses in this image
295  * @ram_code_start:     offset of SDMA ram image in this firmware image
296  * @ram_code_size:      size of SDMA ram image
297  * @script_addrs:       Stores the start address of the SDMA scripts
298  *                      (in SDMA memory space)
299  */
300 struct sdma_firmware_header {
301         u32     magic;
302         u32     version_major;
303         u32     version_minor;
304         u32     script_addrs_start;
305         u32     num_script_addrs;
306         u32     ram_code_start;
307         u32     ram_code_size;
308 };
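
/*
 * A firmware image consists of this header, followed by a struct
 * sdma_script_start_addrs table at @script_addrs_start and the RAM code
 * image at @ram_code_start; sdma_load_firmware() below resolves both
 * offsets relative to the header.
 */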
309
310 enum sdma_devtype {
311         IMX31_SDMA,     /* runs on i.mx31 */
312         IMX35_SDMA,     /* runs on i.mx35 and later */
313 };
314
315 struct sdma_engine {
316         struct device                   *dev;
317         struct device_dma_parameters    dma_parms;
318         struct sdma_channel             channel[MAX_DMA_CHANNELS];
319         struct sdma_channel_control     *channel_control;
320         void __iomem                    *regs;
321         enum sdma_devtype               devtype;
322         unsigned int                    num_events;
323         struct sdma_context_data        *context;
324         dma_addr_t                      context_phys;
325         struct dma_device               dma_device;
326         struct clk                      *clk;
327         spinlock_t                      channel_0_lock;
328         struct sdma_script_start_addrs  *script_addrs;
329 };
330
331 static struct platform_device_id sdma_devtypes[] = {
332         {
333                 .name = "imx31-sdma",
334                 .driver_data = IMX31_SDMA,
335         }, {
336                 .name = "imx35-sdma",
337                 .driver_data = IMX35_SDMA,
338         }, {
339                 /* sentinel */
340         }
341 };
342 MODULE_DEVICE_TABLE(platform, sdma_devtypes);
343
344 static const struct of_device_id sdma_dt_ids[] = {
345         { .compatible = "fsl,imx31-sdma", .data = &sdma_devtypes[IMX31_SDMA], },
346         { .compatible = "fsl,imx35-sdma", .data = &sdma_devtypes[IMX35_SDMA], },
347         { /* sentinel */ }
348 };
349 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
350
351 #define SDMA_H_CONFIG_DSPDMA    BIT(12) /* indicates if the DSPDMA is used */
352 #define SDMA_H_CONFIG_RTD_PINS  BIT(11) /* indicates if Real-Time Debug pins are enabled */
353 #define SDMA_H_CONFIG_ACR       BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
354 #define SDMA_H_CONFIG_CSM       (3)       /* indicates which context switch mode is selected*/
355
356 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
357 {
358         u32 chnenbl0 = (sdma->devtype == IMX31_SDMA ? SDMA_CHNENBL0_IMX31 :
359                                                       SDMA_CHNENBL0_IMX35);
360         return chnenbl0 + event * 4;
361 }
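
/*
 * Example: on i.MX35 the enable bits for DMA event 24 live at offset
 * 0x200 + 24 * 4 = 0x260, while on i.MX31 the same event maps to
 * 0x080 + 24 * 4 = 0x0e0.
 */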
362
363 static int sdma_config_ownership(struct sdma_channel *sdmac,
364                 bool event_override, bool mcu_override, bool dsp_override)
365 {
366         struct sdma_engine *sdma = sdmac->sdma;
367         int channel = sdmac->channel;
368         unsigned long evt, mcu, dsp;
369
370         if (event_override && mcu_override && dsp_override)
371                 return -EINVAL;
372
373         evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
374         mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
375         dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
376
377         if (dsp_override)
378                 __clear_bit(channel, &dsp);
379         else
380                 __set_bit(channel, &dsp);
381
382         if (event_override)
383                 __clear_bit(channel, &evt);
384         else
385                 __set_bit(channel, &evt);
386
387         if (mcu_override)
388                 __clear_bit(channel, &mcu);
389         else
390                 __set_bit(channel, &mcu);
391
392         writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
393         writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
394         writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
395
396         return 0;
397 }
398
399 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
400 {
401         writel(BIT(channel), sdma->regs + SDMA_H_START);
402 }
403
404 /*
405  * sdma_run_channel0 - run a channel and wait till it's done
406  */
407 static int sdma_run_channel0(struct sdma_engine *sdma)
408 {
409         int ret;
410         unsigned long timeout = 500;
411
412         sdma_enable_channel(sdma, 0);
413
414         while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
415                 if (timeout-- <= 0)
416                         break;
417                 udelay(1);
418         }
419
420         if (ret) {
421                 /* Clear the interrupt status */
422                 writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
423         } else {
424                 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
425         }
426
427         return ret ? 0 : -ETIMEDOUT;
428 }
429
430 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
431                 u32 address)
432 {
433         struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
434         void *buf_virt;
435         dma_addr_t buf_phys;
436         int ret;
437         unsigned long flags;
438
439         buf_virt = dma_alloc_coherent(NULL,
440                         size,
441                         &buf_phys, GFP_KERNEL);
442         if (!buf_virt) {
443                 return -ENOMEM;
444         }
445
446         spin_lock_irqsave(&sdma->channel_0_lock, flags);
447
448         bd0->mode.command = C0_SETPM;
449         bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
450         bd0->mode.count = size / 2;
451         bd0->buffer_addr = buf_phys;
452         bd0->ext_buffer_addr = address;
453
454         memcpy(buf_virt, buf, size);
455
456         ret = sdma_run_channel0(sdma);
457
458         spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
459
460         dma_free_coherent(NULL, size, buf_virt, buf_phys);
461
462         return ret;
463 }
464
465 static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
466 {
467         struct sdma_engine *sdma = sdmac->sdma;
468         int channel = sdmac->channel;
469         unsigned long val;
470         u32 chnenbl = chnenbl_ofs(sdma, event);
471
472         val = readl_relaxed(sdma->regs + chnenbl);
473         __set_bit(channel, &val);
474         writel_relaxed(val, sdma->regs + chnenbl);
475 }
476
477 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
478 {
479         struct sdma_engine *sdma = sdmac->sdma;
480         int channel = sdmac->channel;
481         u32 chnenbl = chnenbl_ofs(sdma, event);
482         unsigned long val;
483
484         val = readl_relaxed(sdma->regs + chnenbl);
485         __clear_bit(channel, &val);
486         writel_relaxed(val, sdma->regs + chnenbl);
487 }
488
489 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
490 {
491         struct sdma_buffer_descriptor *bd;
492
493         /*
494          * loop mode. Iterate over descriptors, re-setup them and
495          * call callback function.
496          */
497         while (1) {
498                 bd = &sdmac->bd[sdmac->buf_tail];
499
500                 if (bd->mode.status & BD_DONE)
501                         break;
502
503                 if (bd->mode.status & BD_RROR)
504                         sdmac->status = DMA_ERROR;
505                 else
506                         sdmac->status = DMA_IN_PROGRESS;
507
508                 bd->mode.status |= BD_DONE;
509                 sdmac->buf_tail++;
510                 sdmac->buf_tail %= sdmac->num_bd;
511
512                 if (sdmac->desc.callback)
513                         sdmac->desc.callback(sdmac->desc.callback_param);
514         }
515 }
516
517 static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
518 {
519         struct sdma_buffer_descriptor *bd;
520         int i, error = 0;
521
522         sdmac->chn_real_count = 0;
523         /*
524          * non loop mode. Iterate over all descriptors, collect
525          * errors and call callback function
526          */
527         for (i = 0; i < sdmac->num_bd; i++) {
528                 bd = &sdmac->bd[i];
529
530                 if (bd->mode.status & (BD_DONE | BD_RROR))
531                         error = -EIO;
532                 sdmac->chn_real_count += bd->mode.count;
533         }
534
535         if (error)
536                 sdmac->status = DMA_ERROR;
537         else
538                 sdmac->status = DMA_SUCCESS;
539
540         dma_cookie_complete(&sdmac->desc);
541         if (sdmac->desc.callback)
542                 sdmac->desc.callback(sdmac->desc.callback_param);
543 }
544
545 static void sdma_tasklet(unsigned long data)
546 {
547         struct sdma_channel *sdmac = (struct sdma_channel *) data;
548
549         complete(&sdmac->done);
550
551         if (sdmac->flags & IMX_DMA_SG_LOOP)
552                 sdma_handle_channel_loop(sdmac);
553         else
554                 mxc_sdma_handle_channel_normal(sdmac);
555 }
556
557 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
558 {
559         struct sdma_engine *sdma = dev_id;
560         unsigned long stat;
561
562         stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
563         /* not interested in channel 0 interrupts */
564         stat &= ~1;
565         writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
566
567         while (stat) {
568                 int channel = fls(stat) - 1;
569                 struct sdma_channel *sdmac = &sdma->channel[channel];
570
571                 tasklet_schedule(&sdmac->tasklet);
572
573                 __clear_bit(channel, &stat);
574         }
575
576         return IRQ_HANDLED;
577 }
578
579 /*
580  * sets the pc of SDMA script according to the peripheral type
581  */
582 static void sdma_get_pc(struct sdma_channel *sdmac,
583                 enum sdma_peripheral_type peripheral_type)
584 {
585         struct sdma_engine *sdma = sdmac->sdma;
586         int per_2_emi = 0, emi_2_per = 0;
587         /*
588          * These are needed once we start to support transfers between
589          * two peripherals or memory-to-memory transfers
590          */
591         int per_2_per = 0, emi_2_emi = 0;
592
593         sdmac->pc_from_device = 0;
594         sdmac->pc_to_device = 0;
595
596         switch (peripheral_type) {
597         case IMX_DMATYPE_MEMORY:
598                 emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
599                 break;
600         case IMX_DMATYPE_DSP:
601                 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
602                 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
603                 break;
604         case IMX_DMATYPE_FIRI:
605                 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
606                 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
607                 break;
608         case IMX_DMATYPE_UART:
609                 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
610                 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
611                 break;
612         case IMX_DMATYPE_UART_SP:
613                 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
614                 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
615                 break;
616         case IMX_DMATYPE_ATA:
617                 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
618                 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
619                 break;
620         case IMX_DMATYPE_CSPI:
621         case IMX_DMATYPE_EXT:
622         case IMX_DMATYPE_SSI:
623                 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
624                 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
625                 break;
626         case IMX_DMATYPE_SSI_SP:
627         case IMX_DMATYPE_MMC:
628         case IMX_DMATYPE_SDHC:
629         case IMX_DMATYPE_CSPI_SP:
630         case IMX_DMATYPE_ESAI:
631         case IMX_DMATYPE_MSHC_SP:
632                 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
633                 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
634                 break;
635         case IMX_DMATYPE_ASRC:
636                 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
637                 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
638                 per_2_per = sdma->script_addrs->per_2_per_addr;
639                 break;
640         case IMX_DMATYPE_MSHC:
641                 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
642                 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
643                 break;
644         case IMX_DMATYPE_CCM:
645                 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
646                 break;
647         case IMX_DMATYPE_SPDIF:
648                 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
649                 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
650                 break;
651         case IMX_DMATYPE_IPU_MEMORY:
652                 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
653                 break;
654         default:
655                 break;
656         }
657
658         sdmac->pc_from_device = per_2_emi;
659         sdmac->pc_to_device = emi_2_per;
660 }
661
662 static int sdma_load_context(struct sdma_channel *sdmac)
663 {
664         struct sdma_engine *sdma = sdmac->sdma;
665         int channel = sdmac->channel;
666         int load_address;
667         struct sdma_context_data *context = sdma->context;
668         struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
669         int ret;
670         unsigned long flags;
671
672         if (sdmac->direction == DMA_DEV_TO_MEM) {
673                 load_address = sdmac->pc_from_device;
674         } else {
675                 load_address = sdmac->pc_to_device;
676         }
677
678         if (load_address < 0)
679                 return load_address;
680
681         dev_dbg(sdma->dev, "load_address = %d\n", load_address);
682         dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
683         dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
684         dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
685         dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
686         dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
687
688         spin_lock_irqsave(&sdma->channel_0_lock, flags);
689
690         memset(context, 0, sizeof(*context));
691         context->channel_state.pc = load_address;
692
693         /* Pass the event mask, peripheral base address and watermark
694          * level to the script through the channel context.
695          */
696         context->gReg[0] = sdmac->event_mask[1];
697         context->gReg[1] = sdmac->event_mask[0];
698         context->gReg[2] = sdmac->per_addr;
699         context->gReg[6] = sdmac->shp_addr;
700         context->gReg[7] = sdmac->watermark_level;
701
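        /*
         * Channel 0 copies the context built above into SDMA internal RAM
         * with a C0_SETDM command: mode.count 32-bit words are read from
         * buffer_addr (the ARM-side context buffer) and written to the
         * SDMA word address in ext_buffer_addr, i.e. the per-channel
         * context area starting at word offset 2048.
         */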
702         bd0->mode.command = C0_SETDM;
703         bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
704         bd0->mode.count = sizeof(*context) / 4;
705         bd0->buffer_addr = sdma->context_phys;
706         bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
707         ret = sdma_run_channel0(sdma);
708
709         spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
710
711         return ret;
712 }
713
714 static void sdma_disable_channel(struct sdma_channel *sdmac)
715 {
716         struct sdma_engine *sdma = sdmac->sdma;
717         int channel = sdmac->channel;
718
719         writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
720         sdmac->status = DMA_ERROR;
721 }
722
723 static int sdma_config_channel(struct sdma_channel *sdmac)
724 {
725         int ret;
726
727         sdma_disable_channel(sdmac);
728
729         sdmac->event_mask[0] = 0;
730         sdmac->event_mask[1] = 0;
731         sdmac->shp_addr = 0;
732         sdmac->per_addr = 0;
733
734         if (sdmac->event_id0) {
735                 if (sdmac->event_id0 >= sdmac->sdma->num_events)
736                         return -EINVAL;
737                 sdma_event_enable(sdmac, sdmac->event_id0);
738         }
739
740         switch (sdmac->peripheral_type) {
741         case IMX_DMATYPE_DSP:
742                 sdma_config_ownership(sdmac, false, true, true);
743                 break;
744         case IMX_DMATYPE_MEMORY:
745                 sdma_config_ownership(sdmac, false, true, false);
746                 break;
747         default:
748                 sdma_config_ownership(sdmac, true, true, false);
749                 break;
750         }
751
752         sdma_get_pc(sdmac, sdmac->peripheral_type);
753
754         if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
755                         (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
756                 /* Handle multiple event channels differently */
757                 if (sdmac->event_id1) {
758                         sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
759                         if (sdmac->event_id1 > 31)
760                                 __set_bit(31, &sdmac->watermark_level);
761                         sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
762                         if (sdmac->event_id0 > 31)
763                                 __set_bit(30, &sdmac->watermark_level);
764                 } else {
765                         __set_bit(sdmac->event_id0, sdmac->event_mask);
766                 }
767                 /* Watermark Level */
768                 sdmac->watermark_level |= sdmac->watermark_level;
769                 /* Address */
770                 sdmac->shp_addr = sdmac->per_address;
771         } else {
772                 sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
773         }
774
775         ret = sdma_load_context(sdmac);
776
777         return ret;
778 }
779
780 static int sdma_set_channel_priority(struct sdma_channel *sdmac,
781                 unsigned int priority)
782 {
783         struct sdma_engine *sdma = sdmac->sdma;
784         int channel = sdmac->channel;
785
786         if (priority < MXC_SDMA_MIN_PRIORITY
787             || priority > MXC_SDMA_MAX_PRIORITY) {
788                 return -EINVAL;
789         }
790
791         writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
792
793         return 0;
794 }
795
796 static int sdma_request_channel(struct sdma_channel *sdmac)
797 {
798         struct sdma_engine *sdma = sdmac->sdma;
799         int channel = sdmac->channel;
800         int ret = -EBUSY;
801
802         sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL);
803         if (!sdmac->bd) {
804                 ret = -ENOMEM;
805                 goto out;
806         }
807
808         memset(sdmac->bd, 0, PAGE_SIZE);
809
810         sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
811         sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
812
813         sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
814
815         init_completion(&sdmac->done);
816
817         sdmac->buf_tail = 0;
818
819         return 0;
820 out:
821
822         return ret;
823 }
824
825 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
826 {
827         return container_of(chan, struct sdma_channel, chan);
828 }
829
830 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
831 {
832         unsigned long flags;
833         struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
834         dma_cookie_t cookie;
835
836         spin_lock_irqsave(&sdmac->lock, flags);
837
838         cookie = dma_cookie_assign(tx);
839
840         spin_unlock_irqrestore(&sdmac->lock, flags);
841
842         return cookie;
843 }
844
845 static int sdma_alloc_chan_resources(struct dma_chan *chan)
846 {
847         struct sdma_channel *sdmac = to_sdma_chan(chan);
848         struct imx_dma_data *data = chan->private;
849         int prio, ret;
850
851         if (!data)
852                 return -EINVAL;
853
854         switch (data->priority) {
855         case DMA_PRIO_HIGH:
856                 prio = 3;
857                 break;
858         case DMA_PRIO_MEDIUM:
859                 prio = 2;
860                 break;
861         case DMA_PRIO_LOW:
862         default:
863                 prio = 1;
864                 break;
865         }
866
867         sdmac->peripheral_type = data->peripheral_type;
868         sdmac->event_id0 = data->dma_request;
869
870         clk_enable(sdmac->sdma->clk);
871
872         ret = sdma_request_channel(sdmac);
873         if (ret)
874                 return ret;
875
876         ret = sdma_set_channel_priority(sdmac, prio);
877         if (ret)
878                 return ret;
879
880         dma_async_tx_descriptor_init(&sdmac->desc, chan);
881         sdmac->desc.tx_submit = sdma_tx_submit;
882         /* txd.flags will be overwritten in prep funcs */
883         sdmac->desc.flags = DMA_CTRL_ACK;
884
885         return 0;
886 }
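
/*
 * Illustrative sketch (not built): one way a client could obtain an SDMA
 * channel through the dmaengine filter mechanism. The peripheral type,
 * request line and priority are placeholders, and the function names are
 * examples only, not part of this driver.
 */
#if 0
static bool example_sdma_filter(struct dma_chan *chan, void *param)
{
        /* Hand the slave parameters to sdma_alloc_chan_resources(). */
        chan->private = param;
        return true;
}

static struct dma_chan *example_request_sdma_channel(void)
{
        static struct imx_dma_data data = {
                .peripheral_type = IMX_DMATYPE_SSI,     /* placeholder */
                .dma_request = 1,                       /* placeholder event */
                .priority = DMA_PRIO_MEDIUM,
        };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, example_sdma_filter, &data);
}
#endif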
887
888 static void sdma_free_chan_resources(struct dma_chan *chan)
889 {
890         struct sdma_channel *sdmac = to_sdma_chan(chan);
891         struct sdma_engine *sdma = sdmac->sdma;
892
893         sdma_disable_channel(sdmac);
894
895         if (sdmac->event_id0)
896                 sdma_event_disable(sdmac, sdmac->event_id0);
897         if (sdmac->event_id1)
898                 sdma_event_disable(sdmac, sdmac->event_id1);
899
900         sdmac->event_id0 = 0;
901         sdmac->event_id1 = 0;
902
903         sdma_set_channel_priority(sdmac, 0);
904
905         dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
906
907         clk_disable(sdma->clk);
908 }
909
910 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
911                 struct dma_chan *chan, struct scatterlist *sgl,
912                 unsigned int sg_len, enum dma_transfer_direction direction,
913                 unsigned long flags, void *context)
914 {
915         struct sdma_channel *sdmac = to_sdma_chan(chan);
916         struct sdma_engine *sdma = sdmac->sdma;
917         int ret, i, count;
918         int channel = sdmac->channel;
919         struct scatterlist *sg;
920
921         if (sdmac->status == DMA_IN_PROGRESS)
922                 return NULL;
923         sdmac->status = DMA_IN_PROGRESS;
924
925         sdmac->flags = 0;
926
927         dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
928                         sg_len, channel);
929
930         sdmac->direction = direction;
931         ret = sdma_load_context(sdmac);
932         if (ret)
933                 goto err_out;
934
935         if (sg_len > NUM_BD) {
936                 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
937                                 channel, sg_len, NUM_BD);
938                 ret = -EINVAL;
939                 goto err_out;
940         }
941
942         sdmac->chn_count = 0;
943         for_each_sg(sgl, sg, sg_len, i) {
944                 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
945                 int param;
946
947                 bd->buffer_addr = sg->dma_address;
948
949                 count = sg_dma_len(sg);
950
951                 if (count > 0xffff) {
952                         dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
953                                         channel, count, 0xffff);
954                         ret = -EINVAL;
955                         goto err_out;
956                 }
957
958                 bd->mode.count = count;
959                 sdmac->chn_count += count;
960
961                 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
962                         ret =  -EINVAL;
963                         goto err_out;
964                 }
965
966                 switch (sdmac->word_size) {
967                 case DMA_SLAVE_BUSWIDTH_4_BYTES:
968                         bd->mode.command = 0;
969                         if (count & 3 || sg->dma_address & 3)
970                                 goto err_out;
971                         break;
972                 case DMA_SLAVE_BUSWIDTH_2_BYTES:
973                         bd->mode.command = 2;
974                         if (count & 1 || sg->dma_address & 1)
975                                 goto err_out;
976                         break;
977                 case DMA_SLAVE_BUSWIDTH_1_BYTE:
978                         bd->mode.command = 1;
979                         break;
980                 default:
981                         goto err_out;
982                 }
983
984                 param = BD_DONE | BD_EXTD | BD_CONT;
985
986                 if (i + 1 == sg_len) {
987                         param |= BD_INTR;
988                         param |= BD_LAST;
989                         param &= ~BD_CONT;
990                 }
991
992                 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
993                                 i, count, sg->dma_address,
994                                 param & BD_WRAP ? "wrap" : "",
995                                 param & BD_INTR ? " intr" : "");
996
997                 bd->mode.status = param;
998         }
999
1000         sdmac->num_bd = sg_len;
1001         sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1002
1003         return &sdmac->desc;
1004 err_out:
1005         sdmac->status = DMA_ERROR;
1006         return NULL;
1007 }
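
/*
 * Illustrative sketch (not built): preparing and submitting a slave
 * scatter/gather transfer on a channel obtained as above. It assumes the
 * channel was already configured via DMA_SLAVE_CONFIG and that the
 * dmaengine_prep_slave_sg() wrapper is available; dev, sgl and nents are
 * placeholders supplied by the client.
 */
#if 0
static int example_submit_sg(struct dma_chan *chan, struct device *dev,
                struct scatterlist *sgl, unsigned int nents)
{
        struct dma_async_tx_descriptor *desc;
        int mapped;

        mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        if (!mapped)
                return -ENOMEM;

        desc = dmaengine_prep_slave_sg(chan, sgl, mapped, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
                return -EINVAL;
        }

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        return 0;
}
#endif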
1008
1009 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1010                 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1011                 size_t period_len, enum dma_transfer_direction direction,
1012                 void *context)
1013 {
1014         struct sdma_channel *sdmac = to_sdma_chan(chan);
1015         struct sdma_engine *sdma = sdmac->sdma;
1016         int num_periods = buf_len / period_len;
1017         int channel = sdmac->channel;
1018         int ret, i = 0, buf = 0;
1019
1020         dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1021
1022         if (sdmac->status == DMA_IN_PROGRESS)
1023                 return NULL;
1024
1025         sdmac->status = DMA_IN_PROGRESS;
1026
1027         sdmac->flags |= IMX_DMA_SG_LOOP;
1028         sdmac->direction = direction;
1029         ret = sdma_load_context(sdmac);
1030         if (ret)
1031                 goto err_out;
1032
1033         if (num_periods > NUM_BD) {
1034                 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1035                                 channel, num_periods, NUM_BD);
1036                 goto err_out;
1037         }
1038
1039         if (period_len > 0xffff) {
1040                 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1041                                 channel, period_len, 0xffff);
1042                 goto err_out;
1043         }
1044
1045         while (buf < buf_len) {
1046                 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1047                 int param;
1048
1049                 bd->buffer_addr = dma_addr;
1050
1051                 bd->mode.count = period_len;
1052
1053                 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1054                         goto err_out;
1055                 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1056                         bd->mode.command = 0;
1057                 else
1058                         bd->mode.command = sdmac->word_size;
1059
1060                 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1061                 if (i + 1 == num_periods)
1062                         param |= BD_WRAP;
1063
1064                 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
1065                                 i, period_len, dma_addr,
1066                                 param & BD_WRAP ? "wrap" : "",
1067                                 param & BD_INTR ? " intr" : "");
1068
1069                 bd->mode.status = param;
1070
1071                 dma_addr += period_len;
1072                 buf += period_len;
1073
1074                 i++;
1075         }
1076
1077         sdmac->num_bd = num_periods;
1078         sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1079
1080         return &sdmac->desc;
1081 err_out:
1082         sdmac->status = DMA_ERROR;
1083         return NULL;
1084 }
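
/*
 * Illustrative sketch (not built): setting up a cyclic (ring buffer)
 * transfer such as an audio capture stream. The device_prep_dma_cyclic
 * callback is invoked directly since a generic wrapper is not assumed
 * here; buf, buf_len, period_len and the callback are placeholders.
 */
#if 0
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
                size_t buf_len, size_t period_len,
                void (*period_done)(void *), void *arg)
{
        struct dma_async_tx_descriptor *desc;

        desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
                        period_len, DMA_DEV_TO_MEM, NULL);
        if (!desc)
                return -EINVAL;

        /* Called from sdma_tasklet() each time a period completes. */
        desc->callback = period_done;
        desc->callback_param = arg;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        return 0;
}
#endif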
1085
1086 static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1087                 unsigned long arg)
1088 {
1089         struct sdma_channel *sdmac = to_sdma_chan(chan);
1090         struct dma_slave_config *dmaengine_cfg = (void *)arg;
1091
1092         switch (cmd) {
1093         case DMA_TERMINATE_ALL:
1094                 sdma_disable_channel(sdmac);
1095                 return 0;
1096         case DMA_SLAVE_CONFIG:
1097                 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1098                         sdmac->per_address = dmaengine_cfg->src_addr;
1099                         sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1100                                                 dmaengine_cfg->src_addr_width;
1101                         sdmac->word_size = dmaengine_cfg->src_addr_width;
1102                 } else {
1103                         sdmac->per_address = dmaengine_cfg->dst_addr;
1104                         sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1105                                                 dmaengine_cfg->dst_addr_width;
1106                         sdmac->word_size = dmaengine_cfg->dst_addr_width;
1107                 }
1108                 sdmac->direction = dmaengine_cfg->direction;
1109                 return sdma_config_channel(sdmac);
1110         default:
1111                 return -ENOSYS;
1112         }
1113
1114         return -EINVAL;
1115 }
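
/*
 * Illustrative sketch (not built): the slave configuration a client might
 * pass before preparing transfers. For DMA_DEV_TO_MEM the watermark level
 * programmed above becomes src_maxburst * src_addr_width bytes, i.e.
 * 8 * 4 = 32 in this example. The FIFO address and burst size are
 * placeholders.
 */
#if 0
static int example_configure_channel(struct dma_chan *chan,
                dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction = DMA_DEV_TO_MEM,
                .src_addr = fifo_addr,          /* peripheral FIFO address */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = 8,              /* placeholder burst length */
        };

        return dmaengine_slave_config(chan, &cfg);
}
#endif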
1116
1117 static enum dma_status sdma_tx_status(struct dma_chan *chan,
1118                                             dma_cookie_t cookie,
1119                                             struct dma_tx_state *txstate)
1120 {
1121         struct sdma_channel *sdmac = to_sdma_chan(chan);
1122         dma_cookie_t last_used;
1123
1124         last_used = chan->cookie;
1125
1126         dma_set_tx_state(txstate, chan->completed_cookie, last_used,
1127                         sdmac->chn_count - sdmac->chn_real_count);
1128
1129         return sdmac->status;
1130 }
1131
1132 static void sdma_issue_pending(struct dma_chan *chan)
1133 {
1134         struct sdma_channel *sdmac = to_sdma_chan(chan);
1135         struct sdma_engine *sdma = sdmac->sdma;
1136
1137         if (sdmac->status == DMA_IN_PROGRESS)
1138                 sdma_enable_channel(sdma, sdmac->channel);
1139 }
1140
1141 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
1142
1143 static void sdma_add_scripts(struct sdma_engine *sdma,
1144                 const struct sdma_script_start_addrs *addr)
1145 {
1146         s32 *addr_arr = (s32 *)addr;
1147         s32 *saddr_arr = (s32 *)sdma->script_addrs;
1148         int i;
1149
1150         for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1151                 if (addr_arr[i] > 0)
1152                         saddr_arr[i] = addr_arr[i];
1153 }
1154
1155 static void sdma_load_firmware(const struct firmware *fw, void *context)
1156 {
1157         struct sdma_engine *sdma = context;
1158         const struct sdma_firmware_header *header;
1159         const struct sdma_script_start_addrs *addr;
1160         unsigned short *ram_code;
1161
1162         if (!fw) {
1163                 dev_err(sdma->dev, "firmware not found\n");
1164                 return;
1165         }
1166
1167         if (fw->size < sizeof(*header))
1168                 goto err_firmware;
1169
1170         header = (struct sdma_firmware_header *)fw->data;
1171
1172         if (header->magic != SDMA_FIRMWARE_MAGIC)
1173                 goto err_firmware;
1174         if (header->ram_code_start + header->ram_code_size > fw->size)
1175                 goto err_firmware;
1176
1177         addr = (void *)header + header->script_addrs_start;
1178         ram_code = (void *)header + header->ram_code_start;
1179
1180         clk_enable(sdma->clk);
1181         /* download the RAM image for SDMA */
1182         sdma_load_script(sdma, ram_code,
1183                         header->ram_code_size,
1184                         addr->ram_code_start_addr);
1185         clk_disable(sdma->clk);
1186
1187         sdma_add_scripts(sdma, addr);
1188
1189         dev_info(sdma->dev, "loaded firmware %d.%d\n",
1190                         header->version_major,
1191                         header->version_minor);
1192
1193 err_firmware:
1194         release_firmware(fw);
1195 }
1196
1197 static int __init sdma_get_firmware(struct sdma_engine *sdma,
1198                 const char *fw_name)
1199 {
1200         int ret;
1201
1202         ret = request_firmware_nowait(THIS_MODULE,
1203                         FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1204                         GFP_KERNEL, sdma, sdma_load_firmware);
1205
1206         return ret;
1207 }
1208
1209 static int __init sdma_init(struct sdma_engine *sdma)
1210 {
1211         int i, ret;
1212         dma_addr_t ccb_phys;
1213
1214         switch (sdma->devtype) {
1215         case IMX31_SDMA:
1216                 sdma->num_events = 32;
1217                 break;
1218         case IMX35_SDMA:
1219                 sdma->num_events = 48;
1220                 break;
1221         default:
1222                 dev_err(sdma->dev, "Unknown sdma type %d. aborting\n",
1223                         sdma->devtype);
1224                 return -ENODEV;
1225         }
1226
1227         clk_enable(sdma->clk);
1228
1229         /* Be sure SDMA has not started yet */
1230         writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1231
1232         sdma->channel_control = dma_alloc_coherent(NULL,
1233                         MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1234                         sizeof(struct sdma_context_data),
1235                         &ccb_phys, GFP_KERNEL);
1236
1237         if (!sdma->channel_control) {
1238                 ret = -ENOMEM;
1239                 goto err_dma_alloc;
1240         }
1241
1242         sdma->context = (void *)sdma->channel_control +
1243                 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1244         sdma->context_phys = ccb_phys +
1245                 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1246
1247         /* Zero-out the CCB structures array just allocated */
1248         memset(sdma->channel_control, 0,
1249                         MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1250
1251         /* disable all channels */
1252         for (i = 0; i < sdma->num_events; i++)
1253                 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1254
1255         /* All channels have priority 0 */
1256         for (i = 0; i < MAX_DMA_CHANNELS; i++)
1257                 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1258
1259         ret = sdma_request_channel(&sdma->channel[0]);
1260         if (ret)
1261                 goto err_dma_alloc;
1262
1263         sdma_config_ownership(&sdma->channel[0], false, true, false);
1264
1265         /* Set Command Channel (Channel Zero) */
1266         writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1267
1268         /* Set bits of CONFIG register but with static context switching */
1269         /* FIXME: Check whether to set ACR bit depending on clock ratios */
1270         writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1271
1272         writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1273
1274         /* Set bits of CONFIG register with given context switching mode */
1275         writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
1276
1277         /* Set channel 0 (the command channel) priority */
1278         sdma_set_channel_priority(&sdma->channel[0], 7);
1279
1280         clk_disable(sdma->clk);
1281
1282         return 0;
1283
1284 err_dma_alloc:
1285         clk_disable(sdma->clk);
1286         dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1287         return ret;
1288 }
1289
1290 static int __init sdma_probe(struct platform_device *pdev)
1291 {
1292         const struct of_device_id *of_id =
1293                         of_match_device(sdma_dt_ids, &pdev->dev);
1294         struct device_node *np = pdev->dev.of_node;
1295         const char *fw_name;
1296         int ret;
1297         int irq;
1298         struct resource *iores;
1299         struct sdma_platform_data *pdata = pdev->dev.platform_data;
1300         int i;
1301         struct sdma_engine *sdma;
1302         s32 *saddr_arr;
1303
1304         sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
1305         if (!sdma)
1306                 return -ENOMEM;
1307
1308         spin_lock_init(&sdma->channel_0_lock);
1309
1310         sdma->dev = &pdev->dev;
1311
1312         iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1313         irq = platform_get_irq(pdev, 0);
1314         if (!iores || irq < 0) {
1315                 ret = -EINVAL;
1316                 goto err_irq;
1317         }
1318
1319         if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) {
1320                 ret = -EBUSY;
1321                 goto err_request_region;
1322         }
1323
1324         sdma->clk = clk_get(&pdev->dev, NULL);
1325         if (IS_ERR(sdma->clk)) {
1326                 ret = PTR_ERR(sdma->clk);
1327                 goto err_clk;
1328         }
1329
1330         sdma->regs = ioremap(iores->start, resource_size(iores));
1331         if (!sdma->regs) {
1332                 ret = -ENOMEM;
1333                 goto err_ioremap;
1334         }
1335
1336         ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma);
1337         if (ret)
1338                 goto err_request_irq;
1339
1340         sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
1341         if (!sdma->script_addrs) {
1342                 ret = -ENOMEM;
1343                 goto err_alloc;
1344         }
1345
1346         /* initially no scripts available */
1347         saddr_arr = (s32 *)sdma->script_addrs;
1348         for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
1349                 saddr_arr[i] = -EINVAL;
1350
1351         if (of_id)
1352                 pdev->id_entry = of_id->data;
1353         sdma->devtype = pdev->id_entry->driver_data;
1354
1355         dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1356         dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1357
1358         INIT_LIST_HEAD(&sdma->dma_device.channels);
1359         /* Initialize channel parameters */
1360         for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1361                 struct sdma_channel *sdmac = &sdma->channel[i];
1362
1363                 sdmac->sdma = sdma;
1364                 spin_lock_init(&sdmac->lock);
1365
1366                 sdmac->chan.device = &sdma->dma_device;
1367                 dma_cookie_init(&sdmac->chan);
1368                 sdmac->channel = i;
1369
1370                 tasklet_init(&sdmac->tasklet, sdma_tasklet,
1371                              (unsigned long) sdmac);
1372                 /*
1373                  * Add the channel to the DMAC list. Do not add channel 0 though
1374                  * because we need it internally in the SDMA driver. This also means
1375                  * that channel 0 in dmaengine counting matches sdma channel 1.
1376                  */
1377                 if (i)
1378                         list_add_tail(&sdmac->chan.device_node,
1379                                         &sdma->dma_device.channels);
1380         }
1381
1382         ret = sdma_init(sdma);
1383         if (ret)
1384                 goto err_init;
1385
1386         if (pdata && pdata->script_addrs)
1387                 sdma_add_scripts(sdma, pdata->script_addrs);
1388
1389         if (pdata) {
1390                 ret = sdma_get_firmware(sdma, pdata->fw_name);
1391                 if (ret)
1392                         dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
1393         } else {
1394                 /*
1395                  * Because the device tree does not encode ROM script addresses,
1396                  * the RAM script in firmware is mandatory for device tree
1397                  * probe; without it the probe fails.
1398                  */
1399                 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
1400                                               &fw_name);
1401                 if (ret)
1402                         dev_warn(&pdev->dev, "failed to get firmware name\n");
1403                 else {
1404                         ret = sdma_get_firmware(sdma, fw_name);
1405                         if (ret)
1406                                 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
1407                 }
1408         }
1409
1410         sdma->dma_device.dev = &pdev->dev;
1411
1412         sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
1413         sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
1414         sdma->dma_device.device_tx_status = sdma_tx_status;
1415         sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
1416         sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1417         sdma->dma_device.device_control = sdma_control;
1418         sdma->dma_device.device_issue_pending = sdma_issue_pending;
1419         sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1420         dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1421
1422         ret = dma_async_device_register(&sdma->dma_device);
1423         if (ret) {
1424                 dev_err(&pdev->dev, "unable to register\n");
1425                 goto err_init;
1426         }
1427
1428         dev_info(sdma->dev, "initialized\n");
1429
1430         return 0;
1431
1432 err_init:
1433         kfree(sdma->script_addrs);
1434 err_alloc:
1435         free_irq(irq, sdma);
1436 err_request_irq:
1437         iounmap(sdma->regs);
1438 err_ioremap:
1439         clk_put(sdma->clk);
1440 err_clk:
1441         release_mem_region(iores->start, resource_size(iores));
1442 err_request_region:
1443 err_irq:
1444         kfree(sdma);
1445         return ret;
1446 }
1447
1448 static int __exit sdma_remove(struct platform_device *pdev)
1449 {
1450         return -EBUSY;
1451 }
1452
1453 static struct platform_driver sdma_driver = {
1454         .driver         = {
1455                 .name   = "imx-sdma",
1456                 .of_match_table = sdma_dt_ids,
1457         },
1458         .id_table       = sdma_devtypes,
1459         .remove         = __exit_p(sdma_remove),
1460 };
1461
1462 static int __init sdma_module_init(void)
1463 {
1464         return platform_driver_probe(&sdma_driver, sdma_probe);
1465 }
1466 module_init(sdma_module_init);
1467
1468 MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1469 MODULE_DESCRIPTION("i.MX SDMA driver");
1470 MODULE_LICENSE("GPL");