diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 63540d3e21534ae8cfe596c1d03a4ce850bcc200..fb4f4990f5ebf9f6c8e1f97c9704f8b03642bd18 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/types.h>
+#include <linux/bitops.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/wait.h>
+#include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
@@ -41,6 +42,8 @@
 #include <mach/dma.h>
 #include <mach/hardware.h>
 
+#include "dmaengine.h"
+
 /* SDMA registers */
 #define SDMA_H_C0PTR           0x000
 #define SDMA_H_INTR            0x004
@@ -259,19 +262,19 @@ struct sdma_channel {
        unsigned int                    pc_from_device, pc_to_device;
        unsigned long                   flags;
        dma_addr_t                      per_address;
-       u32                             event_mask0, event_mask1;
-       u32                             watermark_level;
+       unsigned long                   event_mask[2];
+       unsigned long                   watermark_level;
        u32                             shp_addr, per_addr;
        struct dma_chan                 chan;
        spinlock_t                      lock;
        struct dma_async_tx_descriptor  desc;
-       dma_cookie_t                    last_completed;
        enum dma_status                 status;
        unsigned int                    chn_count;
        unsigned int                    chn_real_count;
+       struct tasklet_struct           tasklet;
 };
 
-#define IMX_DMA_SG_LOOP                (1 << 0)
+#define IMX_DMA_SG_LOOP                BIT(0)
 
 #define MAX_DMA_CHANNELS 32
 #define MXC_SDMA_DEFAULT_PRIORITY 1
@@ -320,8 +323,9 @@ struct sdma_engine {
        struct sdma_context_data        *context;
        dma_addr_t                      context_phys;
        struct dma_device               dma_device;
-       struct clk                      *clk;
-       struct mutex                    channel_0_lock;
+       struct clk                      *clk_ipg;
+       struct clk                      *clk_ahb;
+       spinlock_t                      channel_0_lock;
        struct sdma_script_start_addrs  *script_addrs;
 };
 
@@ -345,9 +349,9 @@ static const struct of_device_id sdma_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
 
-#define SDMA_H_CONFIG_DSPDMA   (1 << 12) /* indicates if the DSPDMA is used */
-#define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */
-#define SDMA_H_CONFIG_ACR      (1 << 4)  /* indicates if AHB freq /core freq = 2 or 1 */
+#define SDMA_H_CONFIG_DSPDMA   BIT(12) /* indicates if the DSPDMA is used */
+#define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */
+#define SDMA_H_CONFIG_ACR      BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
 #define SDMA_H_CONFIG_CSM      (3)       /* indicates which context switch mode is selected*/
 
 static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
@@ -362,51 +366,64 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 {
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
-       u32 evt, mcu, dsp;
+       unsigned long evt, mcu, dsp;
 
        if (event_override && mcu_override && dsp_override)
                return -EINVAL;
 
-       evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR);
-       mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR);
-       dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR);
+       evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
+       mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
+       dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
 
        if (dsp_override)
-               dsp &= ~(1 << channel);
+               __clear_bit(channel, &dsp);
        else
-               dsp |= (1 << channel);
+               __set_bit(channel, &dsp);
 
        if (event_override)
-               evt &= ~(1 << channel);
+               __clear_bit(channel, &evt);
        else
-               evt |= (1 << channel);
+               __set_bit(channel, &evt);
 
        if (mcu_override)
-               mcu &= ~(1 << channel);
+               __clear_bit(channel, &mcu);
        else
-               mcu |= (1 << channel);
+               __set_bit(channel, &mcu);
 
-       __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR);
-       __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR);
-       __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR);
+       writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
+       writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
+       writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
 
        return 0;
 }
 
+static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
+{
+       writel(BIT(channel), sdma->regs + SDMA_H_START);
+}
+
 /*
- * sdma_run_channel - run a channel and wait till it's done
+ * sdma_run_channel0 - run a channel and wait till it's done
  */
-static int sdma_run_channel(struct sdma_channel *sdmac)
+static int sdma_run_channel0(struct sdma_engine *sdma)
 {
-       struct sdma_engine *sdma = sdmac->sdma;
-       int channel = sdmac->channel;
        int ret;
+       unsigned long timeout = 500;
 
-       init_completion(&sdmac->done);
+       sdma_enable_channel(sdma, 0);
 
-       __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
+       while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
+               if (timeout-- <= 0)
+                       break;
+               udelay(1);
+       }
 
-       ret = wait_for_completion_timeout(&sdmac->done, HZ);
+       if (ret) {
+               /* Clear the interrupt status */
+               writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
+       } else {
+               dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
+       }
 
        return ret ? 0 : -ETIMEDOUT;
 }
@@ -418,17 +435,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
        void *buf_virt;
        dma_addr_t buf_phys;
        int ret;
-
-       mutex_lock(&sdma->channel_0_lock);
+       unsigned long flags;
 
        buf_virt = dma_alloc_coherent(NULL,
                        size,
                        &buf_phys, GFP_KERNEL);
        if (!buf_virt) {
-               ret = -ENOMEM;
-               goto err_out;
+               return -ENOMEM;
        }
 
+       spin_lock_irqsave(&sdma->channel_0_lock, flags);
+
        bd0->mode.command = C0_SETPM;
        bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
        bd0->mode.count = size / 2;
@@ -437,12 +454,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 
        memcpy(buf_virt, buf, size);
 
-       ret = sdma_run_channel(&sdma->channel[0]);
+       ret = sdma_run_channel0(sdma);
 
-       dma_free_coherent(NULL, size, buf_virt, buf_phys);
+       spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
-err_out:
-       mutex_unlock(&sdma->channel_0_lock);
+       dma_free_coherent(NULL, size, buf_virt, buf_phys);
 
        return ret;
 }
@@ -451,12 +467,12 @@ static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
 {
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
-       u32 val;
+       unsigned long val;
        u32 chnenbl = chnenbl_ofs(sdma, event);
 
-       val = __raw_readl(sdma->regs + chnenbl);
-       val |= (1 << channel);
-       __raw_writel(val, sdma->regs + chnenbl);
+       val = readl_relaxed(sdma->regs + chnenbl);
+       __set_bit(channel, &val);
+       writel_relaxed(val, sdma->regs + chnenbl);
 }
 
 static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
@@ -464,11 +480,11 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
        u32 chnenbl = chnenbl_ofs(sdma, event);
-       u32 val;
+       unsigned long val;
 
-       val = __raw_readl(sdma->regs + chnenbl);
-       val &= ~(1 << channel);
-       __raw_writel(val, sdma->regs + chnenbl);
+       val = readl_relaxed(sdma->regs + chnenbl);
+       __clear_bit(channel, &val);
+       writel_relaxed(val, sdma->regs + chnenbl);
 }
 
 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
@@ -522,18 +538,16 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        else
                sdmac->status = DMA_SUCCESS;
 
-       sdmac->last_completed = sdmac->desc.cookie;
+       dma_cookie_complete(&sdmac->desc);
        if (sdmac->desc.callback)
                sdmac->desc.callback(sdmac->desc.callback_param);
 }
 
-static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
+static void sdma_tasklet(unsigned long data)
 {
-       complete(&sdmac->done);
+       struct sdma_channel *sdmac = (struct sdma_channel *) data;
 
-       /* not interested in channel 0 interrupts */
-       if (sdmac->channel == 0)
-               return;
+       complete(&sdmac->done);
 
        if (sdmac->flags & IMX_DMA_SG_LOOP)
                sdma_handle_channel_loop(sdmac);
@@ -544,18 +558,20 @@ static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 {
        struct sdma_engine *sdma = dev_id;
-       u32 stat;
+       unsigned long stat;
 
-       stat = __raw_readl(sdma->regs + SDMA_H_INTR);
-       __raw_writel(stat, sdma->regs + SDMA_H_INTR);
+       stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+       /* not interested in channel 0 interrupts */
+       stat &= ~1;
+       writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
 
        while (stat) {
                int channel = fls(stat) - 1;
                struct sdma_channel *sdmac = &sdma->channel[channel];
 
-               mxc_sdma_handle_channel(sdmac);
+               tasklet_schedule(&sdmac->tasklet);
 
-               stat &= ~(1 << channel);
+               __clear_bit(channel, &stat);
        }
 
        return IRQ_HANDLED;
@@ -652,6 +668,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        struct sdma_context_data *context = sdma->context;
        struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
        int ret;
+       unsigned long flags;
 
        if (sdmac->direction == DMA_DEV_TO_MEM) {
                load_address = sdmac->pc_from_device;
@@ -663,13 +680,13 @@ static int sdma_load_context(struct sdma_channel *sdmac)
                return load_address;
 
        dev_dbg(sdma->dev, "load_address = %d\n", load_address);
-       dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level);
+       dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
        dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
        dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
-       dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0);
-       dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1);
+       dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
+       dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
 
-       mutex_lock(&sdma->channel_0_lock);
+       spin_lock_irqsave(&sdma->channel_0_lock, flags);
 
        memset(context, 0, sizeof(*context));
        context->channel_state.pc = load_address;
@@ -677,8 +694,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        /* Send by context the event mask,base address for peripheral
         * and watermark level
         */
-       context->gReg[0] = sdmac->event_mask1;
-       context->gReg[1] = sdmac->event_mask0;
+       context->gReg[0] = sdmac->event_mask[1];
+       context->gReg[1] = sdmac->event_mask[0];
        context->gReg[2] = sdmac->per_addr;
        context->gReg[6] = sdmac->shp_addr;
        context->gReg[7] = sdmac->watermark_level;
@@ -688,10 +705,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        bd0->mode.count = sizeof(*context) / 4;
        bd0->buffer_addr = sdma->context_phys;
        bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
+       ret = sdma_run_channel0(sdma);
 
-       ret = sdma_run_channel(&sdma->channel[0]);
-
-       mutex_unlock(&sdma->channel_0_lock);
+       spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
 
        return ret;
 }
@@ -701,7 +717,7 @@ static void sdma_disable_channel(struct sdma_channel *sdmac)
        struct sdma_engine *sdma = sdmac->sdma;
        int channel = sdmac->channel;
 
-       __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP);
+       writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
        sdmac->status = DMA_ERROR;
 }
 
@@ -711,13 +727,13 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
 
        sdma_disable_channel(sdmac);
 
-       sdmac->event_mask0 = 0;
-       sdmac->event_mask1 = 0;
+       sdmac->event_mask[0] = 0;
+       sdmac->event_mask[1] = 0;
        sdmac->shp_addr = 0;
        sdmac->per_addr = 0;
 
        if (sdmac->event_id0) {
-               if (sdmac->event_id0 > 32)
+               if (sdmac->event_id0 >= sdmac->sdma->num_events)
                        return -EINVAL;
                sdma_event_enable(sdmac, sdmac->event_id0);
        }
@@ -740,15 +756,14 @@ static int sdma_config_channel(struct sdma_channel *sdmac)
                        (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
                /* Handle multiple event channels differently */
                if (sdmac->event_id1) {
-                       sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32);
+                       sdmac->event_mask[1] = BIT(sdmac->event_id1 % 32);
                        if (sdmac->event_id1 > 31)
-                               sdmac->watermark_level |= 1 << 31;
-                       sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32);
+                               __set_bit(31, &sdmac->watermark_level);
+                       sdmac->event_mask[0] = BIT(sdmac->event_id0 % 32);
                        if (sdmac->event_id0 > 31)
-                               sdmac->watermark_level |= 1 << 30;
+                               __set_bit(30, &sdmac->watermark_level);
                } else {
-                       sdmac->event_mask0 = 1 << sdmac->event_id0;
-                       sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32);
+                       __set_bit(sdmac->event_id0, sdmac->event_mask);
                }
                /* Watermark Level */
                sdmac->watermark_level |= sdmac->watermark_level;
@@ -774,7 +789,7 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
                return -EINVAL;
        }
 
-       __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
+       writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
 
        return 0;
 }
@@ -796,8 +811,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
        sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
        sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
 
-       clk_enable(sdma->clk);
-
        sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
 
        init_completion(&sdmac->done);
@@ -810,24 +823,6 @@ out:
        return ret;
 }
 
-static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
-{
-       __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
-}
-
-static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
-{
-       dma_cookie_t cookie = sdmac->chan.cookie;
-
-       if (++cookie < 0)
-               cookie = 1;
-
-       sdmac->chan.cookie = cookie;
-       sdmac->desc.cookie = cookie;
-
-       return cookie;
-}
-
 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 {
        return container_of(chan, struct sdma_channel, chan);
@@ -837,14 +832,11 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
        unsigned long flags;
        struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
-       struct sdma_engine *sdma = sdmac->sdma;
        dma_cookie_t cookie;
 
        spin_lock_irqsave(&sdmac->lock, flags);
 
-       cookie = sdma_assign_cookie(sdmac);
-
-       sdma_enable_channel(sdma, sdmac->channel);
+       cookie = dma_cookie_assign(tx);
 
        spin_unlock_irqrestore(&sdmac->lock, flags);
 
@@ -875,11 +867,15 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 
        sdmac->peripheral_type = data->peripheral_type;
        sdmac->event_id0 = data->dma_request;
-       ret = sdma_set_channel_priority(sdmac, prio);
+
+       clk_enable(sdmac->sdma->clk_ipg);
+       clk_enable(sdmac->sdma->clk_ahb);
+
+       ret = sdma_request_channel(sdmac);
        if (ret)
                return ret;
 
-       ret = sdma_request_channel(sdmac);
+       ret = sdma_set_channel_priority(sdmac, prio);
        if (ret)
                return ret;
 
@@ -910,13 +906,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
        dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
 
-       clk_disable(sdma->clk);
+       clk_disable(sdma->clk_ipg);
+       clk_disable(sdma->clk_ahb);
 }
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_transfer_direction direction,
-               unsigned long flags)
+               unsigned long flags, void *context)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
@@ -952,7 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 
                bd->buffer_addr = sg->dma_address;
 
-               count = sg->length;
+               count = sg_dma_len(sg);
 
                if (count > 0xffff) {
                        dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1014,7 +1011,8 @@ err_out:
 
 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_transfer_direction direction)
+               size_t period_len, enum dma_transfer_direction direction,
+               void *context)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
@@ -1128,7 +1126,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 
        last_used = chan->cookie;
 
-       dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+       dma_set_tx_state(txstate, chan->completed_cookie, last_used,
                        sdmac->chn_count - sdmac->chn_real_count);
 
        return sdmac->status;
@@ -1136,9 +1134,11 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 
 static void sdma_issue_pending(struct dma_chan *chan)
 {
-       /*
-        * Nothing to do. We only have a single descriptor
-        */
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct sdma_engine *sdma = sdmac->sdma;
+
+       if (sdmac->status == DMA_IN_PROGRESS)
+               sdma_enable_channel(sdma, sdmac->channel);
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1        34
@@ -1180,12 +1180,14 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
        addr = (void *)header + header->script_addrs_start;
        ram_code = (void *)header + header->ram_code_start;
 
-       clk_enable(sdma->clk);
+       clk_enable(sdma->clk_ipg);
+       clk_enable(sdma->clk_ahb);
        /* download the RAM image for SDMA */
        sdma_load_script(sdma, ram_code,
                        header->ram_code_size,
                        addr->ram_code_start_addr);
-       clk_disable(sdma->clk);
+       clk_disable(sdma->clk_ipg);
+       clk_disable(sdma->clk_ahb);
 
        sdma_add_scripts(sdma, addr);
 
@@ -1227,10 +1229,11 @@ static int __init sdma_init(struct sdma_engine *sdma)
                return -ENODEV;
        }
 
-       clk_enable(sdma->clk);
+       clk_enable(sdma->clk_ipg);
+       clk_enable(sdma->clk_ahb);
 
        /* Be sure SDMA has not started yet */
-       __raw_writel(0, sdma->regs + SDMA_H_C0PTR);
+       writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
 
        sdma->channel_control = dma_alloc_coherent(NULL,
                        MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
@@ -1253,11 +1256,11 @@ static int __init sdma_init(struct sdma_engine *sdma)
 
        /* disable all channels */
        for (i = 0; i < sdma->num_events; i++)
-               __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i));
+               writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
 
        /* All channels have priority 0 */
        for (i = 0; i < MAX_DMA_CHANNELS; i++)
-               __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
+               writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
 
        ret = sdma_request_channel(&sdma->channel[0]);
        if (ret)
@@ -1266,26 +1269,28 @@ static int __init sdma_init(struct sdma_engine *sdma)
        sdma_config_ownership(&sdma->channel[0], false, true, false);
 
        /* Set Command Channel (Channel Zero) */
-       __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR);
+       writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
 
        /* Set bits of CONFIG register but with static context switching */
        /* FIXME: Check whether to set ACR bit depending on clock ratios */
-       __raw_writel(0, sdma->regs + SDMA_H_CONFIG);
+       writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
 
-       __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR);
+       writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
 
        /* Set bits of CONFIG register with given context switching mode */
-       __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+       writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
 
        /* Initializes channel's priorities */
        sdma_set_channel_priority(&sdma->channel[0], 7);
 
-       clk_disable(sdma->clk);
+       clk_disable(sdma->clk_ipg);
+       clk_disable(sdma->clk_ahb);
 
        return 0;
 
 err_dma_alloc:
-       clk_disable(sdma->clk);
+       clk_disable(sdma->clk_ipg);
+       clk_disable(sdma->clk_ahb);
        dev_err(sdma->dev, "initialisation failed with %d\n", ret);
        return ret;
 }
@@ -1308,7 +1313,7 @@ static int __init sdma_probe(struct platform_device *pdev)
        if (!sdma)
                return -ENOMEM;
 
-       mutex_init(&sdma->channel_0_lock);
+       spin_lock_init(&sdma->channel_0_lock);
 
        sdma->dev = &pdev->dev;
 
@@ -1324,12 +1329,21 @@ static int __init sdma_probe(struct platform_device *pdev)
                goto err_request_region;
        }
 
-       sdma->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(sdma->clk)) {
-               ret = PTR_ERR(sdma->clk);
+       sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+       if (IS_ERR(sdma->clk_ipg)) {
+               ret = PTR_ERR(sdma->clk_ipg);
+               goto err_clk;
+       }
+
+       sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+       if (IS_ERR(sdma->clk_ahb)) {
+               ret = PTR_ERR(sdma->clk_ahb);
                goto err_clk;
        }
 
+       clk_prepare(sdma->clk_ipg);
+       clk_prepare(sdma->clk_ahb);
+
        sdma->regs = ioremap(iores->start, resource_size(iores));
        if (!sdma->regs) {
                ret = -ENOMEM;
@@ -1367,8 +1381,11 @@ static int __init sdma_probe(struct platform_device *pdev)
                spin_lock_init(&sdmac->lock);
 
                sdmac->chan.device = &sdma->dma_device;
+               dma_cookie_init(&sdmac->chan);
                sdmac->channel = i;
 
+               tasklet_init(&sdmac->tasklet, sdma_tasklet,
+                            (unsigned long) sdmac);
                /*
                 * Add the channel to the DMAC list. Do not add channel 0 though
                 * because we need it internally in the SDMA driver. This also means
@@ -1387,7 +1404,9 @@ static int __init sdma_probe(struct platform_device *pdev)
                sdma_add_scripts(sdma, pdata->script_addrs);
 
        if (pdata) {
-               sdma_get_firmware(sdma, pdata->fw_name);
+               ret = sdma_get_firmware(sdma, pdata->fw_name);
+               if (ret)
+                       dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
        } else {
                /*
                 * Because that device tree does not encode ROM script address,
@@ -1396,15 +1415,12 @@ static int __init sdma_probe(struct platform_device *pdev)
                 */
                ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
                                              &fw_name);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to get firmware name\n");
-                       goto err_init;
-               }
-
-               ret = sdma_get_firmware(sdma, fw_name);
-               if (ret) {
-                       dev_err(&pdev->dev, "failed to get firmware\n");
-                       goto err_init;
+               if (ret)
+                       dev_warn(&pdev->dev, "failed to get firmware name\n");
+               else {
+                       ret = sdma_get_firmware(sdma, fw_name);
+                       if (ret)
+                               dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
                }
        }
 
@@ -1437,7 +1453,6 @@ err_alloc:
 err_request_irq:
        iounmap(sdma->regs);
 err_ioremap:
-       clk_put(sdma->clk);
 err_clk:
        release_mem_region(iores->start, resource_size(iores));
 err_request_region: