Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 30 Jan 2014 04:27:23 +0000 (20:27 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 30 Jan 2014 04:27:23 +0000 (20:27 -0800)
Pull slave-dma updates from Vinod Koul:
 - new driver for BCM2835 used in the Raspberry Pi
 - new driver for MOXA ART
 - dma_get_any_slave_channel API for DT based systems (see the sketch
   below)
 - minor fixes and updates spread across drivers

[ The fsl-ssi dual fifo mode support addition clashed badly with the
  other changes to fsl-ssi that came in through the sound merge.  I did
  a very rough cut at fixing up the conflict, but Nicolin Chen (author
  of both sides) will need to verify and check things ]
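
The dma_get_any_slave_channel() interface lets a DT xlate callback hand
out any free channel on a controller and then configure it from the
phandle arguments. A minimal sketch of such a callback, modeled on the
bcm2835_dma_xlate() added further down (the foo_* names are
hypothetical):

	static struct dma_chan *foo_dma_xlate(struct of_phandle_args *spec,
					      struct of_dma *ofdma)
	{
		struct foo_dmadev *d = ofdma->of_dma_data;
		struct dma_chan *chan;

		/* take any channel the dmaengine core considers free */
		chan = dma_get_any_slave_channel(&d->ddev);
		if (!chan)
			return NULL;

		/* apply the client's cell from the dmas property */
		to_foo_dma_chan(chan)->dreq = spec->args[0];

		return chan;
	}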

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (36 commits)
  dmaengine: mmp_pdma: fix mismerge
  dma: pl08x: Export pl08x_filter_id
  acpi-dma: align documentation with kernel-doc format
  dma: fix vchan_cookie_complete() debug print
  DMA: dmatest: extend the "device" module parameter to 32 characters
  drivers/dma: fix error return code
  dma: omap: Set debug level to debugging messages
  dmaengine: fix kernel-doc style typos for few comments
  dma: tegra: add support for Tegra148/124
  dma: dw: use %pad instead of casting dma_addr_t
  dma: dw: join split up messages
  dma: dw: fix style of multiline comment
  dmaengine: k3dma: fix sparse warnings
  dma: pl330: Use dma_get_slave_channel() in the of xlate callback
  dma: pl330: Differentiate between submitted and issued descriptors
  dmaengine: sirf: Add device_slave_caps interface
  DMA: Freescale: change BWC from 256 bytes to 1024 bytes
  dmaengine: Add MOXA ART DMA engine driver
  dmaengine: Add DMA_PRIVATE to BCM2835 driver
  dma: imx-sdma: Assign a default script number for ROM firmware cases
  ...

30 files changed:
Documentation/devicetree/bindings/dma/bcm2835-dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt [new file with mode: 0644]
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/acpi-dma.c
drivers/dma/amba-pl08x.c
drivers/dma/bcm2835-dma.c [new file with mode: 0644]
drivers/dma/cppi41.c
drivers/dma/dmatest.c
drivers/dma/dw/core.c
drivers/dma/edma.c
drivers/dma/fsldma.h
drivers/dma/imx-sdma.c
drivers/dma/k3dma.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/moxart-dma.c [new file with mode: 0644]
drivers/dma/omap-dma.c
drivers/dma/pl330.c
drivers/dma/ppc4xx/adma.c
drivers/dma/sirf-dma.c
drivers/dma/tegra20-apb-dma.c
drivers/dma/virt-dma.h
include/linux/dmaengine.h
include/linux/platform_data/dma-imx-sdma.h
include/linux/platform_data/dma-imx.h
include/linux/platform_data/dma-mmp_tdma.h
include/linux/platform_data/dma-mv_xor.h
sound/soc/fsl/fsl_ssi.c

diff --git a/Documentation/devicetree/bindings/dma/bcm2835-dma.txt b/Documentation/devicetree/bindings/dma/bcm2835-dma.txt
new file mode 100644 (file)
index 0000000..1396078
--- /dev/null
@@ -0,0 +1,57 @@
+* BCM2835 DMA controller
+
+The BCM2835 DMA controller has 16 channels in total.
+Only the lower 13 channels have an associated IRQ.
+Some arbitrary channels are used by the firmware
+(1,3,6,7 in the current firmware version).
+Channels 0, 2 and 3 have special functionality
+and should not be used by the driver.
+
+Required properties:
+- compatible: Should be "brcm,bcm2835-dma".
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain the DMA interrupts associated
+               with the DMA channels in ascending order.
+- #dma-cells: Must be <1>, the cell in the dmas property of the
+               client device represents the DREQ number.
+- brcm,dma-channel-mask: Bit mask representing the channels
+                        not used by the firmware in ascending order,
+                        i.e. first channel corresponds to LSB.
+
+Example:
+
+dma: dma@7e007000 {
+       compatible = "brcm,bcm2835-dma";
+       reg = <0x7e007000 0xf00>;
+       interrupts = <1 16>,
+                    <1 17>,
+                    <1 18>,
+                    <1 19>,
+                    <1 20>,
+                    <1 21>,
+                    <1 22>,
+                    <1 23>,
+                    <1 24>,
+                    <1 25>,
+                    <1 26>,
+                    <1 27>,
+                    <1 28>;
+
+       #dma-cells = <1>;
+       brcm,dma-channel-mask = <0x7f35>;
+};
+
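+In this example the mask 0x7f35 (binary 0111 1111 0011 0101) clears
+bits 1, 3, 6 and 7, matching the channels used by the current firmware,
+and bit 15, the channel with its own register base.
+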
+DMA clients connected to the BCM2835 DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel.
+
+Example:
+
+bcm2835_i2s: i2s@7e203000 {
+       compatible = "brcm,bcm2835-i2s";
+       reg = <0x7e203000 0x20>,
+             <0x7e101098 0x02>;
+
+       dmas = <&dma 2>,
+              <&dma 3>;
+       dma-names = "tx", "rx";
+};
index 4fa814d3832124adb80f29ee777849739acbb7e4..68b83ecc385007216d391f1a0edf06527dad5fb9 100644 (file)
@@ -42,6 +42,7 @@ The full ID of peripheral types can be found below.
        19      IPU Memory
        20      ASRC
        21      ESAI
+       22      SSI Dual FIFO   (needs firmware ver >= 2)
 
 The third cell specifies the transfer priority as below.
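
For non-DT users the same script is selected through the i.MX DMA
platform data, where this merge adds IMX_DMATYPE_SSI_DUAL. A minimal
sketch, with the request line and priority purely illustrative:

	#include <linux/platform_data/dma-imx.h>

	static struct imx_dma_data ssi_dma_data = {
		.dma_request     = 14,                    /* board-specific event line */
		.peripheral_type = IMX_DMATYPE_SSI_DUAL,  /* dual-FIFO SSI script */
		.priority        = DMA_PRIO_HIGH,
	};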
 
diff --git a/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt b/Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
new file mode 100644 (file)
index 0000000..8a9f355
--- /dev/null
@@ -0,0 +1,45 @@
+MOXA ART DMA Controller
+
+See dma.txt first
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-dma"
+- reg :                Should contain registers location and length
+- interrupts : Should contain an interrupt-specifier for the sole
+               interrupt generated by the device
+- #dma-cells : Should be 1, a single cell holding a line request number
+
+Example:
+
+       dma: dma@90500000 {
+               compatible = "moxa,moxart-dma";
+               reg = <0x90500080 0x40>;
+               interrupts = <24 0>;
+               #dma-cells = <1>;
+       };
+
+
+Clients:
+
+DMA clients connected to the MOXA ART DMA controller must use the format
+described in the dma.txt file, using a two-cell specifier for each channel:
+a phandle plus one integer cell.
+The two cells in order are:
+
+1. A phandle pointing to the DMA controller.
+2. Peripheral identifier for the hardware handshaking interface.
+
+Example:
+Use the specific request line passed from the DMA controller.
+For example, the MMC request line is 5:
+
+       sdhci: sdhci@98e00000 {
+               compatible = "moxa,moxart-sdhci";
+               reg = <0x98e00000 0x5C>;
+               interrupts = <5 0>;
+               clocks = <&clk_apb>;
+               dmas =  <&dma 5>,
+                       <&dma 5>;
+               dma-names = "tx", "rx";
+       };
index c10eb89a3c1bdd388da699f3ac4f8e00c92cc63b..9bed1a2a67a12e44cde304995b6895e3f8296c2a 100644 (file)
@@ -306,6 +306,12 @@ config DMA_OMAP
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
 
+config DMA_BCM2835
+       tristate "BCM2835 DMA engine support"
+       depends on (ARCH_BCM2835 || MACH_BCM2708)
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+
 config TI_CPPI41
        tristate "AM33xx CPPI41 DMA support"
        depends on ARCH_OMAP
@@ -336,6 +342,14 @@ config K3_DMA
          Support the DMA engine for Hisilicon K3 platform
          devices.
 
+config MOXART_DMA
+       tristate "MOXART DMA support"
+       depends on ARCH_MOXART
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Enable support for the MOXA ART SoC DMA controller.
+
 config DMA_ENGINE
        bool
 
index 0ce2da97e42972b82a61197a4fe4bb52c35ceac4..a029d0f4a1be8088c00c459373f469580925d981 100644 (file)
@@ -38,7 +38,9 @@ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
 obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
 obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
 obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
+obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
index e69b03c0fa50cfae7ca6528f5eef0d9eaf1d8d53..1e506afa33f5e5a9b95753ce0e98ceb2f4ce7e66 100644 (file)
@@ -30,11 +30,12 @@ static DEFINE_MUTEX(acpi_dma_lock);
  * @adev:      ACPI device to match with
  * @adma:      struct acpi_dma of the given DMA controller
  *
- * Returns 1 on success, 0 when no information is available, or appropriate
- * errno value on error.
- *
  * In order to match a device from DSDT table to the corresponding CSRT device
  * we use MMIO address and IRQ.
+ *
+ * Return:
+ * 1 on success, 0 when no information is available, or appropriate errno value
+ * on error.
  */
 static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
                struct acpi_device *adev, struct acpi_dma *adma)
@@ -101,7 +102,6 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
  *
  * We are using this table to get the request line range of the specific DMA
  * controller to be used later.
- *
  */
 static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
 {
@@ -141,10 +141,11 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
  * @data               pointer to controller specific data to be used by
  *                     translation function
  *
- * Returns 0 on success or appropriate errno value on error.
- *
  * Allocated memory should be freed with appropriate acpi_dma_controller_free()
  * call.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int acpi_dma_controller_register(struct device *dev,
                struct dma_chan *(*acpi_dma_xlate)
@@ -188,6 +189,9 @@ EXPORT_SYMBOL_GPL(acpi_dma_controller_register);
  * @dev:       struct device of DMA controller
  *
  * Memory allocated by acpi_dma_controller_register() is freed here.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int acpi_dma_controller_free(struct device *dev)
 {
@@ -225,6 +229,9 @@ static void devm_acpi_dma_release(struct device *dev, void *res)
  * Managed acpi_dma_controller_register(). DMA controller registered by this
  * function are automatically freed on driver detach. See
  * acpi_dma_controller_register() for more information.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
  */
 int devm_acpi_dma_controller_register(struct device *dev,
                struct dma_chan *(*acpi_dma_xlate)
@@ -267,8 +274,6 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
  * @adma:      struct acpi_dma of DMA controller
  * @dma_spec:  dma specifier to update
  *
- * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
- *
  * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
  * Descriptor":
  *     DMA Request Line bits is a platform-relative number uniquely
@@ -276,6 +281,9 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
  *     mapping is done in a controller-specific OS driver.
  * That's why we can safely adjust slave_id when the appropriate controller is
  * found.
+ *
+ * Return:
+ * 0, if no information is available, -1 on mismatch, and 1 otherwise.
  */
 static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
                struct acpi_dma_spec *dma_spec)
@@ -334,7 +342,8 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
  * @dev:       struct device to get DMA request from
  * @index:     index of FixedDMA descriptor for @dev
  *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
                size_t index)
@@ -403,7 +412,8 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
  * translate the names "tx" and "rx" here based on the most common case where
  * the first FixedDMA descriptor is TX and second is RX.
  *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
                const char *name)
@@ -427,8 +437,10 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name);
  * @adma: pointer to ACPI DMA controller data
  *
  * A simple translation function for ACPI based devices. Passes &struct
- * dma_spec to the DMA controller driver provided filter function. Returns
- * pointer to the channel if found or %NULL otherwise.
+ * dma_spec to the DMA controller driver provided filter function.
+ *
+ * Return:
+ * Pointer to the channel if found or %NULL otherwise.
  */
 struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
                struct acpi_dma *adma)
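
All of these hunks apply the same kernel-doc convention: the return
value moves out of the free-form description into a dedicated Return:
section. In outline (a generic sketch, not a function from this file):

	/**
	 * foo_frob - frobnicate a foo
	 * @foo: the foo to frobnicate
	 *
	 * Free-form description of what frobnication involves.
	 *
	 * Return:
	 * 0 on success or appropriate errno value on error.
	 */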
index ec4ee5c1fe9dc2115e029d0c472bd32f48cb281c..8114731a1c62d6450cd5f8bbd490f87c7800b1ea 100644 (file)
@@ -83,6 +83,7 @@
 #include <linux/dmaengine.h>
 #include <linux/dmapool.h>
 #include <linux/dma-mapping.h>
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -1771,6 +1772,7 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
 
        return false;
 }
+EXPORT_SYMBOL_GPL(pl08x_filter_id);
 
 /*
  * Just check that the device is there and active
@@ -2167,7 +2169,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
        /* Register slave channels */
        ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
                        pl08x->pd->num_slave_channels, true);
-       if (ret <= 0) {
+       if (ret < 0) {
                dev_warn(&pl08x->adev->dev,
                        "%s failed to enumerate slave channels - %d\n",
                                __func__, ret);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
new file mode 100644 (file)
index 0000000..a036021
--- /dev/null
@@ -0,0 +1,707 @@
+/*
+ * BCM2835 DMA engine support
+ *
+ * This driver only supports cyclic DMA transfers
+ * as needed for the I2S module.
+ *
+ * Author:      Florian Meier <florian.meier@koalo.de>
+ *              Copyright 2013
+ *
+ * Based on
+ *     OMAP DMAengine support by Russell King
+ *
+ *     BCM2708 DMA Driver
+ *     Copyright (C) 2010 Broadcom
+ *
+ *     Raspberry Pi PCM I2S ALSA Driver
+ *     Copyright (c) by Phil Poole 2013
+ *
+ *     MARVELL MMP Peripheral DMA Driver
+ *     Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+struct bcm2835_dmadev {
+       struct dma_device ddev;
+       spinlock_t lock;
+       void __iomem *base;
+       struct device_dma_parameters dma_parms;
+};
+
+struct bcm2835_dma_cb {
+       uint32_t info;
+       uint32_t src;
+       uint32_t dst;
+       uint32_t length;
+       uint32_t stride;
+       uint32_t next;
+       uint32_t pad[2];
+};
+
+struct bcm2835_chan {
+       struct virt_dma_chan vc;
+       struct list_head node;
+
+       struct dma_slave_config cfg;
+       bool cyclic;
+       unsigned int dreq;
+
+       int ch;
+       struct bcm2835_desc *desc;
+
+       void __iomem *chan_base;
+       int irq_number;
+};
+
+struct bcm2835_desc {
+       struct virt_dma_desc vd;
+       enum dma_transfer_direction dir;
+
+       unsigned int control_block_size;
+       struct bcm2835_dma_cb *control_block_base;
+       dma_addr_t control_block_base_phys;
+
+       unsigned int frames;
+       size_t size;
+};
+
+#define BCM2835_DMA_CS         0x00
+#define BCM2835_DMA_ADDR       0x04
+#define BCM2835_DMA_SOURCE_AD  0x0c
+#define BCM2835_DMA_DEST_AD    0x10
+#define BCM2835_DMA_NEXTCB     0x1C
+
+/* DMA CS Control and Status bits */
+#define BCM2835_DMA_ACTIVE     BIT(0)
+#define BCM2835_DMA_INT        BIT(2)
+#define BCM2835_DMA_ISPAUSED   BIT(4)  /* Pause requested or not active */
+#define BCM2835_DMA_ISHELD     BIT(5)  /* Is held by DREQ flow control */
+#define BCM2835_DMA_ERR        BIT(8)
+#define BCM2835_DMA_ABORT      BIT(30) /* Stop current CB, go to next, WO */
+#define BCM2835_DMA_RESET      BIT(31) /* WO, self clearing */
+
+#define BCM2835_DMA_INT_EN     BIT(0)
+#define BCM2835_DMA_D_INC      BIT(4)
+#define BCM2835_DMA_D_DREQ     BIT(6)
+#define BCM2835_DMA_S_INC      BIT(8)
+#define BCM2835_DMA_S_DREQ     BIT(10)
+
+#define BCM2835_DMA_PER_MAP(x) ((x) << 16)
+
+#define BCM2835_DMA_DATA_TYPE_S8       1
+#define BCM2835_DMA_DATA_TYPE_S16      2
+#define BCM2835_DMA_DATA_TYPE_S32      4
+#define BCM2835_DMA_DATA_TYPE_S128     16
+
+#define BCM2835_DMA_BULK_MASK  BIT(0)
+#define BCM2835_DMA_FIQ_MASK   (BIT(2) | BIT(3))
+
+/* Valid only for channels 0 - 14, 15 has its own base address */
+#define BCM2835_DMA_CHAN(n)    ((n) << 8) /* Base address */
+#define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n))
+
+static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d)
+{
+       return container_of(d, struct bcm2835_dmadev, ddev);
+}
+
+static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct bcm2835_chan, vc.chan);
+}
+
+static inline struct bcm2835_desc *to_bcm2835_dma_desc(
+               struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct bcm2835_desc, vd.tx);
+}
+
+static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
+{
+       struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
+       dma_free_coherent(desc->vd.tx.chan->device->dev,
+                       desc->control_block_size,
+                       desc->control_block_base,
+                       desc->control_block_base_phys);
+       kfree(desc);
+}
+
+static int bcm2835_dma_abort(void __iomem *chan_base)
+{
+       unsigned long cs;
+       long int timeout = 10000;
+
+       cs = readl(chan_base + BCM2835_DMA_CS);
+       if (!(cs & BCM2835_DMA_ACTIVE))
+               return 0;
+
+       /* Write 0 to the active bit - Pause the DMA */
+       writel(0, chan_base + BCM2835_DMA_CS);
+
+       /* Wait for any current AXI transfer to complete */
+       while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+               cpu_relax();
+               cs = readl(chan_base + BCM2835_DMA_CS);
+       }
+
+       /* We'll un-pause when we set off our next DMA */
+       if (!timeout)
+               return -ETIMEDOUT;
+
+       if (!(cs & BCM2835_DMA_ACTIVE))
+               return 0;
+
+       /* Terminate the control block chain */
+       writel(0, chan_base + BCM2835_DMA_NEXTCB);
+
+       /* Abort the whole DMA */
+       writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
+              chan_base + BCM2835_DMA_CS);
+
+       return 0;
+}
+
+static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
+       struct bcm2835_desc *d;
+
+       if (!vd) {
+               c->desc = NULL;
+               return;
+       }
+
+       list_del(&vd->node);
+
+       c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+
+       writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+       writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static irqreturn_t bcm2835_dma_callback(int irq, void *data)
+{
+       struct bcm2835_chan *c = data;
+       struct bcm2835_desc *d;
+       unsigned long flags;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+
+       /* Acknowledge interrupt */
+       writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+
+       d = c->desc;
+
+       if (d) {
+               /* TODO Only works for cyclic DMA */
+               vchan_cyclic_callback(&d->vd);
+       }
+
+       /* Keep the DMA engine running */
+       writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
+       return IRQ_HANDLED;
+}
+
+static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+       dev_dbg(c->vc.chan.device->dev,
+                       "Allocating DMA channel %d\n", c->ch);
+
+       return request_irq(c->irq_number,
+                       bcm2835_dma_callback, 0, "DMA IRQ", c);
+}
+
+static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+       vchan_free_chan_resources(&c->vc);
+       free_irq(c->irq_number, c);
+
+       dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
+}
+
+static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d)
+{
+       return d->size;
+}
+
+static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
+{
+       unsigned int i;
+       size_t size;
+
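+       /*
+        * Accumulate the residue: frames before the one containing
+        * @addr contribute nothing, the frame that contains it
+        * contributes its bytes from @addr to the end, and every
+        * following frame contributes its full length.
+        */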
+       for (size = i = 0; i < d->frames; i++) {
+               struct bcm2835_dma_cb *control_block =
+                       &d->control_block_base[i];
+               size_t this_size = control_block->length;
+               dma_addr_t dma;
+
+               if (d->dir == DMA_DEV_TO_MEM)
+                       dma = control_block->dst;
+               else
+                       dma = control_block->src;
+
+               if (size)
+                       size += this_size;
+               else if (addr >= dma && addr < dma + this_size)
+                       size += dma + this_size - addr;
+       }
+
+       return size;
+}
+
+static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan,
+       dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+       struct virt_dma_desc *vd;
+       enum dma_status ret;
+       unsigned long flags;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE || !txstate)
+               return ret;
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       vd = vchan_find_desc(&c->vc, cookie);
+       if (vd) {
+               txstate->residue =
+                       bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx));
+       } else if (c->desc && c->desc->vd.tx.cookie == cookie) {
+               struct bcm2835_desc *d = c->desc;
+               dma_addr_t pos;
+
+               if (d->dir == DMA_MEM_TO_DEV)
+                       pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD);
+               else if (d->dir == DMA_DEV_TO_MEM)
+                       pos = readl(c->chan_base + BCM2835_DMA_DEST_AD);
+               else
+                       pos = 0;
+
+               txstate->residue = bcm2835_dma_desc_size_pos(d, pos);
+       } else {
+               txstate->residue = 0;
+       }
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+
+       return ret;
+}
+
+static void bcm2835_dma_issue_pending(struct dma_chan *chan)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+       unsigned long flags;
+
+       c->cyclic = true; /* Nothing else is implemented */
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+       if (vchan_issue_pending(&c->vc) && !c->desc)
+               bcm2835_dma_start_desc(c);
+
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+}
+
+static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
+       struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
+       size_t period_len, enum dma_transfer_direction direction,
+       unsigned long flags, void *context)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+       enum dma_slave_buswidth dev_width;
+       struct bcm2835_desc *d;
+       dma_addr_t dev_addr;
+       unsigned int es, sync_type;
+       unsigned int frame;
+
+       /* Grab configuration */
+       if (!is_slave_direction(direction)) {
+               dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
+               return NULL;
+       }
+
+       if (direction == DMA_DEV_TO_MEM) {
+               dev_addr = c->cfg.src_addr;
+               dev_width = c->cfg.src_addr_width;
+               sync_type = BCM2835_DMA_S_DREQ;
+       } else {
+               dev_addr = c->cfg.dst_addr;
+               dev_width = c->cfg.dst_addr_width;
+               sync_type = BCM2835_DMA_D_DREQ;
+       }
+
+       /* Bus width translates to the element size (ES) */
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               es = BCM2835_DMA_DATA_TYPE_S32;
+               break;
+       default:
+               return NULL;
+       }
+
+       /* Now allocate and setup the descriptor. */
+       d = kzalloc(sizeof(*d), GFP_NOWAIT);
+       if (!d)
+               return NULL;
+
+       d->dir = direction;
+       d->frames = buf_len / period_len;
+
+       /* Allocate memory for control blocks */
+       d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
+       d->control_block_base = dma_zalloc_coherent(chan->device->dev,
+                       d->control_block_size, &d->control_block_base_phys,
+                       GFP_NOWAIT);
+
+       if (!d->control_block_base) {
+               kfree(d);
+               return NULL;
+       }
+
+       /*
+        * Iterate over all frames, create a control block
+        * for each frame and link them together.
+        */
+       for (frame = 0; frame < d->frames; frame++) {
+               struct bcm2835_dma_cb *control_block =
+                       &d->control_block_base[frame];
+
+               /* Set up addresses */
+               if (d->dir == DMA_DEV_TO_MEM) {
+                       control_block->info = BCM2835_DMA_D_INC;
+                       control_block->src = dev_addr;
+                       control_block->dst = buf_addr + frame * period_len;
+               } else {
+                       control_block->info = BCM2835_DMA_S_INC;
+                       control_block->src = buf_addr + frame * period_len;
+                       control_block->dst = dev_addr;
+               }
+
+               /* Enable interrupt */
+               control_block->info |= BCM2835_DMA_INT_EN;
+
+               /* Setup synchronization */
+               if (sync_type != 0)
+                       control_block->info |= sync_type;
+
+               /* Setup DREQ channel */
+               if (c->dreq != 0)
+                       control_block->info |=
+                               BCM2835_DMA_PER_MAP(c->dreq);
+
+               /* Length of a frame */
+               control_block->length = period_len;
+               d->size += control_block->length;
+
+               /*
+                * Next block is the next frame.
+                * This DMA engine driver currently only supports cyclic DMA.
+                * Therefore, wrap around at number of frames.
+                */
+               control_block->next = d->control_block_base_phys +
+                       sizeof(struct bcm2835_dma_cb)
+                       * ((frame + 1) % d->frames);
+       }
+
+       return vchan_tx_prep(&c->vc, &d->vd, flags);
+}
+
+static int bcm2835_dma_slave_config(struct bcm2835_chan *c,
+               struct dma_slave_config *cfg)
+{
+       if ((cfg->direction == DMA_DEV_TO_MEM &&
+            cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+           (cfg->direction == DMA_MEM_TO_DEV &&
+            cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+           !is_slave_direction(cfg->direction)) {
+               return -EINVAL;
+       }
+
+       c->cfg = *cfg;
+
+       return 0;
+}
+
+static int bcm2835_dma_terminate_all(struct bcm2835_chan *c)
+{
+       struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
+       unsigned long flags;
+       int timeout = 10000;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&c->vc.lock, flags);
+
+       /* Prevent this channel being scheduled */
+       spin_lock(&d->lock);
+       list_del_init(&c->node);
+       spin_unlock(&d->lock);
+
+       /*
+        * Stop DMA activity: we assume the callback will not be called
+        * after bcm2835_dma_abort() returns (even if it does, it will see
+        * c->desc is NULL and exit.)
+        */
+       if (c->desc) {
+               c->desc = NULL;
+               bcm2835_dma_abort(c->chan_base);
+
+               /* Wait for stopping */
+               while (--timeout) {
+                       if (!(readl(c->chan_base + BCM2835_DMA_CS) &
+                                               BCM2835_DMA_ACTIVE))
+                               break;
+
+                       cpu_relax();
+               }
+
+               if (!timeout)
+                       dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+       }
+
+       vchan_get_all_descriptors(&c->vc, &head);
+       spin_unlock_irqrestore(&c->vc.lock, flags);
+       vchan_dma_desc_free_list(&c->vc, &head);
+
+       return 0;
+}
+
+static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+       unsigned long arg)
+{
+       struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+
+       switch (cmd) {
+       case DMA_SLAVE_CONFIG:
+               return bcm2835_dma_slave_config(c,
+                               (struct dma_slave_config *)arg);
+
+       case DMA_TERMINATE_ALL:
+               return bcm2835_dma_terminate_all(c);
+
+       default:
+               return -ENXIO;
+       }
+}
+
+static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
+{
+       struct bcm2835_chan *c;
+
+       c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL);
+       if (!c)
+               return -ENOMEM;
+
+       c->vc.desc_free = bcm2835_dma_desc_free;
+       vchan_init(&c->vc, &d->ddev);
+       INIT_LIST_HEAD(&c->node);
+
+       d->ddev.chancnt++;
+
+       c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
+       c->ch = chan_id;
+       c->irq_number = irq;
+
+       return 0;
+}
+
+static void bcm2835_dma_free(struct bcm2835_dmadev *od)
+{
+       struct bcm2835_chan *c, *next;
+
+       list_for_each_entry_safe(c, next, &od->ddev.channels,
+                                vc.chan.device_node) {
+               list_del(&c->vc.chan.device_node);
+               tasklet_kill(&c->vc.task);
+       }
+}
+
+static const struct of_device_id bcm2835_dma_of_match[] = {
+       { .compatible = "brcm,bcm2835-dma", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match);
+
+static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
+                                          struct of_dma *ofdma)
+{
+       struct bcm2835_dmadev *d = ofdma->of_dma_data;
+       struct dma_chan *chan;
+
+       chan = dma_get_any_slave_channel(&d->ddev);
+       if (!chan)
+               return NULL;
+
+       /* Set DREQ from param */
+       to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
+
+       return chan;
+}
+
+static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan,
+       struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = false;
+       caps->cmd_terminate = true;
+
+       return 0;
+}
+
+static int bcm2835_dma_probe(struct platform_device *pdev)
+{
+       struct bcm2835_dmadev *od;
+       struct resource *res;
+       void __iomem *base;
+       int rc;
+       int i;
+       int irq;
+       uint32_t chans_available;
+
+       if (!pdev->dev.dma_mask)
+               pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+       rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc)
+               return rc;
+
+       od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
+       if (!od)
+               return -ENOMEM;
+
+       pdev->dev.dma_parms = &od->dma_parms;
+       dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       od->base = base;
+
+       dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
+       dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+       od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
+       od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
+       od->ddev.device_tx_status = bcm2835_dma_tx_status;
+       od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+       od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps;
+       od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
+       od->ddev.device_control = bcm2835_dma_control;
+       od->ddev.dev = &pdev->dev;
+       INIT_LIST_HEAD(&od->ddev.channels);
+       spin_lock_init(&od->lock);
+
+       platform_set_drvdata(pdev, od);
+
+       /* Request DMA channel mask from device tree */
+       if (of_property_read_u32(pdev->dev.of_node,
+                       "brcm,dma-channel-mask",
+                       &chans_available)) {
+               dev_err(&pdev->dev, "Failed to get channel mask\n");
+               rc = -EINVAL;
+               goto err_no_dma;
+       }
+
+       /*
+        * Do not use the FIQ and BULK channels,
+        * because they are used by the GPU.
+        */
+       chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK);
+
+       for (i = 0; i < pdev->num_resources; i++) {
+               irq = platform_get_irq(pdev, i);
+               if (irq < 0)
+                       break;
+
+               if (chans_available & (1 << i)) {
+                       rc = bcm2835_dma_chan_init(od, i, irq);
+                       if (rc)
+                               goto err_no_dma;
+               }
+       }
+
+       dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i);
+
+       /* Device-tree DMA controller registration */
+       rc = of_dma_controller_register(pdev->dev.of_node,
+                       bcm2835_dma_xlate, od);
+       if (rc) {
+               dev_err(&pdev->dev, "Failed to register DMA controller\n");
+               goto err_no_dma;
+       }
+
+       rc = dma_async_device_register(&od->ddev);
+       if (rc) {
+               dev_err(&pdev->dev,
+                       "Failed to register slave DMA engine device: %d\n", rc);
+               goto err_no_dma;
+       }
+
+       dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n");
+
+       return 0;
+
+err_no_dma:
+       bcm2835_dma_free(od);
+       return rc;
+}
+
+static int bcm2835_dma_remove(struct platform_device *pdev)
+{
+       struct bcm2835_dmadev *od = platform_get_drvdata(pdev);
+
+       dma_async_device_unregister(&od->ddev);
+       bcm2835_dma_free(od);
+
+       return 0;
+}
+
+static struct platform_driver bcm2835_dma_driver = {
+       .probe  = bcm2835_dma_probe,
+       .remove = bcm2835_dma_remove,
+       .driver = {
+               .name = "bcm2835-dma",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(bcm2835_dma_of_match),
+       },
+};
+
+module_platform_driver(bcm2835_dma_driver);
+
+MODULE_ALIAS("platform:bcm2835-dma");
+MODULE_DESCRIPTION("BCM2835 DMA engine driver");
+MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
+MODULE_LICENSE("GPL v2");
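
Because the driver only implements device_prep_dma_cyclic, a client
(such as the bcm2835-i2s device in the binding example) drives it
through the standard dmaengine cyclic API. A rough sketch, assuming
fifo_phys, buf_phys, buf_len, period_len and period_done are provided
by the caller:

	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_phys,    /* device FIFO address */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, /* only width accepted */
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");    /* matches dma-names */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	/* one interrupt per period; the transfer wraps around indefinitely */
	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EINVAL;
	}

	desc->callback = period_done;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);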
index c29dacff66fa951f136657536a628b6743ccda30..c18aebf7d5aa9a23199b556bc9b54c8b3237253d 100644 (file)
@@ -972,8 +972,10 @@ static int cppi41_dma_probe(struct platform_device *pdev)
                goto err_chans;
 
        irq = irq_of_parse_and_map(dev->of_node, 0);
-       if (!irq)
+       if (!irq) {
+               ret = -EINVAL;
                goto err_irq;
+       }
 
        cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
 
index 9dfcaf5c12888d3de80483329ffccc7fbbacd02a..05b6dea770a407fc94e614b82bdbdf0ef0212593 100644 (file)
@@ -31,7 +31,7 @@ module_param_string(channel, test_channel, sizeof(test_channel),
                S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");
 
-static char test_device[20];
+static char test_device[32];
 module_param_string(device, test_device, sizeof(test_device),
                S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
@@ -89,7 +89,7 @@ MODULE_PARM_DESC(verbose, "Enable \"success\" result messages (default: off)");
 struct dmatest_params {
        unsigned int    buf_size;
        char            channel[20];
-       char            device[20];
+       char            device[32];
        unsigned int    threads_per_chan;
        unsigned int    max_channels;
        unsigned int    iterations;
index 7516be4677cf7e778ba3bef2482cae9179ff41a3..13ac3f240e7963127c713f44f296bd421edf8999 100644 (file)
@@ -218,8 +218,10 @@ static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
        struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
        u32             ctllo;
 
-       /* Software emulation of LLP mode relies on interrupts to continue
-        * multi block transfer. */
+       /*
+        * Software emulation of LLP mode relies on interrupts to continue
+        * multi block transfer.
+        */
        ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;
 
        channel_writel(dwc, SAR, desc->lli.sar);
@@ -253,8 +255,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
                                                &dwc->flags);
                if (was_soft_llp) {
                        dev_err(chan2dev(&dwc->chan),
-                               "BUG: Attempted to start new LLP transfer "
-                               "inside ongoing one\n");
+                               "BUG: Attempted to start new LLP transfer inside ongoing one\n");
                        return;
                }
 
@@ -420,8 +421,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
                return;
        }
 
-       dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
-                       (unsigned long long)llp);
+       dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);
 
        list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
                /* Initial residue value */
@@ -567,9 +567,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
                        unlikely(status_xfer & dwc->mask)) {
                int i;
 
-               dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
-                               "interrupt, stopping DMA transfer\n",
-                               status_xfer ? "xfer" : "error");
+               dev_err(chan2dev(&dwc->chan),
+                       "cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
+                       status_xfer ? "xfer" : "error");
 
                spin_lock_irqsave(&dwc->lock, flags);
 
@@ -711,9 +711,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
        u32                     ctllo;
 
        dev_vdbg(chan2dev(chan),
-                       "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
-                       (unsigned long long)dest, (unsigned long long)src,
-                       len, flags);
+                       "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
+                       &dest, &src, len, flags);
 
        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1401,9 +1400,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
        /* Let's make a cyclic list */
        last->lli.llp = cdesc->desc[0]->txd.phys;
 
-       dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
-                       "period %zu periods %d\n", (unsigned long long)buf_addr,
-                       buf_len, period_len, periods);
+       dev_dbg(chan2dev(&dwc->chan),
+                       "cyclic prepared buf %pad len %zu period %zu periods %d\n",
+                       &buf_addr, buf_len, period_len, periods);
 
        cdesc->periods = periods;
        dwc->cdesc = cdesc;
@@ -1603,9 +1602,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
                        dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
                                           dwc_params);
 
-                       /* Decode maximum block size for given channel. The
+                       /*
+                        * Decode maximum block size for given channel. The
                         * stored 4 bit value represents blocks from 0x00 for 3
-                        * up to 0x0a for 4095. */
+                        * up to 0x0a for 4095.
+                        */
                        dwc->block_size =
                                (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
                        dwc->nollp =
index 2539ea0cbc6394f918fb849ffd3c6f6ca73f33a0..cd8da451d1995fef8b6d076005b17ad1a17b44d1 100644 (file)
@@ -699,8 +699,8 @@ static int edma_alloc_chan_resources(struct dma_chan *chan)
        echan->alloced = true;
        echan->slot[0] = echan->ch_num;
 
-       dev_info(dev, "allocated channel for %u:%u\n",
-                EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+       dev_dbg(dev, "allocated channel for %u:%u\n",
+               EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
 
        return 0;
 
@@ -736,7 +736,7 @@ static void edma_free_chan_resources(struct dma_chan *chan)
                echan->alloced = false;
        }
 
-       dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+       dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
 }
 
 /* Send pending descriptor to hardware */
index 1ffc24484d23cdb0edd1e6011605aff5c9a6eb07..d56e83599825b16666960f78b91ea08620be4cef 100644 (file)
@@ -41,7 +41,7 @@
  * channel is allowed to transfer before the DMA engine pauses
  * the current channel and switches to the next channel
  */
-#define FSL_DMA_MR_BWC         0x08000000
+#define FSL_DMA_MR_BWC         0x0A000000
 
 /* Special MR definition for MPC8349 */
 #define FSL_DMA_MR_EOTIE       0x00000080
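
The change matches the commit title ("change BWC from 256 bytes to 1024
bytes") if the BWC bits (27:24) hold the log2 of the byte count: 0x8
encodes 2^8 = 256 and 0xA encodes 2^10 = 1024. A hypothetical helper,
not part of the driver, that makes that assumed encoding explicit:

	/* assumed encoding: MR[27:24] = log2(bandwidth-control bytes) */
	#define FSL_DMA_MR_BWC_LOG2(n)	((u32)(n) << 24)

	#define FSL_DMA_MR_BWC		FSL_DMA_MR_BWC_LOG2(10)	/* 1024 bytes */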
index c75679d420286c522679cdb9c344549c97e7a0c4..4e7918339b1263a2720c3da11d271714dd9669ad 100644 (file)
@@ -323,6 +323,7 @@ struct sdma_engine {
        struct clk                      *clk_ipg;
        struct clk                      *clk_ahb;
        spinlock_t                      channel_0_lock;
+       u32                             script_number;
        struct sdma_script_start_addrs  *script_addrs;
        const struct sdma_driver_data   *drvdata;
 };
@@ -724,6 +725,10 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
                per_2_emi = sdma->script_addrs->app_2_mcu_addr;
                emi_2_per = sdma->script_addrs->mcu_2_app_addr;
                break;
+       case IMX_DMATYPE_SSI_DUAL:
+               per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
+               emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
+               break;
        case IMX_DMATYPE_SSI_SP:
        case IMX_DMATYPE_MMC:
        case IMX_DMATYPE_SDHC:
@@ -1238,6 +1243,7 @@ static void sdma_issue_pending(struct dma_chan *chan)
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1        34
+#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2        38
 
 static void sdma_add_scripts(struct sdma_engine *sdma,
                const struct sdma_script_start_addrs *addr)
@@ -1246,7 +1252,11 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
        s32 *saddr_arr = (u32 *)sdma->script_addrs;
        int i;
 
-       for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+       /* use the default firmware in ROM if missing external firmware */
+       if (!sdma->script_number)
+               sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+
+       for (i = 0; i < sdma->script_number; i++)
                if (addr_arr[i] > 0)
                        saddr_arr[i] = addr_arr[i];
 }
@@ -1272,6 +1282,17 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
                goto err_firmware;
        if (header->ram_code_start + header->ram_code_size > fw->size)
                goto err_firmware;
+       switch (header->version_major) {
+       case 1:
+               sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+               break;
+       case 2:
+               sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+               break;
+       default:
+               dev_err(sdma->dev, "unknown firmware version\n");
+               goto err_firmware;
+       }
 
        addr = (void *)header + header->script_addrs_start;
        ram_code = (void *)header + header->ram_code_start;
index e26075408e9b95a365dfd188cad786593604412f..a1f911aaf220e0b107c1009a74edfaca1ff06634 100644 (file)
@@ -477,7 +477,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
        dma_addr_t addr, src = 0, dst = 0;
        int num = sglen, i;
 
-       if (sgl == 0)
+       if (sgl == NULL)
                return NULL;
 
        for_each_sg(sgl, sg, sglen, i) {
@@ -817,7 +817,7 @@ static int k3_dma_resume(struct device *dev)
        return 0;
 }
 
-SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
 
 static struct platform_driver k3_pdma_driver = {
        .driver         = {
index c6a01ea8bc591c289777d2ab837ef48887cf64d1..b439679f4126e98dcd6484607971276b2baf512f 100644 (file)
@@ -5,6 +5,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #define DTADR          0x0208
 #define DCMD           0x020c
 
-#define DCSR_RUN       (1 << 31)       /* Run Bit (read / write) */
-#define DCSR_NODESC    (1 << 30)       /* No-Descriptor Fetch (read / write) */
-#define DCSR_STOPIRQEN (1 << 29)       /* Stop Interrupt Enable (read / write) */
-#define DCSR_REQPEND   (1 << 8)        /* Request Pending (read-only) */
-#define DCSR_STOPSTATE (1 << 3)        /* Stop State (read-only) */
-#define DCSR_ENDINTR   (1 << 2)        /* End Interrupt (read / write) */
-#define DCSR_STARTINTR (1 << 1)        /* Start Interrupt (read / write) */
-#define DCSR_BUSERR    (1 << 0)        /* Bus Error Interrupt (read / write) */
-
-#define DCSR_EORIRQEN  (1 << 28)       /* End of Receive Interrupt Enable (R/W) */
-#define DCSR_EORJMPEN  (1 << 27)       /* Jump to next descriptor on EOR */
-#define DCSR_EORSTOPEN (1 << 26)       /* STOP on an EOR */
-#define DCSR_SETCMPST  (1 << 25)       /* Set Descriptor Compare Status */
-#define DCSR_CLRCMPST  (1 << 24)       /* Clear Descriptor Compare Status */
-#define DCSR_CMPST     (1 << 10)       /* The Descriptor Compare Status */
-#define DCSR_EORINTR   (1 << 9)        /* The end of Receive */
-
-#define DRCMR(n)       ((((n) < 64) ? 0x0100 : 0x1100) + \
-                                (((n) & 0x3f) << 2))
-#define DRCMR_MAPVLD   (1 << 7)        /* Map Valid (read / write) */
-#define DRCMR_CHLNUM   0x1f            /* mask for Channel Number (read / write) */
+#define DCSR_RUN       BIT(31) /* Run Bit (read / write) */
+#define DCSR_NODESC    BIT(30) /* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND   BIT(8)  /* Request Pending (read-only) */
+#define DCSR_STOPSTATE BIT(3)  /* Stop State (read-only) */
+#define DCSR_ENDINTR   BIT(2)  /* End Interrupt (read / write) */
+#define DCSR_STARTINTR BIT(1)  /* Start Interrupt (read / write) */
+#define DCSR_BUSERR    BIT(0)  /* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN  BIT(28) /* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN  BIT(27) /* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
+#define DCSR_SETCMPST  BIT(25) /* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST  BIT(24) /* Clear Descriptor Compare Status */
+#define DCSR_CMPST     BIT(10) /* The Descriptor Compare Status */
+#define DCSR_EORINTR   BIT(9)  /* The end of Receive */
+
+#define DRCMR(n)       ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
+#define DRCMR_MAPVLD   BIT(7)  /* Map Valid (read / write) */
+#define DRCMR_CHLNUM   0x1f    /* mask for Channel Number (read / write) */
 
 #define DDADR_DESCADDR 0xfffffff0      /* Address of next descriptor (mask) */
-#define DDADR_STOP     (1 << 0)        /* Stop (read / write) */
-
-#define DCMD_INCSRCADDR        (1 << 31)       /* Source Address Increment Setting. */
-#define DCMD_INCTRGADDR        (1 << 30)       /* Target Address Increment Setting. */
-#define DCMD_FLOWSRC   (1 << 29)       /* Flow Control by the source. */
-#define DCMD_FLOWTRG   (1 << 28)       /* Flow Control by the target. */
-#define DCMD_STARTIRQEN        (1 << 22)       /* Start Interrupt Enable */
-#define DCMD_ENDIRQEN  (1 << 21)       /* End Interrupt Enable */
-#define DCMD_ENDIAN    (1 << 18)       /* Device Endian-ness. */
+#define DDADR_STOP     BIT(0)  /* Stop (read / write) */
+
+#define DCMD_INCSRCADDR        BIT(31) /* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR        BIT(30) /* Target Address Increment Setting. */
+#define DCMD_FLOWSRC   BIT(29) /* Flow Control by the source. */
+#define DCMD_FLOWTRG   BIT(28) /* Flow Control by the target. */
+#define DCMD_STARTIRQEN        BIT(22) /* Start Interrupt Enable */
+#define DCMD_ENDIRQEN  BIT(21) /* End Interrupt Enable */
+#define DCMD_ENDIAN    BIT(18) /* Device Endian-ness. */
 #define DCMD_BURST8    (1 << 16)       /* 8 byte burst */
 #define DCMD_BURST16   (2 << 16)       /* 16 byte burst */
 #define DCMD_BURST32   (3 << 16)       /* 32 byte burst */
@@ -132,10 +132,14 @@ struct mmp_pdma_device {
        spinlock_t phy_lock; /* protect alloc/free phy channels */
 };
 
-#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
-#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
-#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
-#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+#define tx_to_mmp_pdma_desc(tx)                                        \
+       container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh)                                   \
+       container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan)                                        \
+       container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev)                                        \
+       container_of(dmadev, struct mmp_pdma_device, device)
 
 static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
 {
@@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy)
        writel(dalgn, phy->base + DALGN);
 
        reg = (phy->idx << 2) + DCSR;
-       writel(readl(phy->base + reg) | DCSR_RUN,
-                                       phy->base + reg);
+       writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg);
 }
 
 static void disable_chan(struct mmp_pdma_phy *phy)
 {
        u32 reg;
 
-       if (phy) {
-               reg = (phy->idx << 2) + DCSR;
-               writel(readl(phy->base + reg) & ~DCSR_RUN,
-                                               phy->base + reg);
-       }
+       if (!phy)
+               return;
+
+       reg = (phy->idx << 2) + DCSR;
+       writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg);
 }
 
 static int clear_chan_irq(struct mmp_pdma_phy *phy)
@@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy)
        u32 dint = readl(phy->base + DINT);
        u32 reg = (phy->idx << 2) + DCSR;
 
-       if (dint & BIT(phy->idx)) {
-               /* clear irq */
-               dcsr = readl(phy->base + reg);
-               writel(dcsr, phy->base + reg);
-               if ((dcsr & DCSR_BUSERR) && (phy->vchan))
-                       dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
-               return 0;
-       }
-       return -EAGAIN;
+       if (!(dint & BIT(phy->idx)))
+               return -EAGAIN;
+
+       /* clear irq */
+       dcsr = readl(phy->base + reg);
+       writel(dcsr, phy->base + reg);
+       if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+               dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+
+       return 0;
 }
 
 static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
 {
        struct mmp_pdma_phy *phy = dev_id;
 
-       if (clear_chan_irq(phy) == 0) {
-               tasklet_schedule(&phy->vchan->tasklet);
-               return IRQ_HANDLED;
-       } else
+       if (clear_chan_irq(phy) != 0)
                return IRQ_NONE;
+
+       tasklet_schedule(&phy->vchan->tasklet);
+       return IRQ_HANDLED;
 }
 
 static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
@@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
 
        if (irq_num)
                return IRQ_HANDLED;
-       else
-               return IRQ_NONE;
+
+       return IRQ_NONE;
 }
 
 /* lookup free phy channel as descending priority */
@@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
         */
 
        spin_lock_irqsave(&pdev->phy_lock, flags);
-       for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+       for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
                for (i = 0; i < pdev->dma_channels; i++) {
-                       if (prio != ((i & 0xf) >> 2))
+                       if (prio != (i & 0xf) >> 2)
                                continue;
                        phy = &pdev->phy[i];
                        if (!phy->vchan) {
@@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
        if (chan->desc_pool)
                return 1;
 
-       chan->desc_pool =
-               dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
-                                 sizeof(struct mmp_pdma_desc_sw),
-                                 __alignof__(struct mmp_pdma_desc_sw), 0);
+       chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
+                                         chan->dev,
+                                         sizeof(struct mmp_pdma_desc_sw),
+                                         __alignof__(struct mmp_pdma_desc_sw),
+                                         0);
        if (!chan->desc_pool) {
                dev_err(chan->dev, "unable to allocate descriptor pool\n");
                return -ENOMEM;
        }
+
        mmp_pdma_free_phy(chan);
        chan->idle = true;
        chan->dev_addr = 0;
@@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
 }
 
 static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
-                                 struct list_head *list)
+                                   struct list_head *list)
 {
        struct mmp_pdma_desc_sw *desc, *_desc;
 
@@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
 
 static struct dma_async_tx_descriptor *
 mmp_pdma_prep_memcpy(struct dma_chan *dchan,
-       dma_addr_t dma_dst, dma_addr_t dma_src,
-       size_t len, unsigned long flags)
+                    dma_addr_t dma_dst, dma_addr_t dma_src,
+                    size_t len, unsigned long flags)
 {
        struct mmp_pdma_chan *chan;
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -515,8 +521,8 @@ fail:
 
 static struct dma_async_tx_descriptor *
 mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
-                        unsigned int sg_len, enum dma_transfer_direction dir,
-                        unsigned long flags, void *context)
+                      unsigned int sg_len, enum dma_transfer_direction dir,
+                      unsigned long flags, void *context)
 {
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
@@ -591,10 +597,11 @@ fail:
        return NULL;
 }
 
-static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
-       struct dma_chan *dchan, dma_addr_t buf_addr, size_t len,
-       size_t period_len, enum dma_transfer_direction direction,
-       unsigned long flags, void *context)
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
+                        dma_addr_t buf_addr, size_t len, size_t period_len,
+                        enum dma_transfer_direction direction,
+                        unsigned long flags, void *context)
 {
        struct mmp_pdma_chan *chan;
        struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
@@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic(
                        goto fail;
                }
 
-               new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN |
-                                       (DCMD_LENGTH & period_len);
+               new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
+                                 (DCMD_LENGTH & period_len));
                new->desc.dsadr = dma_src;
                new->desc.dtadr = dma_dst;
 
@@ -677,12 +684,11 @@ fail:
 }
 
 static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
-               unsigned long arg)
+                           unsigned long arg)
 {
        struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
        struct dma_slave_config *cfg = (void *)arg;
        unsigned long flags;
-       int ret = 0;
        u32 maxburst = 0, addr = 0;
        enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
 
@@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
                return -ENOSYS;
        }
 
-       return ret;
+       return 0;
 }
 
 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
-                       dma_cookie_t cookie, struct dma_tx_state *txstate)
+                                         dma_cookie_t cookie,
+                                         struct dma_tx_state *txstate)
 {
        return dma_cookie_status(dchan, cookie, txstate);
 }
@@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op)
        return 0;
 }
 
-static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
-                                                       int idx, int irq)
+static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
 {
        struct mmp_pdma_phy *phy  = &pdev->phy[idx];
        struct mmp_pdma_chan *chan;
        int ret;
 
-       chan = devm_kzalloc(pdev->dev,
-                       sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+       chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan),
+                           GFP_KERNEL);
        if (chan == NULL)
                return -ENOMEM;
 
@@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
        phy->base = pdev->base;
 
        if (irq) {
-               ret = devm_request_irq(pdev->dev, irq,
-                       mmp_pdma_chan_handler, 0, "pdma", phy);
+               ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
+                                      "pdma", phy);
                if (ret) {
                        dev_err(pdev->dev, "channel request irq fail!\n");
                        return ret;
@@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
        INIT_LIST_HEAD(&chan->chain_running);
 
        /* register virt channel to dma engine */
-       list_add_tail(&chan->chan.device_node,
-                       &pdev->device.channels);
+       list_add_tail(&chan->chan.device_node, &pdev->device.channels);
 
        return 0;
 }
@@ -894,14 +899,12 @@ static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
 {
        struct mmp_pdma_device *d = ofdma->of_dma_data;
        struct dma_chan *chan;
-       struct mmp_pdma_chan *c;
 
        chan = dma_get_any_slave_channel(&d->device);
        if (!chan)
                return NULL;
 
-       c = to_mmp_pdma_chan(chan);
-       c->drcmr = dma_spec->args[0];
+       to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
 
        return chan;
 }
@@ -918,6 +921,7 @@ static int mmp_pdma_probe(struct platform_device *op)
        pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
        if (!pdev)
                return -ENOMEM;
+
        pdev->dev = &op->dev;
 
        spin_lock_init(&pdev->phy_lock);
@@ -929,8 +933,8 @@ static int mmp_pdma_probe(struct platform_device *op)
 
        of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
        if (of_id)
-               of_property_read_u32(pdev->dev->of_node,
-                               "#dma-channels", &dma_channels);
+               of_property_read_u32(pdev->dev->of_node, "#dma-channels",
+                                    &dma_channels);
        else if (pdata && pdata->dma_channels)
                dma_channels = pdata->dma_channels;
        else
@@ -942,8 +946,9 @@ static int mmp_pdma_probe(struct platform_device *op)
                        irq_num++;
        }
 
-       pdev->phy = devm_kzalloc(pdev->dev,
-               dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+       pdev->phy = devm_kcalloc(pdev->dev,
+                                dma_channels, sizeof(struct mmp_pdma_chan),
+                                GFP_KERNEL);
        if (pdev->phy == NULL)
                return -ENOMEM;
 
@@ -952,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
        if (irq_num != dma_channels) {
                /* all chan share one irq, demux inside */
                irq = platform_get_irq(op, 0);
-               ret = devm_request_irq(pdev->dev, irq,
-                       mmp_pdma_int_handler, 0, "pdma", pdev);
+               ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
+                                      "pdma", pdev);
                if (ret)
                        return ret;
        }
@@ -1029,7 +1034,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
        if (chan->device->dev->driver != &mmp_pdma_driver.driver)
                return false;
 
-       c->drcmr = *(unsigned int *) param;
+       c->drcmr = *(unsigned int *)param;
 
        return true;
 }
@@ -1037,6 +1042,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
 
 module_platform_driver(mmp_pdma_driver);
 
-MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver");
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
 MODULE_AUTHOR("Marvell International Ltd.");
 MODULE_LICENSE("GPL v2");
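
The of_xlate rework above builds on the new dma_get_any_slave_channel() API: it
hands out any free channel and stores the DT request line in drcmr, so DT-based
consumers no longer need mmp_pdma_filter_fn(). A minimal consumer sketch, where
"rx" is an assumed dma-names entry, not something defined by this patch:

    /* Sketch: requesting an mmp_pdma channel from a DT-based client.
     * The xlate above picks any free channel and programs drcmr from
     * the first cell of the client's "dmas" specifier.
     */
    struct dma_chan *chan = dma_request_slave_channel(dev, "rx");

    if (!chan)
            return -ENODEV; /* no free channel, or no "rx" entry */
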
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 3ddacc14a7366611ffb089d21c70fb4ba3c896c5..33f96aaa80c759aff2f8098e2135dd6f1b67b90a 100644 (file)
@@ -121,11 +121,13 @@ struct mmp_tdma_chan {
        int                             idx;
        enum mmp_tdma_type              type;
        int                             irq;
-       unsigned long                   reg_base;
+       void __iomem                    *reg_base;
 
        size_t                          buf_len;
        size_t                          period_len;
        size_t                          pos;
+
+       struct gen_pool                 *pool;
 };
 
 #define TDMA_CHANNEL_NUM 2
@@ -182,7 +184,7 @@ static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
 
 static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
 {
-       unsigned int tdcr;
+       unsigned int tdcr = 0;
 
        mmp_tdma_disable_chan(tdmac);
 
@@ -324,7 +326,7 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
        struct gen_pool *gpool;
        int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
-       gpool = sram_get_gpool("asram");
+       gpool = tdmac->pool;
        if (tdmac->desc_arr)
                gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
                                size);
@@ -374,7 +376,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
        struct gen_pool *gpool;
        int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
 
-       gpool = sram_get_gpool("asram");
+       gpool = tdmac->pool;
        if (!gpool)
                return NULL;
 
@@ -505,7 +507,8 @@ static int mmp_tdma_remove(struct platform_device *pdev)
 }
 
 static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
-                                               int idx, int irq, int type)
+                                       int idx, int irq,
+                                       int type, struct gen_pool *pool)
 {
        struct mmp_tdma_chan *tdmac;
 
@@ -526,7 +529,8 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
        tdmac->chan.device = &tdev->device;
        tdmac->idx         = idx;
        tdmac->type        = type;
-       tdmac->reg_base    = (unsigned long)tdev->base + idx * 4;
+       tdmac->reg_base    = tdev->base + idx * 4;
+       tdmac->pool        = pool;
        tdmac->status = DMA_COMPLETE;
        tdev->tdmac[tdmac->idx] = tdmac;
        tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
@@ -553,6 +557,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        int i, ret;
        int irq = 0, irq_num = 0;
        int chan_num = TDMA_CHANNEL_NUM;
+       struct gen_pool *pool;
 
        of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
        if (of_id)
@@ -579,6 +584,15 @@ static int mmp_tdma_probe(struct platform_device *pdev)
 
        INIT_LIST_HEAD(&tdev->device.channels);
 
+       if (pdev->dev.of_node)
+               pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+       else
+               pool = sram_get_gpool("asram");
+       if (!pool) {
+               dev_err(&pdev->dev, "asram pool not available\n");
+               return -ENOMEM;
+       }
+
        if (irq_num != chan_num) {
                irq = platform_get_irq(pdev, 0);
                ret = devm_request_irq(&pdev->dev, irq,
@@ -590,7 +604,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
        /* initialize channel parameters */
        for (i = 0; i < chan_num; i++) {
                irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
-               ret = mmp_tdma_chan_init(tdev, i, irq, type);
+               ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
                if (ret)
                        return ret;
        }
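
Both pool lookups above yield a struct gen_pool; the descriptor helpers shown
earlier then draw from it through the plain genalloc API, roughly:

    /* Sketch: cf. mmp_tdma_alloc_descriptor() above. The pool comes from
     * of_get_named_gen_pool() on DT boots and sram_get_gpool() otherwise.
     */
    struct gen_pool *gpool = tdmac->pool;
    int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
    void *va = (void *)gen_pool_alloc(gpool, size);

    if (!va)
            return NULL;
    /* teardown mirrors it: gen_pool_free(gpool, (unsigned long)va, size); */
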
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
new file mode 100644 (file)
index 0000000..3258e48
--- /dev/null
@@ -0,0 +1,699 @@
+/*
+ * MOXA ART SoCs DMA Engine support.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/bitops.h>
+
+#include <asm/cacheflush.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+#define APB_DMA_MAX_CHANNEL                    4
+
+#define REG_OFF_ADDRESS_SOURCE                 0
+#define REG_OFF_ADDRESS_DEST                   4
+#define REG_OFF_CYCLES                         8
+#define REG_OFF_CTRL                           12
+#define REG_OFF_CHAN_SIZE                      16
+
+#define APB_DMA_ENABLE                         BIT(0)
+#define APB_DMA_FIN_INT_STS                    BIT(1)
+#define APB_DMA_FIN_INT_EN                     BIT(2)
+#define APB_DMA_BURST_MODE                     BIT(3)
+#define APB_DMA_ERR_INT_STS                    BIT(4)
+#define APB_DMA_ERR_INT_EN                     BIT(5)
+
+/*
+ * Unset: APB
+ * Set:   AHB
+ */
+#define APB_DMA_SOURCE_SELECT                  0x40
+#define APB_DMA_DEST_SELECT                    0x80
+
+#define APB_DMA_SOURCE                         0x100
+#define APB_DMA_DEST                           0x1000
+
+#define APB_DMA_SOURCE_MASK                    0x700
+#define APB_DMA_DEST_MASK                      0x7000
+
+/*
+ * 000: No increment
+ * 001: +1 (Burst=0), +4  (Burst=1)
+ * 010: +2 (Burst=0), +8  (Burst=1)
+ * 011: +4 (Burst=0), +16 (Burst=1)
+ * 101: -1 (Burst=0), -4  (Burst=1)
+ * 110: -2 (Burst=0), -8  (Burst=1)
+ * 111: -4 (Burst=0), -16 (Burst=1)
+ */
+#define APB_DMA_SOURCE_INC_0                   0
+#define APB_DMA_SOURCE_INC_1_4                 0x100
+#define APB_DMA_SOURCE_INC_2_8                 0x200
+#define APB_DMA_SOURCE_INC_4_16                        0x300
+#define APB_DMA_SOURCE_DEC_1_4                 0x500
+#define APB_DMA_SOURCE_DEC_2_8                 0x600
+#define APB_DMA_SOURCE_DEC_4_16                        0x700
+#define APB_DMA_DEST_INC_0                     0
+#define APB_DMA_DEST_INC_1_4                   0x1000
+#define APB_DMA_DEST_INC_2_8                   0x2000
+#define APB_DMA_DEST_INC_4_16                  0x3000
+#define APB_DMA_DEST_DEC_1_4                   0x5000
+#define APB_DMA_DEST_DEC_2_8                   0x6000
+#define APB_DMA_DEST_DEC_4_16                  0x7000
+
+/*
+ * Request signal selects source/destination address for DMA hardware handshake.
+ *
+ * The request line number is a property of the DMA controller itself,
+ * e.g. MMC must always request channels where dma_slave_config->slave_id is 5.
+ *
+ * 0:    No request / Grant signal
+ * 1-15: Request    / Grant signal
+ */
+#define APB_DMA_SOURCE_REQ_NO                  0x1000000
+#define APB_DMA_SOURCE_REQ_NO_MASK             0xf000000
+#define APB_DMA_DEST_REQ_NO                    0x10000
+#define APB_DMA_DEST_REQ_NO_MASK               0xf0000
+
+#define APB_DMA_DATA_WIDTH                     0x100000
+#define APB_DMA_DATA_WIDTH_MASK                        0x300000
+/*
+ * Data width of transfer:
+ *
+ * 00: Word
+ * 01: Half
+ * 10: Byte
+ */
+#define APB_DMA_DATA_WIDTH_4                   0
+#define APB_DMA_DATA_WIDTH_2                   0x100000
+#define APB_DMA_DATA_WIDTH_1                   0x200000
+
+#define APB_DMA_CYCLES_MASK                    0x00ffffff
+
+#define MOXART_DMA_DATA_TYPE_S8                        0x00
+#define MOXART_DMA_DATA_TYPE_S16               0x01
+#define MOXART_DMA_DATA_TYPE_S32               0x02
+
+struct moxart_sg {
+       dma_addr_t addr;
+       uint32_t len;
+};
+
+struct moxart_desc {
+       enum dma_transfer_direction     dma_dir;
+       dma_addr_t                      dev_addr;
+       unsigned int                    sglen;
+       unsigned int                    dma_cycles;
+       struct virt_dma_desc            vd;
+       uint8_t                         es;
+       struct moxart_sg                sg[0];
+};
+
+struct moxart_chan {
+       struct virt_dma_chan            vc;
+
+       void __iomem                    *base;
+       struct moxart_desc              *desc;
+
+       struct dma_slave_config         cfg;
+
+       bool                            allocated;
+       bool                            error;
+       int                             ch_num;
+       unsigned int                    line_reqno;
+       unsigned int                    sgidx;
+};
+
+struct moxart_dmadev {
+       struct dma_device               dma_slave;
+       struct moxart_chan              slave_chans[APB_DMA_MAX_CHANNEL];
+};
+
+struct moxart_filter_data {
+       struct moxart_dmadev            *mdc;
+       struct of_phandle_args          *dma_spec;
+};
+
+static const unsigned int es_bytes[] = {
+       [MOXART_DMA_DATA_TYPE_S8] = 1,
+       [MOXART_DMA_DATA_TYPE_S16] = 2,
+       [MOXART_DMA_DATA_TYPE_S32] = 4,
+};
+
+static struct device *chan2dev(struct dma_chan *chan)
+{
+       return &chan->dev->device;
+}
+
+static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct moxart_chan, vc.chan);
+}
+
+static inline struct moxart_desc *to_moxart_dma_desc(
+       struct dma_async_tx_descriptor *t)
+{
+       return container_of(t, struct moxart_desc, vd.tx);
+}
+
+static void moxart_dma_desc_free(struct virt_dma_desc *vd)
+{
+       kfree(container_of(vd, struct moxart_desc, vd));
+}
+
+static int moxart_terminate_all(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+       u32 ctrl;
+
+       dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch);
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+
+       if (ch->desc)
+               ch->desc = NULL;
+
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+
+       vchan_get_all_descriptors(&ch->vc, &head);
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+       vchan_dma_desc_free_list(&ch->vc, &head);
+
+       return 0;
+}
+
+static int moxart_slave_config(struct dma_chan *chan,
+                              struct dma_slave_config *cfg)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       u32 ctrl;
+
+       ch->cfg = *cfg;
+
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl |= APB_DMA_BURST_MODE;
+       ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK);
+       ctrl &= ~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK);
+
+       switch (ch->cfg.src_addr_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               ctrl |= APB_DMA_DATA_WIDTH_1;
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_1_4;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_1_4;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               ctrl |= APB_DMA_DATA_WIDTH_2;
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_2_8;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_2_8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               ctrl &= ~APB_DMA_DATA_WIDTH;
+               if (ch->cfg.direction != DMA_MEM_TO_DEV)
+                       ctrl |= APB_DMA_DEST_INC_4_16;
+               else
+                       ctrl |= APB_DMA_SOURCE_INC_4_16;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (ch->cfg.direction == DMA_MEM_TO_DEV) {
+               ctrl &= ~APB_DMA_DEST_SELECT;
+               ctrl |= APB_DMA_SOURCE_SELECT;
+               ctrl |= (ch->line_reqno << 16 &
+                        APB_DMA_DEST_REQ_NO_MASK);
+       } else {
+               ctrl |= APB_DMA_DEST_SELECT;
+               ctrl &= ~APB_DMA_SOURCE_SELECT;
+               ctrl |= (ch->line_reqno << 24 &
+                        APB_DMA_SOURCE_REQ_NO_MASK);
+       }
+
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+
+       return 0;
+}
+
+static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+                         unsigned long arg)
+{
+       int ret = 0;
+
+       switch (cmd) {
+       case DMA_PAUSE:
+       case DMA_RESUME:
+               return -EINVAL;
+       case DMA_TERMINATE_ALL:
+               moxart_terminate_all(chan);
+               break;
+       case DMA_SLAVE_CONFIG:
+               ret = moxart_slave_config(chan, (struct dma_slave_config *)arg);
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       return ret;
+}
+
+static struct dma_async_tx_descriptor *moxart_prep_slave_sg(
+       struct dma_chan *chan, struct scatterlist *sgl,
+       unsigned int sg_len, enum dma_transfer_direction dir,
+       unsigned long tx_flags, void *context)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct moxart_desc *d;
+       enum dma_slave_buswidth dev_width;
+       dma_addr_t dev_addr;
+       struct scatterlist *sgent;
+       unsigned int es;
+       unsigned int i;
+
+       if (!is_slave_direction(dir)) {
+               dev_err(chan2dev(chan), "%s: invalid DMA direction\n",
+                       __func__);
+               return NULL;
+       }
+
+       if (dir == DMA_DEV_TO_MEM) {
+               dev_addr = ch->cfg.src_addr;
+               dev_width = ch->cfg.src_addr_width;
+       } else {
+               dev_addr = ch->cfg.dst_addr;
+               dev_width = ch->cfg.dst_addr_width;
+       }
+
+       switch (dev_width) {
+       case DMA_SLAVE_BUSWIDTH_1_BYTE:
+               es = MOXART_DMA_DATA_TYPE_S8;
+               break;
+       case DMA_SLAVE_BUSWIDTH_2_BYTES:
+               es = MOXART_DMA_DATA_TYPE_S16;
+               break;
+       case DMA_SLAVE_BUSWIDTH_4_BYTES:
+               es = MOXART_DMA_DATA_TYPE_S32;
+               break;
+       default:
+               dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n",
+                       __func__, dev_width);
+               return NULL;
+       }
+
+       d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC);
+       if (!d)
+               return NULL;
+
+       d->dma_dir = dir;
+       d->dev_addr = dev_addr;
+       d->es = es;
+
+       for_each_sg(sgl, sgent, sg_len, i) {
+               d->sg[i].addr = sg_dma_address(sgent);
+               d->sg[i].len = sg_dma_len(sgent);
+       }
+
+       d->sglen = sg_len;
+
+       ch->error = 0;
+
+       return vchan_tx_prep(&ch->vc, &d->vd, tx_flags);
+}
+
+static struct dma_chan *moxart_of_xlate(struct of_phandle_args *dma_spec,
+                                       struct of_dma *ofdma)
+{
+       struct moxart_dmadev *mdc = ofdma->of_dma_data;
+       struct dma_chan *chan;
+       struct moxart_chan *ch;
+
+       chan = dma_get_any_slave_channel(&mdc->dma_slave);
+       if (!chan)
+               return NULL;
+
+       ch = to_moxart_dma_chan(chan);
+       ch->line_reqno = dma_spec->args[0];
+
+       return chan;
+}
+
+static int moxart_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+       dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n",
+               __func__, ch->ch_num);
+       ch->allocated = 1;
+
+       return 0;
+}
+
+static void moxart_free_chan_resources(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+
+       vchan_free_chan_resources(&ch->vc);
+
+       dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n",
+               __func__, ch->ch_num);
+       ch->allocated = 0;
+}
+
+static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr,
+                                 dma_addr_t dst_addr)
+{
+       writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE);
+       writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST);
+}
+
+static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len)
+{
+       struct moxart_desc *d = ch->desc;
+       unsigned int sglen_div = es_bytes[d->es];
+
+       d->dma_cycles = len >> sglen_div;
+
+       /*
+        * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16
+        * bytes (when width is APB_DMA_DATA_WIDTH_4).
+        */
+       writel(d->dma_cycles, ch->base + REG_OFF_CYCLES);
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n",
+               __func__, d->dma_cycles, len);
+}
+
+static void moxart_start_dma(struct moxart_chan *ch)
+{
+       u32 ctrl;
+
+       ctrl = readl(ch->base + REG_OFF_CTRL);
+       ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN);
+       writel(ctrl, ch->base + REG_OFF_CTRL);
+}
+
+static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx)
+{
+       struct moxart_desc *d = ch->desc;
+       struct moxart_sg *sg = ch->desc->sg + idx;
+
+       if (ch->desc->dma_dir == DMA_MEM_TO_DEV)
+               moxart_dma_set_params(ch, sg->addr, d->dev_addr);
+       else if (ch->desc->dma_dir == DMA_DEV_TO_MEM)
+               moxart_dma_set_params(ch, d->dev_addr, sg->addr);
+
+       moxart_set_transfer_params(ch, sg->len);
+
+       moxart_start_dma(ch);
+}
+
+static void moxart_dma_start_desc(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct virt_dma_desc *vd;
+
+       vd = vchan_next_desc(&ch->vc);
+
+       if (!vd) {
+               ch->desc = NULL;
+               return;
+       }
+
+       list_del(&vd->node);
+
+       ch->desc = to_moxart_dma_desc(&vd->tx);
+       ch->sgidx = 0;
+
+       moxart_dma_start_sg(ch, 0);
+}
+
+static void moxart_issue_pending(struct dma_chan *chan)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+       if (vchan_issue_pending(&ch->vc) && !ch->desc)
+               moxart_dma_start_desc(chan);
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+}
+
+static size_t moxart_dma_desc_size(struct moxart_desc *d,
+                                  unsigned int completed_sgs)
+{
+       unsigned int i;
+       size_t size;
+
+       for (size = i = completed_sgs; i < d->sglen; i++)
+               size += d->sg[i].len;
+
+       return size;
+}
+
+static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch)
+{
+       size_t size;
+       unsigned int completed_cycles, cycles;
+
+       size = moxart_dma_desc_size(ch->desc, ch->sgidx);
+       cycles = readl(ch->base + REG_OFF_CYCLES);
+       completed_cycles = (ch->desc->dma_cycles - cycles);
+       size -= completed_cycles << es_bytes[ch->desc->es];
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size);
+
+       return size;
+}
+
+static enum dma_status moxart_tx_status(struct dma_chan *chan,
+                                       dma_cookie_t cookie,
+                                       struct dma_tx_state *txstate)
+{
+       struct moxart_chan *ch = to_moxart_dma_chan(chan);
+       struct virt_dma_desc *vd;
+       struct moxart_desc *d;
+       enum dma_status ret;
+       unsigned long flags;
+
+       /*
+        * dma_cookie_status() assigns initial residue value.
+        */
+       ret = dma_cookie_status(chan, cookie, txstate);
+
+       spin_lock_irqsave(&ch->vc.lock, flags);
+       vd = vchan_find_desc(&ch->vc, cookie);
+       if (vd) {
+               d = to_moxart_dma_desc(&vd->tx);
+               txstate->residue = moxart_dma_desc_size(d, 0);
+       } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) {
+               txstate->residue = moxart_dma_desc_size_in_flight(ch);
+       }
+       spin_unlock_irqrestore(&ch->vc.lock, flags);
+
+       if (ch->error)
+               return DMA_ERROR;
+
+       return ret;
+}
+
+static void moxart_dma_init(struct dma_device *dma, struct device *dev)
+{
+       dma->device_prep_slave_sg               = moxart_prep_slave_sg;
+       dma->device_alloc_chan_resources        = moxart_alloc_chan_resources;
+       dma->device_free_chan_resources         = moxart_free_chan_resources;
+       dma->device_issue_pending               = moxart_issue_pending;
+       dma->device_tx_status                   = moxart_tx_status;
+       dma->device_control                     = moxart_control;
+       dma->dev                                = dev;
+
+       INIT_LIST_HEAD(&dma->channels);
+}
+
+static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
+{
+       struct moxart_dmadev *mc = devid;
+       struct moxart_chan *ch = &mc->slave_chans[0];
+       unsigned int i;
+       unsigned long flags;
+       u32 ctrl;
+
+       dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
+
+       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+               if (!ch->allocated)
+                       continue;
+
+               ctrl = readl(ch->base + REG_OFF_CTRL);
+
+               dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n",
+                       __func__, ch, ch->base, ctrl);
+
+               if (ctrl & APB_DMA_FIN_INT_STS) {
+                       ctrl &= ~APB_DMA_FIN_INT_STS;
+                       if (ch->desc) {
+                               spin_lock_irqsave(&ch->vc.lock, flags);
+                               if (++ch->sgidx < ch->desc->sglen) {
+                                       moxart_dma_start_sg(ch, ch->sgidx);
+                               } else {
+                                       vchan_cookie_complete(&ch->desc->vd);
+                                       moxart_dma_start_desc(&ch->vc.chan);
+                               }
+                               spin_unlock_irqrestore(&ch->vc.lock, flags);
+                       }
+               }
+
+               if (ctrl & APB_DMA_ERR_INT_STS) {
+                       ctrl &= ~APB_DMA_ERR_INT_STS;
+                       ch->error = 1;
+               }
+
+               writel(ctrl, ch->base + REG_OFF_CTRL);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int moxart_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *node = dev->of_node;
+       struct resource *res;
+       static void __iomem *dma_base_addr;
+       int ret, i;
+       unsigned int irq;
+       struct moxart_chan *ch;
+       struct moxart_dmadev *mdc;
+
+       mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL);
+       if (!mdc) {
+               dev_err(dev, "can't allocate DMA container\n");
+               return -ENOMEM;
+       }
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (irq == NO_IRQ) {
+               dev_err(dev, "no IRQ resource\n");
+               return -EINVAL;
+       }
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       dma_base_addr = devm_ioremap_resource(dev, res);
+       if (IS_ERR(dma_base_addr))
+               return PTR_ERR(dma_base_addr);
+
+       dma_cap_zero(mdc->dma_slave.cap_mask);
+       dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask);
+       dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask);
+
+       moxart_dma_init(&mdc->dma_slave, dev);
+
+       ch = &mdc->slave_chans[0];
+       for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) {
+               ch->ch_num = i;
+               ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE;
+               ch->allocated = 0;
+
+               ch->vc.desc_free = moxart_dma_desc_free;
+               vchan_init(&ch->vc, &mdc->dma_slave);
+
+               dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n",
+                       __func__, i, ch->ch_num, ch->base);
+       }
+
+       platform_set_drvdata(pdev, mdc);
+
+       ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0,
+                              "moxart-dma-engine", mdc);
+       if (ret) {
+               dev_err(dev, "devm_request_irq failed\n");
+               return ret;
+       }
+
+       ret = dma_async_device_register(&mdc->dma_slave);
+       if (ret) {
+               dev_err(dev, "dma_async_device_register failed\n");
+               return ret;
+       }
+
+       ret = of_dma_controller_register(node, moxart_of_xlate, mdc);
+       if (ret) {
+               dev_err(dev, "of_dma_controller_register failed\n");
+               dma_async_device_unregister(&mdc->dma_slave);
+               return ret;
+       }
+
+       dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq);
+
+       return 0;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+       struct moxart_dmadev *m = platform_get_drvdata(pdev);
+
+       dma_async_device_unregister(&m->dma_slave);
+
+       if (pdev->dev.of_node)
+               of_dma_controller_free(pdev->dev.of_node);
+
+       return 0;
+}
+
+static const struct of_device_id moxart_dma_match[] = {
+       { .compatible = "moxa,moxart-dma" },
+       { }
+};
+
+static struct platform_driver moxart_driver = {
+       .probe  = moxart_probe,
+       .remove = moxart_remove,
+       .driver = {
+               .name           = "moxart-dma-engine",
+               .owner          = THIS_MODULE,
+               .of_match_table = moxart_dma_match,
+       },
+};
+
+static int moxart_init(void)
+{
+       return platform_driver_register(&moxart_driver);
+}
+subsys_initcall(moxart_init);
+
+static void __exit moxart_exit(void)
+{
+       platform_driver_unregister(&moxart_driver);
+}
+module_exit(moxart_exit);
+
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
+MODULE_DESCRIPTION("MOXART DMA engine driver");
+MODULE_LICENSE("GPL v2");
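
Putting the new driver together: a peripheral driver obtains a channel through
moxart_of_xlate(), which records the request line in line_reqno, then drives it
with the generic dmaengine slave API. A sketch, with the channel, scatterlist
and FIFO address assumed to exist in the client:

    /* Sketch: driving a MOXA ART DMA channel from a client driver. */
    struct dma_slave_config cfg = {
            .direction      = DMA_DEV_TO_MEM,
            .src_addr       = fifo_phys_addr,       /* assumed */
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
    };
    struct dma_async_tx_descriptor *desc;

    dmaengine_slave_config(chan, &cfg);     /* -> moxart_slave_config() */
    desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                   DMA_PREP_INTERRUPT);
    if (desc) {
            dmaengine_submit(desc);
            dma_async_issue_pending(chan);  /* -> moxart_issue_pending() */
    }
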
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index 2f66cf4e54fe367754378c3c8be213fe20bb8f64..362e7c49f2e1ad9d264eef1acff1b3102f0e1212 100644 (file)
@@ -190,7 +190,7 @@ static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
 {
        struct omap_chan *c = to_omap_dma_chan(chan);
 
-       dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
+       dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);
 
        return omap_request_dma(c->dma_sig, "DMA engine",
                omap_dma_callback, c, &c->dma_ch);
@@ -203,7 +203,7 @@ static void omap_dma_free_chan_resources(struct dma_chan *chan)
        vchan_free_chan_resources(&c->vc);
        omap_free_dma(c->dma_ch);
 
-       dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
+       dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
 }
 
 static size_t omap_dma_sg_size(struct omap_sg *sg)
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index c90edecee4633c3b7d9b76777534ab424a66ac7a..73fa9b7a10ab36b05dbc54c2850325c1e1a0b566 100644 (file)
@@ -543,7 +543,9 @@ struct dma_pl330_chan {
        /* DMA-Engine Channel */
        struct dma_chan chan;
 
-       /* List of to be xfered descriptors */
+       /* List of submitted descriptors */
+       struct list_head submitted_list;
+       /* List of issued descriptors */
        struct list_head work_list;
        /* List of completed descriptors */
        struct list_head completed_list;
@@ -578,12 +580,16 @@ struct dma_pl330_dmac {
        /* DMA-Engine Device */
        struct dma_device ddma;
 
+       /* Holds info about sg limitations */
+       struct device_dma_parameters dma_parms;
+
        /* Pool of descriptors available for the DMAC's channels */
        struct list_head desc_pool;
        /* To protect desc_pool manipulation */
        spinlock_t pool_lock;
 
        /* Peripheral channels connected to this DMAC */
+       unsigned int num_peripherals;
        struct dma_pl330_chan *peripherals; /* keep at end */
 };
 
@@ -606,11 +612,6 @@ struct dma_pl330_desc {
        struct dma_pl330_chan *pchan;
 };
 
-struct dma_pl330_filter_args {
-       struct dma_pl330_dmac *pdmac;
-       unsigned int chan_id;
-};
-
 static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
 {
        if (r && r->xfer_cb)
@@ -2298,16 +2299,6 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
        tasklet_schedule(&pch->task);
 }
 
-static bool pl330_dt_filter(struct dma_chan *chan, void *param)
-{
-       struct dma_pl330_filter_args *fargs = param;
-
-       if (chan->device != &fargs->pdmac->ddma)
-               return false;
-
-       return (chan->chan_id == fargs->chan_id);
-}
-
 bool pl330_filter(struct dma_chan *chan, void *param)
 {
        u8 *peri_id;
@@ -2325,23 +2316,16 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
 {
        int count = dma_spec->args_count;
        struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
-       struct dma_pl330_filter_args fargs;
-       dma_cap_mask_t cap;
-
-       if (!pdmac)
-               return NULL;
+       unsigned int chan_id;
 
        if (count != 1)
                return NULL;
 
-       fargs.pdmac = pdmac;
-       fargs.chan_id = dma_spec->args[0];
-
-       dma_cap_zero(cap);
-       dma_cap_set(DMA_SLAVE, cap);
-       dma_cap_set(DMA_CYCLIC, cap);
+       chan_id = dma_spec->args[0];
+       if (chan_id >= pdmac->num_peripherals)
+               return NULL;
 
-       return dma_request_channel(cap, pl330_dt_filter, &fargs);
+       return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
 }
 
 static int pl330_alloc_chan_resources(struct dma_chan *chan)
@@ -2385,6 +2369,11 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
                pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);
 
                /* Mark all desc done */
+               list_for_each_entry(desc, &pch->submitted_list, node) {
+                       desc->status = FREE;
+                       dma_cookie_complete(&desc->txd);
+               }
+
                list_for_each_entry(desc, &pch->work_list , node) {
                        desc->status = FREE;
                        dma_cookie_complete(&desc->txd);
@@ -2395,6 +2384,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
                        dma_cookie_complete(&desc->txd);
                }
 
+               list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
                list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
                list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
                spin_unlock_irqrestore(&pch->lock, flags);
@@ -2453,7 +2443,14 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 
 static void pl330_issue_pending(struct dma_chan *chan)
 {
-       pl330_tasklet((unsigned long) to_pchan(chan));
+       struct dma_pl330_chan *pch = to_pchan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pch->lock, flags);
+       list_splice_tail_init(&pch->submitted_list, &pch->work_list);
+       spin_unlock_irqrestore(&pch->lock, flags);
+
+       pl330_tasklet((unsigned long)pch);
 }
 
 /*
@@ -2480,11 +2477,11 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 
                dma_cookie_assign(&desc->txd);
 
-               list_move_tail(&desc->node, &pch->work_list);
+               list_move_tail(&desc->node, &pch->submitted_list);
        }
 
        cookie = dma_cookie_assign(&last->txd);
-       list_add_tail(&last->node, &pch->work_list);
+       list_add_tail(&last->node, &pch->submitted_list);
        spin_unlock_irqrestore(&pch->lock, flags);
 
        return cookie;
@@ -2960,6 +2957,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        else
                num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);
 
+       pdmac->num_peripherals = num_chan;
+
        pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
        if (!pdmac->peripherals) {
                ret = -ENOMEM;
@@ -2974,6 +2973,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
                else
                        pch->chan.private = adev->dev.of_node;
 
+               INIT_LIST_HEAD(&pch->submitted_list);
                INIT_LIST_HEAD(&pch->work_list);
                INIT_LIST_HEAD(&pch->completed_list);
                spin_lock_init(&pch->lock);
@@ -3021,6 +3021,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
                        "unable to register DMA to the generic DT DMA helpers\n");
                }
        }
+
+       adev->dev.dma_parms = &pdmac->dma_parms;
+
        /*
         * This is the limit for transfers with a buswidth of 1, larger
         * buswidths will have larger limits.
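
The new submitted_list above enforces the dmaengine contract that submitted
descriptors must not start before issue_pending. From a client's point of view:

    /* Sketch: descriptor flow through pl330 after this change. */
    dma_cookie_t cookie;

    cookie = dmaengine_submit(desc);    /* -> pch->submitted_list only */
    /* further descriptors may be submitted and batched here */
    dma_async_issue_pending(chan);      /* splices submitted_list onto
                                         * work_list; pl330_tasklet() then
                                         * feeds the hardware */
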
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8bba298535b0984e241793403af6920399d7366d..ce7a8d7564ba6f96649407a5840323eb92669350 100644 (file)
@@ -4114,6 +4114,7 @@ static int ppc440spe_adma_probe(struct platform_device *ofdev)
        regs = ioremap(res.start, resource_size(&res));
        if (!regs) {
                dev_err(&ofdev->dev, "failed to ioremap regs!\n");
+               ret = -ENOMEM;
                goto err_regs_alloc;
        }
 
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 6aec3ad814d37f16b69c51f44347d9826e411885..d4d3a3109b163f3c3a4a471cfdbc82c838024437 100644 (file)
@@ -640,6 +640,25 @@ bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
 }
 EXPORT_SYMBOL(sirfsoc_dma_filter_id);
 
+#define SIRFSOC_DMA_BUSWIDTHS \
+       (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
+       BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+       BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
+       struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+       caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = true;
+       caps->cmd_terminate = true;
+
+       return 0;
+}
+
 static int sirfsoc_dma_probe(struct platform_device *op)
 {
        struct device_node *dn = op->dev.of_node;
@@ -712,6 +731,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
        dma->device_tx_status = sirfsoc_dma_tx_status;
        dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
        dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+       dma->device_slave_caps = sirfsoc_dma_device_slave_caps;
 
        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);
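
With device_slave_caps wired up, generic code can interrogate the controller
before configuring a channel, via the dma_get_slave_caps() helper that ends up
in sirfsoc_dma_device_slave_caps(). A sketch:

    struct dma_slave_caps caps;

    if (!dma_get_slave_caps(chan, &caps)) {
            /* e.g. reject an unsupported bus width up front */
            if (!(caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
                    return -EINVAL;
    }
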
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index d11bb3620f2783115b7a91058a297dfbf657033d..03ad64ecaaf043a4325dd6d7d325e676672a16b4 100644 (file)
 #define TEGRA_APBDMA_APBSEQ_DATA_SWAP          BIT(27)
 #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1                (1 << 16)
 
+/* Tegra148 specific registers */
+#define TEGRA_APBDMA_CHAN_WCOUNT               0x20
+
+#define TEGRA_APBDMA_CHAN_WORD_TRANSFER                0x24
+
 /*
  * If any burst is in flight and DMA paused then this is the time to complete
  * on-flight burst and update DMA status register.
 /* Channel base address offset from APBDMA base address */
 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET   0x1000
 
-/* DMA channel register space size */
-#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE     0x20
-
 struct tegra_dma;
 
 /*
  * tegra_dma_chip_data Tegra chip specific DMA data
  * @nr_channels: Number of channels available in the controller.
+ * @channel_reg_size: Channel register size/stride.
  * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
  * @support_channel_pause: Support channel wise pause of dma.
+ * @support_separate_wcount_reg: Support separate word count register.
  */
 struct tegra_dma_chip_data {
        int nr_channels;
+       int channel_reg_size;
        int max_dma_count;
        bool support_channel_pause;
+       bool support_separate_wcount_reg;
 };
 
 /* DMA channel registers */
@@ -133,6 +139,7 @@ struct tegra_dma_channel_regs {
        unsigned long   apb_ptr;
        unsigned long   ahb_seq;
        unsigned long   apb_seq;
+       unsigned long   wcount;
 };
 
 /*
@@ -426,6 +433,8 @@ static void tegra_dma_start(struct tegra_dma_channel *tdc,
        tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
+       if (tdc->tdma->chip_data->support_separate_wcount_reg)
+               tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);
 
        /* Start DMA */
        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
@@ -465,6 +474,9 @@ static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
        /* Safe to program new configuration */
        tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
        tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
+       if (tdc->tdma->chip_data->support_separate_wcount_reg)
+               tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
+                                               nsg_req->ch_regs.wcount);
        tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
                                nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
        nsg_req->configured = true;
@@ -718,6 +730,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
        struct tegra_dma_desc *dma_desc;
        unsigned long flags;
        unsigned long status;
+       unsigned long wcount;
        bool was_busy;
 
        spin_lock_irqsave(&tdc->lock, flags);
@@ -738,6 +751,10 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
                tdc->isr_handler(tdc, true);
                status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
        }
+       if (tdc->tdma->chip_data->support_separate_wcount_reg)
+               wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
+       else
+               wcount = status;
 
        was_busy = tdc->busy;
        tegra_dma_stop(tdc);
@@ -746,7 +763,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc)
                sgreq = list_first_entry(&tdc->pending_sg_req,
                                        typeof(*sgreq), node);
                sgreq->dma_desc->bytes_transferred +=
-                               get_current_xferred_count(tdc, sgreq, status);
+                               get_current_xferred_count(tdc, sgreq, wcount);
        }
        tegra_dma_resume(tdc);
 
@@ -908,6 +925,17 @@ static int get_transfer_param(struct tegra_dma_channel *tdc,
        return -EINVAL;
 }
 
+static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
+       struct tegra_dma_channel_regs *ch_regs, u32 len)
+{
+       u32 len_field = (len - 4) & 0xFFFC;
+
+       if (tdc->tdma->chip_data->support_separate_wcount_reg)
+               ch_regs->wcount = len_field;
+       else
+               ch_regs->csr |= len_field;
+}
+
 static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
        struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
        enum dma_transfer_direction direction, unsigned long flags,
@@ -991,7 +1019,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 
                sg_req->ch_regs.apb_ptr = apb_ptr;
                sg_req->ch_regs.ahb_ptr = mem;
-               sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+               sg_req->ch_regs.csr = csr;
+               tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
                sg_req->ch_regs.apb_seq = apb_seq;
                sg_req->ch_regs.ahb_seq = ahb_seq;
                sg_req->configured = false;
@@ -1120,7 +1149,8 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
                ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
                sg_req->ch_regs.apb_ptr = apb_ptr;
                sg_req->ch_regs.ahb_ptr = mem;
-               sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+               sg_req->ch_regs.csr = csr;
+               tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
                sg_req->ch_regs.apb_seq = apb_seq;
                sg_req->ch_regs.ahb_seq = ahb_seq;
                sg_req->configured = false;
@@ -1234,27 +1264,45 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
 /* Tegra20 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
        .nr_channels            = 16,
+       .channel_reg_size       = 0x20,
        .max_dma_count          = 1024UL * 64,
        .support_channel_pause  = false,
+       .support_separate_wcount_reg = false,
 };
 
 /* Tegra30 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
        .nr_channels            = 32,
+       .channel_reg_size       = 0x20,
        .max_dma_count          = 1024UL * 64,
        .support_channel_pause  = false,
+       .support_separate_wcount_reg = false,
 };
 
 /* Tegra114 specific DMA controller information */
 static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
        .nr_channels            = 32,
+       .channel_reg_size       = 0x20,
        .max_dma_count          = 1024UL * 64,
        .support_channel_pause  = true,
+       .support_separate_wcount_reg = false,
+};
+
+/* Tegra148 specific DMA controller information */
+static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
+       .nr_channels            = 32,
+       .channel_reg_size       = 0x40,
+       .max_dma_count          = 1024UL * 64,
+       .support_channel_pause  = true,
+       .support_separate_wcount_reg = true,
 };
 
 
 static const struct of_device_id tegra_dma_of_match[] = {
        {
+               .compatible = "nvidia,tegra148-apbdma",
+               .data = &tegra148_dma_chip_data,
+       }, {
                .compatible = "nvidia,tegra114-apbdma",
                .data = &tegra114_dma_chip_data,
        }, {
@@ -1348,7 +1396,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
                struct tegra_dma_channel *tdc = &tdma->channels[i];
 
                tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
-                                       i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
+                                       i * cdata->channel_reg_size;
 
                res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
                if (!res) {
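
A worked example of the wcount encoding above, with an illustrative length:

    /* For len = 4096 bytes:
     *   len_field = (4096 - 4) & 0xFFFC = 0x0FFC
     * Tegra20/30/114 OR this into CSR; Tegra148 writes it to the separate
     * TEGRA_APBDMA_CHAN_WCOUNT register instead, and terminate_all reads
     * the live count back from TEGRA_APBDMA_CHAN_WORD_TRANSFER.
     */
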
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 85c19d63f9fbe9b6392b73238a209d6f55411215..181b95267866b605f521860f973aa3860d694fa0 100644 (file)
@@ -84,10 +84,12 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
 static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
 {
        struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
+       dma_cookie_t cookie;
 
+       cookie = vd->tx.cookie;
        dma_cookie_complete(&vd->tx);
        dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
-               vd, vd->tx.cookie);
+                vd, cookie);
        list_add_tail(&vd->node, &vc->desc_completed);
 
        tasklet_schedule(&vc->task);
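
The cookie is saved up front because completing it clears it, which is why the
old debug print always logged [x] as 0. For reference, dma_cookie_complete()
is roughly:

    /* From drivers/dma/dmaengine.h (abridged): */
    static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
    {
            BUG_ON(tx->cookie < DMA_MIN_COOKIE);
            tx->chan->completed_cookie = tx->cookie;
            tx->cookie = 0; /* why vd->tx.cookie must be saved first */
    }
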
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 6fd9390ccf91ab541f4aba92ac80da639bdf58ad..c5c92d59e5316820d0ae078fe7ce78e8407f2ede 100644 (file)
@@ -257,7 +257,7 @@ struct dma_chan_percpu {
  * @dev: class device for sysfs
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
- * @client-count: how many clients are using this channel
+ * @client_count: how many clients are using this channel
  * @table_count: number of appearances in the mem-to-mem allocation table
  * @private: private data for certain client-channel associations
  */
@@ -279,10 +279,10 @@ struct dma_chan {
 
 /**
  * struct dma_chan_dev - relate sysfs device node to backing channel device
- * @chan - driver channel device
- * @device - sysfs device
- * @dev_id - parent dma_device dev_id
- * @idr_ref - reference count to gate release of dma_device dev_id
+ * @chan: driver channel device
+ * @device: sysfs device
+ * @dev_id: parent dma_device dev_id
+ * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
        struct dma_chan *chan;
@@ -306,9 +306,8 @@ enum dma_slave_buswidth {
 /**
  * struct dma_slave_config - dma slave channel runtime config
  * @direction: whether the data shall go in or out on this slave
- * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are
- * legal values, DMA_BIDIRECTIONAL is not acceptable since we
- * need to differentiate source and target addresses.
+ * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
+ * legal values.
  * @src_addr: this is the physical address where DMA slave data
  * should be read (RX), if the source is memory this argument is
  * ignored.
diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
index 3a3942823c209163501b1dcd49a550d1664c56da..eabac4e2fc993b114ae940b1be960f04015ade0f 100644 (file)
@@ -43,6 +43,11 @@ struct sdma_script_start_addrs {
        s32 dptc_dvfs_addr;
        s32 utra_addr;
        s32 ram_code_start_addr;
+       /* End of v1 array */
+       s32 mcu_2_ssish_addr;
+       s32 ssish_2_mcu_addr;
+       s32 hdmi_dma_addr;
+       /* End of v2 array */
 };
 
 /**
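
The end-of-array markers above let imx-sdma derive how many script addresses a
given firmware version carries, along the lines of the sketch below; the macro
name is an assumption for illustration, not part of this header:

    /* v1 firmware provides everything up to the first v2-only member. */
    #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 \
            (offsetof(struct sdma_script_start_addrs, mcu_2_ssish_addr) / \
             sizeof(s32))
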
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index beac6b8b6a7b3846cf3925cab343da187fc686e1..bcbc6c3c14c0da82547b861e7108ccee41351f71 100644 (file)
@@ -39,6 +39,7 @@ enum sdma_peripheral_type {
        IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */
        IMX_DMATYPE_ASRC,       /* ASRC */
        IMX_DMATYPE_ESAI,       /* ESAI */
+       IMX_DMATYPE_SSI_DUAL,   /* SSI Dual FIFO */
 };
 
 enum imx_dma_prio {
diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h
index 239e0fc1bb1f9af55a1abecaf0a68b01d0faa214..66574ea39f97dc8b22e49d6625e0a5c16292b821 100644 (file)
@@ -1,6 +1,4 @@
 /*
- *  linux/arch/arm/mach-mmp/include/mach/sram.h
- *
  *  SRAM Memory Management
  *
  *  Copyright (c) 2011 Marvell Semiconductors Inc.
@@ -11,8 +9,8 @@
  *
  */
 
-#ifndef __ASM_ARCH_SRAM_H
-#define __ASM_ARCH_SRAM_H
+#ifndef __DMA_MMP_TDMA_H
+#define __DMA_MMP_TDMA_H
 
 #include <linux/genalloc.h>
 
@@ -32,4 +30,4 @@ struct sram_platdata {
 
 extern struct gen_pool *sram_get_gpool(char *pool_name);
 
-#endif /* __ASM_ARCH_SRAM_H */
+#endif /* __DMA_MMP_TDMA_H */
diff --git a/include/linux/platform_data/dma-mv_xor.h b/include/linux/platform_data/dma-mv_xor.h
index 8ec18f64e3965e68fcf6ffbfae436687987f3466..92ffd3245f76c67bc2c02a36067a77425338be9f 100644 (file)
@@ -1,11 +1,9 @@
 /*
- * arch/arm/plat-orion/include/plat/mv_xor.h
- *
  * Marvell XOR platform device data definition file.
  */
 
-#ifndef __PLAT_MV_XOR_H
-#define __PLAT_MV_XOR_H
+#ifndef __DMA_MV_XOR_H
+#define __DMA_MV_XOR_H
 
 #include <linux/dmaengine.h>
 #include <linux/mbus.h>
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index f9090b167ad7c1ba2c3e91f1af2e042356c25841..6404e1ef20d076bc693e198aab3afb3dde9bf08d 100644 (file)
@@ -164,6 +164,7 @@ struct fsl_ssi_private {
        bool baudclk_locked;
        bool irq_stats;
        bool offline_config;
+       bool use_dual_fifo;
        u8 i2s_mode;
        spinlock_t baudclk_lock;
        struct clk *baudclk;
@@ -721,6 +722,12 @@ static int fsl_ssi_setup(struct fsl_ssi_private *ssi_private)
                                CCSR_SSI_SxCCR_DC(2));
        }
 
+       if (ssi_private->use_dual_fifo) {
+               write_ssi_mask(&ssi->srcr, 0, CCSR_SSI_SRCR_RFEN1);
+               write_ssi_mask(&ssi->stcr, 0, CCSR_SSI_STCR_TFEN1);
+               write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_TCH_EN);
+       }
+
        return 0;
 }
 
@@ -752,6 +759,15 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
                spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
        }
 
+       /* When using dual fifo mode, it is safer to ensure an even period
+        * size. Since the DMA always starts its task from fifo0, an odd
+        * period size would leave fifo1 unserviced at the end of each
+        * period, while the SSI would still read invalid data from fifo1.
+        */
+       if (ssi_private->use_dual_fifo)
+               snd_pcm_hw_constraint_step(substream->runtime, 0,
+                               SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
+
        return 0;
 }
 
@@ -1370,7 +1386,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
 
        if (hw_type == FSL_SSI_MX21 || hw_type == FSL_SSI_MX51 ||
                        hw_type == FSL_SSI_MX35) {
-               u32 dma_events[2];
+               u32 dma_events[2], dmas[4];
                ssi_private->ssi_on_imx = true;
 
                ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1426,6 +1442,16 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                                goto error_clk;
                        }
                }
+               /* Should this be merged with the above? */
+               if (!of_property_read_u32_array(pdev->dev.of_node, "dmas", dmas, 4)
+                               && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
+                       ssi_private->use_dual_fifo = true;
+                       /* When using dual fifo mode, we need to keep the
+                        * watermark an even number due to a DMA script
+                        * limitation.
+                        */
+                       ssi_private->dma_params_tx.maxburst &= ~0x1;
+                       ssi_private->dma_params_rx.maxburst &= ~0x1;
+               }
 
                shared = of_device_is_compatible(of_get_parent(np),
                            "fsl,spba-bus");