#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sa11x0-dma.h>
#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
-#include <mach/dma.h>
#include <mach/hardware.h>
#include <asm/mach/irda.h>
static int max_rate = 4000000;
struct sa1100_buf {
+ struct device *dev;
struct sk_buff *skb;
- dma_addr_t dma;
- dma_regs_t *regs;
+ struct scatterlist sg;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
};
struct sa1100_irda {
#define HPSIR_MAX_RXLEN 2047
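+/*
+ * DMA slave configuration for each of the Ser2 FIFO targets.  The
+ * transmit channel is reprogrammed between the SIR and FIR settings
+ * whenever the link speed changes; the receive channel is only used
+ * in FIR mode, since SIR reception stays interrupt driven.
+ */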
+static struct dma_slave_config sa1100_irda_sir_tx = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = __PREG(Ser2UTDR),
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_maxburst = 4,
+};
+
+static struct dma_slave_config sa1100_irda_fir_rx = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = __PREG(Ser2HSDR),
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .src_maxburst = 8,
+};
+
+static struct dma_slave_config sa1100_irda_fir_tx = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = __PREG(Ser2HSDR),
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .dst_maxburst = 8,
+};
+
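+/*
+ * How much data has the DMA engine transferred so far?  On sa11x0-dma
+ * the residue reported by dmaengine_tx_status() is believed to be exact
+ * only while the channel is paused, so callers pause the channel first;
+ * otherwise we conservatively report zero.
+ */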
+static unsigned sa1100_irda_dma_xferred(struct sa1100_buf *buf)
+{
+ struct dma_chan *chan = buf->chan;
+ struct dma_tx_state state;
+ enum dma_status status;
+
+ status = dmaengine_tx_status(chan, buf->cookie, &state);
+ if (status != DMA_PAUSED)
+ return 0;
+
+ return sg_dma_len(&buf->sg) - state.residue;
+}
+
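+/*
+ * Acquire a DMA_SLAVE channel by name using the sa11x0 filter function,
+ * then apply its initial slave configuration.  A slave_config failure
+ * is reported but not treated as fatal here.
+ */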
+static int sa1100_irda_dma_request(struct device *dev, struct sa1100_buf *buf,
+ const char *name, struct dma_slave_config *cfg)
+{
+ dma_cap_mask_t m;
+ int ret;
+
+ dma_cap_zero(m);
+ dma_cap_set(DMA_SLAVE, m);
+
+ buf->chan = dma_request_channel(m, sa11x0_dma_filter_fn, (void *)name);
+ if (!buf->chan) {
+ dev_err(dev, "unable to request DMA channel for %s\n",
+ name);
+ return -ENOENT;
+ }
+
+ ret = dmaengine_slave_config(buf->chan, cfg);
+ if (ret)
+ dev_warn(dev, "DMA slave_config for %s returned %d\n",
+ name, ret);
+
+ buf->dev = buf->chan->device->dev;
+
+ return 0;
+}
+
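+/*
+ * Start a one-entry scatterlist transfer using the usual dmaengine
+ * idiom: prepare a slave_sg descriptor, attach the completion callback
+ * (which runs in tasklet context), submit it and kick the engine with
+ * dma_async_issue_pending().  If preparation fails, the transfer is
+ * silently dropped.
+ */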
+static void sa1100_irda_dma_start(struct sa1100_buf *buf,
+ enum dma_transfer_direction dir, dma_async_tx_callback cb, void *cb_p)
+{
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan = buf->chan;
+
+ desc = dmaengine_prep_slave_sg(chan, &buf->sg, 1, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (desc) {
+ desc->callback = cb;
+ desc->callback_param = cb_p;
+ buf->cookie = dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+ }
+}
+
/*
* Allocate and map the receive buffer, unless it is already allocated.
*/
/*
 * Align any IP headers that may be contained
 * within the frame.
 */
skb_reserve(si->dma_rx.skb, 1);
- si->dma_rx.dma = dma_map_single(si->dev, si->dma_rx.skb->data,
- HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(si->dev, si->dma_rx.dma)) {
+ sg_set_buf(&si->dma_rx.sg, si->dma_rx.skb->data, HPSIR_MAX_RXLEN);
+ if (dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE) == 0) {
dev_kfree_skb_any(si->dma_rx.skb);
return -ENOMEM;
}
/*
* Enable the DMA, receiver and receive interrupt.
*/
- sa1100_clear_dma(si->dma_rx.regs);
- sa1100_start_dma(si->dma_rx.regs, si->dma_rx.dma, HPSIR_MAX_RXLEN);
+ dmaengine_terminate_all(si->dma_rx.chan);
+ sa1100_irda_dma_start(&si->dma_rx, DMA_DEV_TO_MEM, NULL, NULL);
+
Ser2HSCR0 = HSCR0_HSSP | HSCR0_RXE;
}
/*
* HP-SIR format support.
*/
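+/*
+ * SIR transmit DMA completion.  The callback fires once the last byte
+ * has been handed to the UART FIFO, so we must still wait for the
+ * transmitter itself to drain before re-enabling the receiver.
+ */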
+static void sa1100_irda_sirtxdma_irq(void *id)
+{
+ struct net_device *dev = id;
+ struct sa1100_irda *si = netdev_priv(dev);
+
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE);
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += sg_dma_len(&si->dma_tx.sg);
+
+ dev_kfree_skb(si->dma_tx.skb);
+ si->dma_tx.skb = NULL;
+
+ /* We need to ensure that the transmitter has finished. */
+ do
+ rmb();
+ while (Ser2UTSR1 & UTSR1_TBY);
+
+ /*
+ * Ok, we've finished transmitting. Now enable the receiver.
+ * Sometimes we get a receive IRQ immediately after a transmit...
+ */
+ Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
+ Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
+
+ sa1100_irda_check_speed(si);
+
+ /* I'm hungry! */
+ netif_wake_queue(dev);
+}
+
static int sa1100_irda_sir_tx_start(struct sk_buff *skb, struct net_device *dev,
struct sa1100_irda *si)
{
si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
si->tx_buff.truesize);
+ si->dma_tx.skb = skb;
+ sg_set_buf(&si->dma_tx.sg, si->tx_buff.data, si->tx_buff.len);
+ if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
+ si->dma_tx.skb = NULL;
+ netif_wake_queue(dev);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+
+ sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV,
+ sa1100_irda_sirtxdma_irq, dev);
+
/*
- * Set the transmit interrupt enable. This will fire off an
- * interrupt immediately. Note that we disable the receiver
- * so we won't get spurious characters received.
+ * The mean turn-around time is enforced by XBOF padding,
+ * so we don't have to do anything special here.
*/
- Ser2UTCR3 = UTCR3_TIE | UTCR3_TXE;
-
- dev_kfree_skb(skb);
+ Ser2UTCR3 = UTCR3_TXE;
return NETDEV_TX_OK;
}
}
- if (status & UTSR0_TFS && si->tx_buff.len) {
- /*
- * Transmitter FIFO is not full
- */
- do {
- Ser2UTDR = *si->tx_buff.data++;
- si->tx_buff.len -= 1;
- } while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);
-
- if (si->tx_buff.len == 0) {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += si->tx_buff.data -
- si->tx_buff.head;
-
- /*
- * We need to ensure that the transmitter has
- * finished.
- */
- do
- rmb();
- while (Ser2UTSR1 & UTSR1_TBY);
-
- /*
- * Ok, we've finished transmitting. Now enable
- * the receiver. Sometimes we get a receive IRQ
- * immediately after a transmit...
- */
- Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
- Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;
-
- sa1100_irda_check_speed(si);
-
- /* I'm hungry! */
- netif_wake_queue(dev);
- }
- }
-
return IRQ_HANDLED;
}
/* Account and free the packet. */
skb = si->dma_tx.skb;
if (skb) {
- dma_unmap_single(si->dev, si->dma_tx.dma, skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
dev->stats.tx_packets ++;
dev->stats.tx_bytes += skb->len;
dev_kfree_skb_irq(skb);
int mtt = irda_get_mtt(skb);
si->dma_tx.skb = skb;
- si->dma_tx.dma = dma_map_single(si->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(si->dev, si->dma_tx.dma)) {
+ sg_set_buf(&si->dma_tx.sg, skb->data, skb->len);
+ if (dma_map_sg(si->dma_tx.dev, &si->dma_tx.sg, 1, DMA_TO_DEVICE) == 0) {
si->dma_tx.skb = NULL;
netif_wake_queue(dev);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
- sa1100_start_dma(si->dma_tx.regs, si->dma_tx.dma, skb->len);
+ sa1100_irda_dma_start(&si->dma_tx, DMA_MEM_TO_DEV,
+ sa1100_irda_firtxdma_irq, dev);
/*
* If we have a mean turn-around time, impose the specified
static void sa1100_irda_fir_error(struct sa1100_irda *si, struct net_device *dev)
{
struct sk_buff *skb = si->dma_rx.skb;
- dma_addr_t dma_addr;
unsigned int len, stat, data;
if (!skb) {
/*
* Get the current data position.
*/
- dma_addr = sa1100_get_dma_pos(si->dma_rx.regs);
- len = dma_addr - si->dma_rx.dma;
+ len = sa1100_irda_dma_xferred(&si->dma_rx);
if (len > HPSIR_MAX_RXLEN)
len = HPSIR_MAX_RXLEN;
- dma_unmap_single(si->dev, si->dma_rx.dma, len, DMA_FROM_DEVICE);
+ dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
do {
/*
* Remap the buffer - it was previously mapped, and we
* hope that this succeeds.
*/
- si->dma_rx.dma = dma_map_single(si->dev, si->dma_rx.skb->data,
- HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
+ dma_map_sg(si->dma_rx.dev, &si->dma_rx.sg, 1, DMA_FROM_DEVICE);
}
}
/*
* Stop RX DMA
*/
- sa1100_stop_dma(si->dma_rx.regs);
+ dmaengine_pause(si->dma_rx.chan);
/*
* Framing error - we throw away the packet completely.
case 57600: case 115200:
brd = 3686400 / (16 * speed) - 1;
- /*
- * Stop the receive DMA.
- */
- if (IS_FIR(si))
- sa1100_stop_dma(si->dma_rx.regs);
+ /* Stop the receive DMA, and configure transmit. */
+ if (IS_FIR(si)) {
+ dmaengine_terminate_all(si->dma_rx.chan);
+ dmaengine_slave_config(si->dma_tx.chan,
+ &sa1100_irda_sir_tx);
+ }
local_irq_save(flags);
break;
case 4000000:
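+ /* Coming up from SIR: retarget the transmit DMA at the FIR FIFO. */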
+ if (!IS_FIR(si))
+ dmaengine_slave_config(si->dma_tx.chan,
+ &sa1100_irda_fir_tx);
+
local_irq_save(flags);
Ser2HSSR0 = 0xff;
/*
* Stop all DMA activity.
*/
- sa1100_stop_dma(si->dma_rx.regs);
- sa1100_stop_dma(si->dma_tx.regs);
+ dmaengine_terminate_all(si->dma_rx.chan);
+ dmaengine_terminate_all(si->dma_tx.chan);
/* Disable the port. */
Ser2UTCR3 = 0;
si->speed = 9600;
- err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive",
- NULL, NULL, &si->dma_rx.regs);
+ err = sa1100_irda_dma_request(si->dev, &si->dma_rx, "Ser2ICPRc",
+ &sa1100_irda_fir_rx);
if (err)
goto err_rx_dma;
- err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit",
- sa1100_irda_firtxdma_irq, dev,
- &si->dma_tx.regs);
+ err = sa1100_irda_dma_request(si->dev, &si->dma_tx, "Ser2ICPTr",
+ &sa1100_irda_sir_tx);
if (err)
goto err_tx_dma;
si->open = 0;
sa1100_irda_shutdown(si);
err_startup:
- sa1100_free_dma(si->dma_tx.regs);
+ dma_release_channel(si->dma_tx.chan);
err_tx_dma:
- sa1100_free_dma(si->dma_rx.regs);
+ dma_release_channel(si->dma_rx.chan);
err_rx_dma:
return err;
}
*/
skb = si->dma_rx.skb;
if (skb) {
- dma_unmap_single(si->dev, si->dma_rx.dma, HPSIR_MAX_RXLEN,
- DMA_FROM_DEVICE);
+ dma_unmap_sg(si->dma_rx.dev, &si->dma_rx.sg, 1,
+ DMA_FROM_DEVICE);
dev_kfree_skb(skb);
si->dma_rx.skb = NULL;
}
skb = si->dma_tx.skb;
if (skb) {
- dma_unmap_single(si->dev, si->dma_tx.dma, skb->len,
- DMA_TO_DEVICE);
+ dma_unmap_sg(si->dma_tx.dev, &si->dma_tx.sg, 1,
+ DMA_TO_DEVICE);
dev_kfree_skb(skb);
si->dma_tx.skb = NULL;
}
/*
* Free resources
*/
- sa1100_free_dma(si->dma_tx.regs);
- sa1100_free_dma(si->dma_rx.regs);
+ dma_release_channel(si->dma_tx.chan);
+ dma_release_channel(si->dma_rx.chan);
free_irq(dev->irq, dev);
sa1100_set_power(si, 0);
si->dev = &pdev->dev;
si->pdata = pdev->dev.platform_data;
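+ /*
+ * Each DMA buffer is described by a one-entry scatterlist, which must
+ * be initialised before sg_set_buf()/dma_map_sg() can operate on it.
+ */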
+ sg_init_table(&si->dma_rx.sg, 1);
+ sg_init_table(&si->dma_tx.sg, 1);
+
/*
* Initialise the HP-SIR buffers
*/
err = sa1100_irda_init_iobuf(&si->rx_buff, 14384);
if (err)
goto err_mem_5;
- err = sa1100_irda_init_iobuf(&si->tx_buff, 4000);
+ err = sa1100_irda_init_iobuf(&si->tx_buff, IRDA_SIR_MAX_FRAME);
if (err)
goto err_mem_5;