Diffstat (limited to 'drivers/dma/mmp_pdma.c')
-rw-r--r--	drivers/dma/mmp_pdma.c	875
1 file changed, 875 insertions, 0 deletions
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
new file mode 100644
index 000000000000..14da1f403edf
--- /dev/null
+++ b/drivers/dma/mmp_pdma.c
@@ -0,0 +1,875 @@
+/*
+ * Copyright 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <linux/platform_data/mmp_dma.h>
+#include <linux/dmapool.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+
+#include "dmaengine.h"
+
+#define DCSR		0x0000
+#define DALGN		0x00a0
+#define DINT		0x00f0
+#define DDADR		0x0200
+#define DSADR		0x0204
+#define DTADR		0x0208
+#define DCMD		0x020c
+
+#define DCSR_RUN	(1 << 31)	/* Run Bit (read / write) */
+#define DCSR_NODESC	(1 << 30)	/* No-Descriptor Fetch (read / write) */
+#define DCSR_STOPIRQEN	(1 << 29)	/* Stop Interrupt Enable (read / write) */
+#define DCSR_REQPEND	(1 << 8)	/* Request Pending (read-only) */
+#define DCSR_STOPSTATE	(1 << 3)	/* Stop State (read-only) */
+#define DCSR_ENDINTR	(1 << 2)	/* End Interrupt (read / write) */
+#define DCSR_STARTINTR	(1 << 1)	/* Start Interrupt (read / write) */
+#define DCSR_BUSERR	(1 << 0)	/* Bus Error Interrupt (read / write) */
+
+#define DCSR_EORIRQEN	(1 << 28)       /* End of Receive Interrupt Enable (R/W) */
+#define DCSR_EORJMPEN	(1 << 27)       /* Jump to next descriptor on EOR */
+#define DCSR_EORSTOPEN	(1 << 26)       /* STOP on an EOR */
+#define DCSR_SETCMPST	(1 << 25)       /* Set Descriptor Compare Status */
+#define DCSR_CLRCMPST	(1 << 24)       /* Clear Descriptor Compare Status */
+#define DCSR_CMPST	(1 << 10)       /* The Descriptor Compare Status */
+#define DCSR_EORINTR	(1 << 9)        /* The end of Receive */
+
+#define DRCMR_MAPVLD	(1 << 7)	/* Map Valid (read / write) */
+#define DRCMR_CHLNUM	0x1f		/* mask for Channel Number (read / write) */
+
+#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
+#define DDADR_STOP	(1 << 0)	/* Stop (read / write) */
+
+#define DCMD_INCSRCADDR	(1 << 31)	/* Source Address Increment Setting. */
+#define DCMD_INCTRGADDR	(1 << 30)	/* Target Address Increment Setting. */
+#define DCMD_FLOWSRC	(1 << 29)	/* Flow Control by the source. */
+#define DCMD_FLOWTRG	(1 << 28)	/* Flow Control by the target. */
+#define DCMD_STARTIRQEN	(1 << 22)	/* Start Interrupt Enable */
+#define DCMD_ENDIRQEN	(1 << 21)	/* End Interrupt Enable */
+#define DCMD_ENDIAN	(1 << 18)	/* Device Endian-ness. */
+#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
+#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
+#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
+#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
+#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
+#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
+#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
+
+#define PDMA_ALIGNMENT		3
+#define PDMA_MAX_DESC_BYTES	0x1000
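+
+/*
+ * Each hw descriptor moves at most PDMA_MAX_DESC_BYTES (4 KiB) per transfer,
+ * well below the 8K - 1 limit of the DCMD length field.
+ */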
+
+struct mmp_pdma_desc_hw {
+	u32 ddadr;	/* Points to the next descriptor + flags */
+	u32 dsadr;	/* DSADR value for the current transfer */
+	u32 dtadr;	/* DTADR value for the current transfer */
+	u32 dcmd;	/* DCMD value for the current transfer */
+} __aligned(32);
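+
+/*
+ * The low bits of DDADR are flags (DDADR_STOP), so a hw descriptor address
+ * must be at least 16-byte aligned; the 32-byte alignment above satisfies
+ * this.
+ */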
+
+struct mmp_pdma_desc_sw {
+	struct mmp_pdma_desc_hw desc;
+	struct list_head node;
+	struct list_head tx_list;
+	struct dma_async_tx_descriptor async_tx;
+};
+
+struct mmp_pdma_phy;
+
+struct mmp_pdma_chan {
+	struct device *dev;
+	struct dma_chan chan;
+	struct dma_async_tx_descriptor desc;
+	struct mmp_pdma_phy *phy;
+	enum dma_transfer_direction dir;
+
+	/* channel's basic info */
+	struct tasklet_struct tasklet;
+	u32 dcmd;
+	u32 drcmr;
+	u32 dev_addr;
+
+	/* list for desc */
+	spinlock_t desc_lock;		/* Descriptor list lock */
+	struct list_head chain_pending;	/* Link descriptors queue for pending */
+	struct list_head chain_running;	/* Link descriptors queue for running */
+	bool idle;			/* channel state machine */
+
+	struct dma_pool *desc_pool;	/* Descriptors pool */
+};
+
+struct mmp_pdma_phy {
+	int idx;
+	void __iomem *base;
+	struct mmp_pdma_chan *vchan;
+};
+
+struct mmp_pdma_device {
+	int				dma_channels;
+	void __iomem			*base;
+	struct device			*dev;
+	struct dma_device		device;
+	struct mmp_pdma_phy		*phy;
+};
+
+#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
+#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
+#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
+#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)
+
+static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
+{
+	u32 reg = (phy->idx << 4) + DDADR;
+
+	writel(addr, phy->base + reg);
+}
+
+static void enable_chan(struct mmp_pdma_phy *phy)
+{
+	u32 reg;
+
+	if (!phy->vchan)
+		return;
+
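+	/*
+	 * Map the peripheral request line to this physical channel: the
+	 * computation below places DRCMR registers for request lines 0-63 at
+	 * 0x100 and lines 64 and up at 0x1100, one 32-bit register per line.
+	 */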
+	reg = phy->vchan->drcmr;
+	reg = (((reg) < 64) ? 0x0100 : 0x1100) + (((reg) & 0x3f) << 2);
+	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+
+	reg = (phy->idx << 2) + DCSR;
+	writel(readl(phy->base + reg) | DCSR_RUN,
+					phy->base + reg);
+}
+
+static void disable_chan(struct mmp_pdma_phy *phy)
+{
+	u32 reg;
+
+	if (phy) {
+		reg = (phy->idx << 2) + DCSR;
+		writel(readl(phy->base + reg) & ~DCSR_RUN,
+						phy->base + reg);
+	}
+}
+
+static int clear_chan_irq(struct mmp_pdma_phy *phy)
+{
+	u32 dcsr;
+	u32 dint = readl(phy->base + DINT);
+	u32 reg = (phy->idx << 2) + DCSR;
+
+	if (dint & BIT(phy->idx)) {
+		/* clear irq */
+		dcsr = readl(phy->base + reg);
+		writel(dcsr, phy->base + reg);
+		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
+			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
+		return 0;
+	}
+	return -EAGAIN;
+}
+
+static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
+{
+	struct mmp_pdma_phy *phy = dev_id;
+
+	if (clear_chan_irq(phy) == 0) {
+		tasklet_schedule(&phy->vchan->tasklet);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
+{
+	struct mmp_pdma_device *pdev = dev_id;
+	struct mmp_pdma_phy *phy;
+	u32 dint = readl(pdev->base + DINT);
+	int i, ret;
+	int irq_num = 0;
+
+	while (dint) {
+		i = __ffs(dint);
+		dint &= (dint - 1);
+		phy = &pdev->phy[i];
+		ret = mmp_pdma_chan_handler(irq, phy);
+		if (ret == IRQ_HANDLED)
+			irq_num++;
+	}
+
+	if (irq_num)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
+
+/* look up a free phy channel in descending priority order */
+static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
+{
+	int prio, i;
+	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
+	struct mmp_pdma_phy *phy;
+
+	/*
+	 * dma channel priorities
+	 * ch 0 - 3,  16 - 19  <--> (0)
+	 * ch 4 - 7,  20 - 23  <--> (1)
+	 * ch 8 - 11, 24 - 27  <--> (2)
+	 * ch 12 - 15, 28 - 31  <--> (3)
+	 */
+	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
+		for (i = 0; i < pdev->dma_channels; i++) {
+			if (prio != ((i & 0xf) >> 2))
+				continue;
+			phy = &pdev->phy[i];
+			if (!phy->vchan) {
+				phy->vchan = pchan;
+				return phy;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+/* desc->tx_list ==> pending list */
+static void append_pending_queue(struct mmp_pdma_chan *chan,
+					struct mmp_pdma_desc_sw *desc)
+{
+	struct mmp_pdma_desc_sw *tail =
+				to_mmp_pdma_desc(chan->chain_pending.prev);
+
+	if (list_empty(&chan->chain_pending))
+		goto out_splice;
+
+	/* one irq per queue, even appended */
+	tail->desc.ddadr = desc->async_tx.phys;
+	tail->desc.dcmd &= ~DCMD_ENDIRQEN;
+
+	/* softly link to pending list */
+out_splice:
+	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
+}
+
+/**
+ * start_pending_queue - transfer any pending transactions
+ * pending list ==> running list
+ */
+static void start_pending_queue(struct mmp_pdma_chan *chan)
+{
+	struct mmp_pdma_desc_sw *desc;
+
+	/* still in running, irq will start the pending list */
+	if (!chan->idle) {
+		dev_dbg(chan->dev, "DMA controller still busy\n");
+		return;
+	}
+
+	if (list_empty(&chan->chain_pending)) {
+		/* chance to re-fetch phy channel with higher prio */
+		if (chan->phy) {
+			chan->phy->vchan = NULL;
+			chan->phy = NULL;
+		}
+		dev_dbg(chan->dev, "no pending list\n");
+		return;
+	}
+
+	if (!chan->phy) {
+		chan->phy = lookup_phy(chan);
+		if (!chan->phy) {
+			dev_dbg(chan->dev, "no free dma channel\n");
+			return;
+		}
+	}
+
+	/*
+	 * pending -> running
+	 * reinitialize pending list
+	 */
+	desc = list_first_entry(&chan->chain_pending,
+				struct mmp_pdma_desc_sw, node);
+	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);
+
+	/*
+	 * Program the descriptor's address into the DMA controller,
+	 * then start the DMA transaction
+	 */
+	set_desc(chan->phy, desc->async_tx.phys);
+	enable_chan(chan->phy);
+	chan->idle = false;
+}
+
+
+/* desc->tx_list ==> pending list */
+static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
+	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
+	struct mmp_pdma_desc_sw *child;
+	unsigned long flags;
+	dma_cookie_t cookie = -EBUSY;
+
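+	/*
+	 * Submit only queues work: cookies are assigned and the chain is
+	 * appended to the pending list under the lock; the hardware is not
+	 * started until issue_pending().
+	 */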
+	spin_lock_irqsave(&chan->desc_lock, flags);
+
+	list_for_each_entry(child, &desc->tx_list, node) {
+		cookie = dma_cookie_assign(&child->async_tx);
+	}
+
+	append_pending_queue(chan, desc);
+
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	return cookie;
+}
+
+static struct mmp_pdma_desc_sw *
+mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
+{
+	struct mmp_pdma_desc_sw *desc;
+	dma_addr_t pdesc;
+
+	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+	if (!desc) {
+		dev_err(chan->dev, "out of memory for link descriptor\n");
+		return NULL;
+	}
+
+	memset(desc, 0, sizeof(*desc));
+	INIT_LIST_HEAD(&desc->tx_list);
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
+	/* each descriptor provides its own submit callback */
+	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
+	desc->async_tx.phys = pdesc;
+
+	return desc;
+}
+
+/**
+ * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
+ *
+ * This function creates a dma pool for descriptor allocation.
+ * The irq is requested only when the channel is requested.
+ *
+ * Return - The number of allocated descriptors.
+ */
+static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+
+	if (chan->desc_pool)
+		return 1;
+
+	chan->desc_pool =
+		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
+				  sizeof(struct mmp_pdma_desc_sw),
+				  __alignof__(struct mmp_pdma_desc_sw), 0);
+	if (!chan->desc_pool) {
+		dev_err(chan->dev, "unable to allocate descriptor pool\n");
+		return -ENOMEM;
+	}
+	if (chan->phy) {
+		chan->phy->vchan = NULL;
+		chan->phy = NULL;
+	}
+	chan->idle = true;
+	chan->dev_addr = 0;
+	return 1;
+}
+
+static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
+				  struct list_head *list)
+{
+	struct mmp_pdma_desc_sw *desc, *_desc;
+
+	list_for_each_entry_safe(desc, _desc, list, node) {
+		list_del(&desc->node);
+		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+	}
+}
+
+static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+	mmp_pdma_free_desc_list(chan, &chan->chain_running);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	dma_pool_destroy(chan->desc_pool);
+	chan->desc_pool = NULL;
+	chan->idle = true;
+	chan->dev_addr = 0;
+	if (chan->phy) {
+		chan->phy->vchan = NULL;
+		chan->phy = NULL;
+	}
+}
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_memcpy(struct dma_chan *dchan,
+	dma_addr_t dma_dst, dma_addr_t dma_src,
+	size_t len, unsigned long flags)
+{
+	struct mmp_pdma_chan *chan;
+	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
+	size_t copy = 0;
+
+	if (!dchan)
+		return NULL;
+
+	if (!len)
+		return NULL;
+
+	chan = to_mmp_pdma_chan(dchan);
+
+	if (!chan->dir) {
+		chan->dir = DMA_MEM_TO_MEM;
+		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
+		chan->dcmd |= DCMD_BURST32;
+	}
+
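+	/*
+	 * Split the copy into a chain of hw descriptors, each covering at most
+	 * PDMA_MAX_DESC_BYTES and linked through ddadr; only the final
+	 * descriptor raises an end interrupt (DCMD_ENDIRQEN, set below).
+	 */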
+	do {
+		/* Allocate the link descriptor from DMA pool */
+		new = mmp_pdma_alloc_descriptor(chan);
+		if (!new) {
+			dev_err(chan->dev, "no memory for desc\n");
+			goto fail;
+		}
+
+		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
+
+		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
+		new->desc.dsadr = dma_src;
+		new->desc.dtadr = dma_dst;
+
+		if (!first)
+			first = new;
+		else
+			prev->desc.ddadr = new->async_tx.phys;
+
+		new->async_tx.cookie = 0;
+		async_tx_ack(&new->async_tx);
+
+		prev = new;
+		len -= copy;
+
+		if (chan->dir == DMA_MEM_TO_DEV) {
+			dma_src += copy;
+		} else if (chan->dir == DMA_DEV_TO_MEM) {
+			dma_dst += copy;
+		} else if (chan->dir == DMA_MEM_TO_MEM) {
+			dma_src += copy;
+			dma_dst += copy;
+		}
+
+		/* Insert the link descriptor to the LD ring */
+		list_add_tail(&new->node, &first->tx_list);
+	} while (len);
+
+	first->async_tx.flags = flags; /* client is in control of this ack */
+	first->async_tx.cookie = -EBUSY;
+
+	/* last desc and fire IRQ */
+	new->desc.ddadr = DDADR_STOP;
+	new->desc.dcmd |= DCMD_ENDIRQEN;
+
+	return &first->async_tx;
+
+fail:
+	if (first)
+		mmp_pdma_free_desc_list(chan, &first->tx_list);
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *
+mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
+			 unsigned int sg_len, enum dma_transfer_direction dir,
+			 unsigned long flags, void *context)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
+	size_t len, avail;
+	struct scatterlist *sg;
+	dma_addr_t addr;
+	int i;
+
+	if ((sgl == NULL) || (sg_len == 0))
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		addr = sg_dma_address(sg);
+		avail = sg_dma_len(sg);
+
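+		/*
+		 * As in mmp_pdma_prep_memcpy(), chop each sg entry into
+		 * PDMA_MAX_DESC_BYTES-sized hw descriptors chained via ddadr.
+		 */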
+		do {
+			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
+
+			/* allocate and populate the descriptor */
+			new = mmp_pdma_alloc_descriptor(chan);
+			if (!new) {
+				dev_err(chan->dev, "no memory for desc\n");
+				goto fail;
+			}
+
+			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
+			if (dir == DMA_MEM_TO_DEV) {
+				new->desc.dsadr = addr;
+				new->desc.dtadr = chan->dev_addr;
+			} else {
+				new->desc.dsadr = chan->dev_addr;
+				new->desc.dtadr = addr;
+			}
+
+			if (!first)
+				first = new;
+			else
+				prev->desc.ddadr = new->async_tx.phys;
+
+			new->async_tx.cookie = 0;
+			async_tx_ack(&new->async_tx);
+			prev = new;
+
+			/* Insert the link descriptor to the LD ring */
+			list_add_tail(&new->node, &first->tx_list);
+
+			/* update metadata */
+			addr += len;
+			avail -= len;
+		} while (avail);
+	}
+
+	first->async_tx.cookie = -EBUSY;
+	first->async_tx.flags = flags;
+
+	/* last desc and fire IRQ */
+	new->desc.ddadr = DDADR_STOP;
+	new->desc.dcmd |= DCMD_ENDIRQEN;
+
+	return &first->async_tx;
+
+fail:
+	if (first)
+		mmp_pdma_free_desc_list(chan, &first->tx_list);
+	return NULL;
+}
+
+static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
+		unsigned long arg)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	struct dma_slave_config *cfg = (void *)arg;
+	unsigned long flags;
+	int ret = 0;
+	u32 maxburst = 0, addr = 0;
+	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+
+	if (!dchan)
+		return -EINVAL;
+
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		disable_chan(chan->phy);
+		if (chan->phy) {
+			chan->phy->vchan = NULL;
+			chan->phy = NULL;
+		}
+		spin_lock_irqsave(&chan->desc_lock, flags);
+		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
+		mmp_pdma_free_desc_list(chan, &chan->chain_running);
+		spin_unlock_irqrestore(&chan->desc_lock, flags);
+		chan->idle = true;
+		break;
+	case DMA_SLAVE_CONFIG:
+		if (cfg->direction == DMA_DEV_TO_MEM) {
+			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
+			maxburst = cfg->src_maxburst;
+			width = cfg->src_addr_width;
+			addr = cfg->src_addr;
+		} else if (cfg->direction == DMA_MEM_TO_DEV) {
+			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+			maxburst = cfg->dst_maxburst;
+			width = cfg->dst_addr_width;
+			addr = cfg->dst_addr;
+		}
+
+		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+			chan->dcmd |= DCMD_WIDTH1;
+		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
+			chan->dcmd |= DCMD_WIDTH2;
+		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
+			chan->dcmd |= DCMD_WIDTH4;
+
+		if (maxburst == 8)
+			chan->dcmd |= DCMD_BURST8;
+		else if (maxburst == 16)
+			chan->dcmd |= DCMD_BURST16;
+		else if (maxburst == 32)
+			chan->dcmd |= DCMD_BURST32;
+
+		chan->dir = cfg->direction;
+		chan->drcmr = cfg->slave_id;
+		chan->dev_addr = addr;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	return ret;
+}
+
+static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
+			dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	enum dma_status ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+	ret = dma_cookie_status(dchan, cookie, txstate);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	return ret;
+}
+
+/**
+ * mmp_pdma_issue_pending - Issue the DMA start command
+ * pending list ==> running list
+ */
+static void mmp_pdma_issue_pending(struct dma_chan *dchan)
+{
+	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+	start_pending_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+/*
+ * dma_do_tasklet
+ * Run the completion callbacks for finished descriptors
+ * Start the pending list
+ */
+static void dma_do_tasklet(unsigned long data)
+{
+	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
+	struct mmp_pdma_desc_sw *desc, *_desc;
+	LIST_HEAD(chain_cleanup);
+	unsigned long flags;
+
+	/* submit pending list; callback for each desc; free desc */
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+
+	/* update the cookie if we have some descriptors to cleanup */
+	if (!list_empty(&chan->chain_running)) {
+		dma_cookie_t cookie;
+
+		desc = to_mmp_pdma_desc(chan->chain_running.prev);
+		cookie = desc->async_tx.cookie;
+		dma_cookie_complete(&desc->async_tx);
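+		/*
+		 * Completing the newest cookie in the running chain implicitly
+		 * retires all older cookies, since cookie progress is
+		 * monotonic.
+		 */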
+
+		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
+	}
+
+	/*
+	 * move the descriptors to a temporary list so we can drop the lock
+	 * during the entire cleanup operation
+	 */
+	list_splice_tail_init(&chan->chain_running, &chain_cleanup);
+
+	/* the hardware is now idle and ready for more */
+	chan->idle = true;
+
+	/* Start any pending transactions automatically */
+	start_pending_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
+		struct dma_async_tx_descriptor *txd = &desc->async_tx;
+
+		/* Remove from the list of transactions */
+		list_del(&desc->node);
+		/* Run the link descriptor callback function */
+		if (txd->callback)
+			txd->callback(txd->callback_param);
+
+		dma_pool_free(chan->desc_pool, desc, txd->phys);
+	}
+}
+
+static int __devexit mmp_pdma_remove(struct platform_device *op)
+{
+	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
+
+	dma_async_device_unregister(&pdev->device);
+	return 0;
+}
+
+static int __devinit mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
+							int idx, int irq)
+{
+	struct mmp_pdma_phy *phy  = &pdev->phy[idx];
+	struct mmp_pdma_chan *chan;
+	int ret;
+
+	chan = devm_kzalloc(pdev->dev,
+			sizeof(struct mmp_pdma_chan), GFP_KERNEL);
+	if (chan == NULL)
+		return -ENOMEM;
+
+	phy->idx = idx;
+	phy->base = pdev->base;
+
+	if (irq) {
+		ret = devm_request_irq(pdev->dev, irq,
+			mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
+		if (ret) {
+			dev_err(pdev->dev, "channel request irq fail!\n");
+			return ret;
+		}
+	}
+
+	spin_lock_init(&chan->desc_lock);
+	chan->dev = pdev->dev;
+	chan->chan.device = &pdev->device;
+	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+	INIT_LIST_HEAD(&chan->chain_pending);
+	INIT_LIST_HEAD(&chan->chain_running);
+
+	/* register virt channel to dma engine */
+	list_add_tail(&chan->chan.device_node,
+			&pdev->device.channels);
+
+	return 0;
+}
+
+static struct of_device_id mmp_pdma_dt_ids[] = {
+	{ .compatible = "marvell,pdma-1.0", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
+
+static int __devinit mmp_pdma_probe(struct platform_device *op)
+{
+	struct mmp_pdma_device *pdev;
+	const struct of_device_id *of_id;
+	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
+	struct resource *iores;
+	int i, ret, irq = 0;
+	int dma_channels = 0, irq_num = 0;
+
+	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
+	if (!pdev)
+		return -ENOMEM;
+	pdev->dev = &op->dev;
+
+	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
+	if (!iores)
+		return -EINVAL;
+
+	pdev->base = devm_request_and_ioremap(pdev->dev, iores);
+	if (!pdev->base)
+		return -EADDRNOTAVAIL;
+
+	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
+	if (of_id)
+		of_property_read_u32(pdev->dev->of_node,
+				"#dma-channels", &dma_channels);
+	else if (pdata && pdata->dma_channels)
+		dma_channels = pdata->dma_channels;
+	else
+		dma_channels = 32;	/* default to 32 channels */
+	pdev->dma_channels = dma_channels;
+
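+	/*
+	 * Count the per-channel irq lines: if every channel has its own irq,
+	 * each channel gets a private handler; otherwise one shared irq is
+	 * demultiplexed via DINT in mmp_pdma_int_handler().
+	 */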
+	for (i = 0; i < dma_channels; i++) {
+		if (platform_get_irq(op, i) > 0)
+			irq_num++;
+	}
+
+	pdev->phy = devm_kzalloc(pdev->dev,
+		dma_channels * sizeof(struct mmp_pdma_phy), GFP_KERNEL);
+	if (pdev->phy == NULL)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&pdev->device.channels);
+
+	if (irq_num != dma_channels) {
+		/* all chan share one irq, demux inside */
+		irq = platform_get_irq(op, 0);
+		ret = devm_request_irq(pdev->dev, irq,
+			mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < dma_channels; i++) {
+		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
+		ret = mmp_pdma_chan_init(pdev, i, irq);
+		if (ret)
+			return ret;
+	}
+
+	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
+	pdev->device.dev = &op->dev;
+	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
+	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
+	pdev->device.device_tx_status = mmp_pdma_tx_status;
+	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
+	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
+	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
+	pdev->device.device_control = mmp_pdma_control;
+	pdev->device.copy_align = PDMA_ALIGNMENT;
+
+	if (pdev->dev->coherent_dma_mask)
+		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
+	else
+		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
+
+	ret = dma_async_device_register(&pdev->device);
+	if (ret) {
+		dev_err(pdev->device.dev, "unable to register\n");
+		return ret;
+	}
+
+	dev_info(pdev->device.dev, "initialized\n");
+	return 0;
+}
+
+static const struct platform_device_id mmp_pdma_id_table[] = {
+	{ "mmp-pdma", },
+	{ },
+};
+
+static struct platform_driver mmp_pdma_driver = {
+	.driver		= {
+		.name	= "mmp-pdma",
+		.owner  = THIS_MODULE,
+		.of_match_table = mmp_pdma_dt_ids,
+	},
+	.id_table	= mmp_pdma_id_table,
+	.probe		= mmp_pdma_probe,
+	.remove		= __devexit_p(mmp_pdma_remove),
+};
+
+module_platform_driver(mmp_pdma_driver);
+
+MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");