/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/module.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx.h>

#include "dmaengine.h"
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS  16

#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST     (1 << 0)
#define IMX_DMA_ERR_REQUEST   (1 << 1)
#define IMX_DMA_ERR_TRANSFER  (1 << 2)
#define IMX_DMA_ERR_BUFFER    (1 << 3)
#define IMX_DMA_ERR_TIMEOUT   (1 << 4)

#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)
#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)

enum  imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};

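/*
 * The controller has two global 2D window register sets (A and B:
 * WSR/XSR/YSR) shared by all channels. A channel doing an interleaved
 * transfer claims one slot; 'count' tracks how many channels currently
 * use a slot's configuration.
 */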
struct imx_dma_2d_config {
	u16		xsr;
	u16		ysr;
	u16		wsr;
	int		count;
};

struct imxdma_desc {
	struct list_head		node;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	dma_addr_t			src;
	dma_addr_t			dest;
	size_t				len;
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};

struct imxdma_channel {
	int				hw_chaining;
	struct timer_list		watchdog;
	struct imxdma_engine		*imxdma;
	unsigned int			channel;

	struct tasklet_struct		dma_tasklet;
	struct list_head		ld_free;
	struct list_head		ld_queue;
	struct list_head		ld_active;
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
	u32				ccr_from_device;
	u32				ccr_to_device;
	bool				enabled_2d;
	int				slot_2d;
};

enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	void __iomem			*base;
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
};

static struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);

static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX1_DMA;
}

static inline int is_imx21_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX21_DMA;
}

static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	return imxdma->devtype == IMX27_DMA;
}

static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

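/*
 * imxdma_chan_is_doing_cyclic - true if the first active descriptor is cyclic
 */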
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc,
					node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}



static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val,
			     unsigned offset)
{
	__raw_writel(val, imxdma->base + offset);
}

static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset)
{
	return __raw_readl(imxdma->base + offset);
}

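/*
 * imxdma_hw_chain - hardware descriptor chaining is only available on i.MX27
 */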
static int imxdma_hw_chain(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;

	if (is_imx27_dma(imxdma))
		return imxdmac->hw_chaining;
	else
		return 0;
}

/*
 * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation
 */
static inline int imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	unsigned long now;

	now = min(d->len, sg_dma_len(sg));
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x\n", __func__, imxdmac->channel,
		 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));

	return now;
}

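/*
 * imxdma_enable_hw - acknowledge and unmask the channel interrupt, then start
 * the channel; with hardware chaining, the next sg segment is pre-programmed
 * so the controller can repeat into it automatically.
 */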
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	local_irq_save(flags);

	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

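/*
 * imxdma_disable_hw - mask the channel interrupt, stop the channel and clear
 * any pending interrupt; the chaining watchdog is cancelled first.
 */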
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d\n", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}

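/*
 * imxdma_watchdog - a chained transfer did not complete in time: stop the
 * channel and let the channel tasklet handle the error.
 */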
static void imxdma_watchdog(unsigned long data)
{
	struct imxdma_channel *imxdmac = (struct imxdma_channel *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout!\n",
		imxdmac->channel);
}

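/*
 * imxdma_err_handler - decode burst/request/transfer/buffer error status per
 * channel, acknowledge it and schedule the affected channel's tasklet.
 */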
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		printk(KERN_WARNING
		       "DMA timeout on channel %d -%s%s%s%s\n", i,
		       errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
		       errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
		       errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
		       errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}

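/*
 * dma_irq_handle_channel - per-channel completion: advance to the next sg
 * segment (re-arming the hardware or the chaining watchdog) and schedule the
 * channel tasklet when the segment list or a cyclic period has finished.
 */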
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);
	if (list_empty(&imxdmac->ld_active)) {
		spin_unlock(&imxdma->lock);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock(&imxdma->lock);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}

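/*
 * dma_irq_handler - on i.MX21/27 the error conditions arrive on the same
 * line, so run the error handler first, then dispatch the channels flagged
 * in DISR.
 */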
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x\n", __func__, disr);

	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}

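/*
 * imxdma_xfer_desc - program the hardware for a descriptor: claim a 2D slot
 * for interleaved transfers, set up source/destination/control registers
 * according to the descriptor type and start the channel.
 */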
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		spin_lock_irqsave(&imxdma->lock, flags);
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0) {
			spin_unlock_irqrestore(&imxdma->lock, flags);
			return -EBUSY;
		}

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;
		spin_unlock_irqrestore(&imxdma->lock, flags);

		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev, "%s channel: %d dest=0x%08x src=0x%08x "
			"dma_length=%d\n", __func__, imxdmac->channel,
			d->dest, d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (dev2mem)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev, "%s channel: %d sg=%p sgcount=%d "
				"total length=%d dev_addr=0x%08x (mem2dev)\n",
				__func__, imxdmac->channel, d->sg, d->sgcount,
				d->len, imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode\n",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}

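/*
 * imxdma_tasklet - run the completed descriptor's callback, release its 2D
 * slot, recycle it (unless cyclic) and start the next queued descriptor.
 */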
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	spin_lock(&imxdma->lock);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		goto out;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	if (desc->desc.callback)
		desc->desc.callback(desc->desc.callback_param);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and don't mark the descriptor as complete.
	 * Only in the non-cyclic case is the descriptor marked as complete.
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
					node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock(&imxdma->lock);
}

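/*
 * imxdma_control - DMA_TERMINATE_ALL stops the channel and returns all
 * descriptors to the free list; DMA_SLAVE_CONFIG programs the peripheral
 * address, bus width, burst length and request source for slave transfers.
 */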
static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned long flags;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdma_disable_hw(imxdmac);

		spin_lock_irqsave(&imxdma->lock, flags);
		list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
		list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}

		imxdmac->hw_chaining = 1;
		if (!imxdma_hw_chain(imxdmac))
			return -EINVAL;
		imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
			((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
			CCR_REN;
		imxdmac->ccr_to_device =
			(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
			((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
		imx_dmav1_writel(imxdma, imxdmac->dma_request,
				 DMA_RSSR(imxdmac->channel));

		/* Set burst length */
		imx_dmav1_writel(imxdma, imxdmac->watermark_level *
				imxdmac->word_size, DMA_BLR(imxdmac->channel));

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

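/*
 * imxdma_tx_submit - move the descriptor from the free list to the queue and
 * assign its cookie; the transfer is started later from issue_pending or the
 * completion tasklet.
 */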
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

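/*
 * imxdma_alloc_chan_resources - pre-allocate a fixed pool of descriptors for
 * the channel and pick up the peripheral request line from the platform data.
 */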
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_SUCCESS;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

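/*
 * imxdma_free_chan_resources - stop the channel, free all descriptors and
 * the cyclic sg list.
 */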
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	if (imxdmac->sg_list) {
		kfree(imxdmac->sg_list);
		imxdmac->sg_list = NULL;
	}
}

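/*
 * imxdma_prep_slave_sg - prepare a slave scatter-gather transfer using a
 * descriptor from the free pool; lists that violate the configured bus-width
 * alignment are rejected.
 */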
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

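/*
 * imxdma_prep_dma_cyclic - prepare a cyclic transfer by building a circular
 * sg list with one entry per period.
 */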
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	if (imxdmac->sg_list)
		kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	imxdmac->sg_list[periods].offset = 0;
	sg_dma_len(&imxdmac->sg_list[periods]) = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

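/*
 * imxdma_prep_dma_memcpy - prepare a linear memory-to-memory copy.
 */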
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src=0x%x dst=0x%x len=%d\n",
			__func__, imxdmac->channel, src, dest, len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_MEMCPY;
	desc->src = src;
	desc->dest = dest;
	desc->len = len;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

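/*
 * imxdma_prep_dma_interleaved - prepare a 2D memory-to-memory transfer; only
 * templates with a single chunk per frame and DMA_MEM_TO_MEM are supported.
 */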
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;

	dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%x dst_start=0x%x\n"
		"   src_sgl=%s dst_sgl=%s numf=%d frame_size=%d\n", __func__,
		imxdmac->channel, xt->src_start, xt->dst_start,
		xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false",
		xt->numf, xt->frame_size);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM)
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	desc->type = IMXDMA_DESC_INTERLEAVED;
	desc->src = xt->src_start;
	desc->dest = xt->dst_start;
	desc->x = xt->sgl[0].size;
	desc->y = xt->numf;
	desc->w = xt->sgl[0].icg + desc->x;
	desc->len = desc->x * desc->y;
	desc->direction = DMA_MEM_TO_MEM;
	desc->config_port = IMX_DMA_MEMSIZE_32;
	desc->config_mem = IMX_DMA_MEMSIZE_32;
	if (xt->src_sgl)
		desc->config_mem |= IMX_DMA_TYPE_2D;
	if (xt->dst_sgl)
		desc->config_port |= IMX_DMA_TYPE_2D;
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

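/*
 * imxdma_issue_pending - start the first queued descriptor if the channel is
 * idle.
 */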
static void imxdma_issue_pending(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active) &&
	    !list_empty(&imxdmac->ld_queue)) {
		desc = list_first_entry(&imxdmac->ld_queue,
					struct imxdma_desc, node);

		if (imxdma_xfer_desc(desc) < 0) {
			dev_warn(imxdma->dev,
				 "%s: channel: %d couldn't issue DMA xfer\n",
				 __func__, imxdmac->channel);
		} else {
			list_move_tail(imxdmac->ld_queue.next,
				       &imxdmac->ld_active);
		}
	}
	spin_unlock_irqrestore(&imxdma->lock, flags);
}

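/*
 * imxdma_probe - map the registers, enable the clocks, reset and enable the
 * DMA block and register the dmaengine device. i.MX1 has one shared DMA IRQ
 * plus an error IRQ; i.MX21/27 have one IRQ per channel.
 */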
static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	struct resource *res;
	int ret, i;
	int irq, irq_err;

	imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	imxdma->devtype = pdev->id_entry->driver_data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	imxdma->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!imxdma->base)
		return -EADDRNOTAVAIL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(imxdma->dma_ipg))
		return PTR_ERR(imxdma->dma_ipg);

	imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(imxdma->dma_ahb))
		return PTR_ERR(imxdma->dma_ahb);

	clk_prepare_enable(imxdma->dma_ipg);
	clk_prepare_enable(imxdma->dma_ahb);

	/* reset DMA module */
	imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR);

	if (is_imx1_dma(imxdma)) {
		ret = devm_request_irq(&pdev->dev, irq,
				       dma_irq_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register IRQ for DMA\n");
			goto err;
		}

		irq_err = platform_get_irq(pdev, 1);
		if (irq_err < 0) {
			ret = irq_err;
			goto err;
		}

		ret = devm_request_irq(&pdev->dev, irq_err,
				       imxdma_err_handler, 0, "DMA", imxdma);
		if (ret) {
			dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n");
			goto err;
		}
	}

	/* enable DMA module */
	imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR);

	/* clear all interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR);

	/* disable interrupts */
	imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR);

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask);

	/* Initialize 2D global parameters */
	for (i = 0; i < IMX_DMA_2D_SLOTS; i++)
		imxdma->slots_2d[i].count = 0;

	spin_lock_init(&imxdma->lock);

	/* Initialize channel parameters */
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		if (!is_imx1_dma(imxdma)) {
			ret = devm_request_irq(&pdev->dev, irq + i,
					dma_irq_handler, 0, "DMA", imxdma);
			if (ret) {
				dev_warn(imxdma->dev, "Can't register IRQ %d "
					 "for DMA channel %d\n",
					 irq + i, i);
				goto err;
			}
			init_timer(&imxdmac->watchdog);
			imxdmac->watchdog.function = &imxdma_watchdog;
			imxdmac->watchdog.data = (unsigned long)imxdmac;
		}

		imxdmac->imxdma = imxdma;

		INIT_LIST_HEAD(&imxdmac->ld_queue);
		INIT_LIST_HEAD(&imxdmac->ld_free);
		INIT_LIST_HEAD(&imxdmac->ld_active);

		tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet,
			     (unsigned long)imxdmac);
		imxdmac->chan.device = &imxdma->dma_device;
		dma_cookie_init(&imxdmac->chan);
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node,
			      &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy;
	imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.copy_align = 2; /* 2^2 = 4 bytes alignment */
	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err;
	}

	return 0;

err:
	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);

	dma_async_device_unregister(&imxdma->dma_device);

	clk_disable_unprepare(imxdma->dma_ipg);
	clk_disable_unprepare(imxdma->dma_ahb);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.id_table	= imx_dma_devtype,
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX dma driver");
MODULE_LICENSE("GPL");