/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

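/*
 * Per-buffer private state. One instance backs each vb2 buffer; the MMAP,
 * USERPTR and DMABUF paths each use only the fields relevant to them.
 */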
struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

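/*
 * Return the number of bytes that are DMA-contiguous from the start of the
 * mapped scatterlist: walk the segments and stop at the first gap in the
 * DMA address space.
 */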
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (attrs)
		buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
					GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	/*
	 * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
	 * map whole buffer
	 */
	vma->vm_pgoff = 0;

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
		buf->dma_addr, buf->size, buf->attrs);

	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		__func__, (unsigned long)buf->dma_addr, vma->vm_start,
		buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.map = vb2_dc_dmabuf_ops_kmap,
	.map_atomic = vb2_dc_dmabuf_ops_kmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
		buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		for (i = 0; i < frame_vector_count(buf->vec); i++)
			set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a DMA address, or as a last resort just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
	/* really, we cannot do anything better at this point */
	return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned long offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = vaddr & ~PAGE_MASK;
	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
					       dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);
		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, nums[0]);
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
			contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);
fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu b\n",
			contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
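/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * a driver plugs these memops into its vb2_queue before vb2_queue_init()
 * and later fetches a plane's DMA address via the cookie helper:
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->dev = &pdev->dev;
 *	ret = vb2_queue_init(q);
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, plane_no);
 */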

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
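/*
 * A minimal sketch of the calling convention described above (hypothetical
 * driver code, not part of this file): raise the max segment size from the
 * driver's probe(), before the vb2 queue is initialized:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */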

/*
 * vb2_dma_contig_clear_max_seg_size() - release resources for DMA parameters
 * @dev:	device for configuring DMA parameters
 *
 * This function releases resources allocated to configure DMA parameters
 * (see vb2_dma_contig_set_max_seg_size() function). It should be called from
 * device drivers on driver remove.
 */
void vb2_dma_contig_clear_max_seg_size(struct device *dev)
{
	kfree(dev->dma_parms);
	dev->dma_parms = NULL;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_clear_max_seg_size);
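/*
 * Matching cleanup sketch (hypothetical driver code): release the DMA
 * parameters allocated by vb2_dma_contig_set_max_seg_size() on remove:
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		vb2_dma_contig_clear_max_seg_size(&pdev->dev);
 *		return 0;
 *	}
 */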

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");