/*
 * videobuf2-v4l2.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * The vb2_thread implementation was based on code from videobuf-dvb.c:
 *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>

#include <media/videobuf2-v4l2.h>

static int debug;
module_param(debug, int, 0644);
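
/*
 * Usage sketch (assuming this helper is built as the videobuf2_v4l2 module):
 * the debug level can be raised at runtime with
 *
 *	echo 1 > /sys/module/videobuf2_v4l2/parameters/debug
 */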

#define dprintk(q, level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			pr_info("vb2-v4l2: [%s] %s: " fmt,		      \
				(q)->name, __func__, ## arg);		      \
	} while (0)

/* Flags that are set by us */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_IN_REQUEST | \
				 V4L2_BUF_FLAG_REQUEST_FD | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | \
				 V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | \
				 V4L2_BUF_FLAG_TIMECODE | \
				 V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)

/*
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (b->m.planes == NULL) {
		dprintk(vb->vb2_queue, 1,
			"multi-planar buffer passed but planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
		dprintk(vb->vb2_queue, 1,
			"incorrect planes array length, expected %d, got %d\n",
			vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
{
	return __verify_planes_array(vb, pb);
}

/*
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int bytesused;
	unsigned int plane;

	if (V4L2_TYPE_IS_CAPTURE(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			length = (b->memory == VB2_MEMORY_USERPTR ||
				  b->memory == VB2_MEMORY_DMABUF)
			       ? b->m.planes[plane].length
				: vb->planes[plane].length;
			bytesused = b->m.planes[plane].bytesused
				  ? b->m.planes[plane].bytesused : length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;

			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >= bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == VB2_MEMORY_USERPTR ||
			  b->memory == VB2_MEMORY_DMABUF)
			? b->length : vb->planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/*
 * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
 */
static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	vbuf->request_fd = -1;
}

static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = v4l2_buffer_get_timestamp(b);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
};

static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
{
	static bool check_once;

	if (check_once)
		return;

	check_once = true;

	pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
	if (vb->vb2_queue->allow_zero_bytesused)
		pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
	else
		pr_warn("use the actual size instead.\n");
}

static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_plane *planes = vbuf->planes;
	unsigned int plane;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
		return ret;
	}
	if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
		/*
		 * If the format's field is ALTERNATE, then the buffer's field
		 * should be either TOP or BOTTOM, not ALTERNATE since that
		 * makes no sense. The driver has to know whether the
		 * buffer represents a top or a bottom field in order to
		 * program any DMA correctly. Using ALTERNATE is wrong, since
		 * that just says that it is either a top or a bottom field,
		 * but not which of the two it is.
		 */
		dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
		return -EINVAL;
	}
	vbuf->sequence = 0;
	vbuf->request_fd = -1;
	vbuf->is_held = false;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		case VB2_MEMORY_DMABUF:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				planes[plane].length =
					b->m.planes[plane].length;
			}
			break;
		default:
			for (plane = 0; plane < vb->num_planes; ++plane) {
				planes[plane].m.offset =
					vb->planes[plane].m.offset;
				planes[plane].length =
					vb->planes[plane].length;
			}
			break;
		}

		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 *
			 * If bytesused == 0 for the output buffer, then fall
			 * back to the full buffer size. In that case
			 * userspace clearly never bothered to set it and
			 * it's a safe assumption that they really meant to
			 * use the full plane sizes.
			 *
			 * Some drivers, e.g. old codec drivers, use bytesused == 0
			 * as a way to indicate that streaming is finished.
			 * In that case, the driver should use the
			 * allow_zero_bytesused flag to keep old userspace
			 * applications working.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				struct vb2_plane *pdst = &planes[plane];
				struct v4l2_plane *psrc = &b->m.planes[plane];

				if (psrc->bytesused == 0)
					vb2_warn_zero_bytesused(vb);

				if (vb->vb2_queue->allow_zero_bytesused)
					pdst->bytesused = psrc->bytesused;
				else
					pdst->bytesused = psrc->bytesused ?
						psrc->bytesused : pdst->length;
				pdst->data_offset = psrc->data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal v4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 *
		 * If bytesused == 0 for the output buffer, then fall back
		 * to the full buffer size as that's a sensible default.
		 *
		 * Some drivers, e.g. old codec drivers, use bytesused == 0 as
		 * a way to indicate that streaming is finished. In that case,
		 * the driver should use the allow_zero_bytesused flag to keep
		 * old userspace applications working.
		 */
		switch (b->memory) {
		case VB2_MEMORY_USERPTR:
			planes[0].m.userptr = b->m.userptr;
			planes[0].length = b->length;
			break;
		case VB2_MEMORY_DMABUF:
			planes[0].m.fd = b->m.fd;
			planes[0].length = b->length;
			break;
		default:
			planes[0].m.offset = vb->planes[0].m.offset;
			planes[0].length = vb->planes[0].length;
			break;
		}

		planes[0].data_offset = 0;
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			if (b->bytesused == 0)
				vb2_warn_zero_bytesused(vb);

			if (vb->vb2_queue->allow_zero_bytesused)
				planes[0].bytesused = b->bytesused;
			else
				planes[0].bytesused = b->bytesused ?
					b->bytesused : planes[0].length;
		} else
			planes[0].bytesused = 0;

	}

	/* Zero flags that we handle */
	vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vbuf->field = b->field;
		if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
			vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
		/* Zero last flag, this is a signal from driver to userspace */
		vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
	}

	return 0;
}

static void set_buffer_cache_hints(struct vb2_queue *q,
				   struct vb2_buffer *vb,
				   struct v4l2_buffer *b)
{
	/*
	 * DMA exporter should take care of cache syncs, so we can avoid
	 * explicit ->prepare()/->finish() syncs. For other ->memory types
	 * we always need ->prepare() and/or ->finish() cache sync.
	 */
	if (q->memory == VB2_MEMORY_DMABUF) {
		vb->need_cache_sync_on_finish = 0;
		vb->need_cache_sync_on_prepare = 0;
		return;
	}

	/*
	 * Cache sync/invalidation flags are set by default in order to
	 * preserve existing behaviour for old apps/drivers.
	 */
	vb->need_cache_sync_on_prepare = 1;
	vb->need_cache_sync_on_finish = 1;

	if (!vb2_queue_allows_cache_hints(q)) {
		/*
		 * Clear buffer cache flags if queue does not support user
		 * space hints. That's to indicate to userspace that these
		 * flags won't work.
		 */
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
		b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
		return;
	}

	/*
	 * ->finish() cache sync can be avoided when queue direction is
	 * TO_DEVICE.
	 */
	if (q->dma_dir == DMA_TO_DEVICE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
		vb->need_cache_sync_on_finish = 0;

	if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
		vb->need_cache_sync_on_prepare = 0;
}

static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
				    struct v4l2_buffer *b, bool is_prepare,
				    struct media_request **p_req)
{
	const char *opname = is_prepare ? "prepare_buf" : "qbuf";
	struct media_request *req;
	struct vb2_v4l2_buffer *vbuf;
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "%s: invalid buffer type\n", opname);
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "%s: buffer index out of range\n", opname);
		return -EINVAL;
	}

	if (q->bufs[b->index] == NULL) {
		/* Should never happen */
		dprintk(q, 1, "%s: buffer is NULL\n", opname);
		return -EINVAL;
	}

	if (b->memory != q->memory) {
		dprintk(q, 1, "%s: invalid memory type\n", opname);
		return -EINVAL;
	}

	vb = q->bufs[b->index];
	vbuf = to_vb2_v4l2_buffer(vb);
	ret = __verify_planes_array(vb, b);
	if (ret)
		return ret;

	if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
	    vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
		return -EINVAL;
	}

	if (!vb->prepared) {
		set_buffer_cache_hints(q, vb, b);
		/* Copy relevant information provided by userspace */
		memset(vbuf->planes, 0,
		       sizeof(vbuf->planes[0]) * vb->num_planes);
		ret = vb2_fill_vb2_v4l2_buffer(vb, b);
		if (ret)
			return ret;
	}

	if (is_prepare)
		return 0;

	if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
		if (q->requires_requests) {
			dprintk(q, 1, "%s: queue requires requests\n", opname);
			return -EBADR;
		}
		if (q->uses_requests) {
			dprintk(q, 1, "%s: queue uses requests\n", opname);
			return -EBUSY;
		}
		return 0;
	} else if (!q->supports_requests) {
		dprintk(q, 1, "%s: queue does not support requests\n", opname);
		return -EBADR;
	} else if (q->uses_qbuf) {
		dprintk(q, 1, "%s: queue does not use requests\n", opname);
		return -EBUSY;
	}

	/*
	 * For proper locking when queueing a request you need to be able
	 * to lock access to the vb2 queue, so check that there is a lock
	 * that we can use. In addition p_req must be non-NULL.
	 */
	if (WARN_ON(!q->lock || !p_req))
		return -EINVAL;

	/*
	 * Make sure this op is implemented by the driver. It's easy to forget
	 * this callback, but it is important when canceling a buffer in a
	 * queued request.
	 */
	if (WARN_ON(!q->ops->buf_request_complete))
		return -EINVAL;
	/*
	 * Make sure this op is implemented by the driver for the output queue.
	 * It's easy to forget this callback, but it is important to correctly
	 * validate the 'field' value at QBUF time.
	 */
	if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
		     q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
		    !q->ops->buf_out_validate))
		return -EINVAL;

	if (b->request_fd < 0) {
		dprintk(q, 1, "%s: request_fd < 0\n", opname);
		return -EINVAL;
	}

	req = media_request_get_by_fd(mdev, b->request_fd);
	if (IS_ERR(req)) {
		dprintk(q, 1, "%s: invalid request_fd\n", opname);
		return PTR_ERR(req);
	}

	/*
	 * Early sanity check. This is checked again when the buffer
	 * is bound to the request in vb2_core_qbuf().
	 */
	if (req->state != MEDIA_REQUEST_STATE_IDLE &&
	    req->state != MEDIA_REQUEST_STATE_UPDATING) {
		dprintk(q, 1, "%s: request is not idle\n", opname);
		media_request_put(req);
		return -EBUSY;
	}

	*p_req = req;
	vbuf->request_fd = b->request_fd;

	return 0;
}

/*
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
{
	struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	/* Copy back data such as timestamp, flags, etc. */
	b->index = vb->index;
	b->type = vb->type;
	b->memory = vb->memory;
	b->bytesused = 0;

	b->flags = vbuf->flags;
	b->field = vbuf->field;
	v4l2_buffer_set_timestamp(b, vb->timestamp);
	b->timecode = vbuf->timecode;
	b->sequence = vbuf->sequence;
	b->reserved2 = 0;
	b->request_fd = 0;

	if (q->is_multiplanar) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		for (plane = 0; plane < vb->num_planes; ++plane) {
			struct v4l2_plane *pdst = &b->m.planes[plane];
			struct vb2_plane *psrc = &vb->planes[plane];

			pdst->bytesused = psrc->bytesused;
			pdst->length = psrc->length;
			if (q->memory == VB2_MEMORY_MMAP)
				pdst->m.mem_offset = psrc->m.offset;
			else if (q->memory == VB2_MEMORY_USERPTR)
				pdst->m.userptr = psrc->m.userptr;
			else if (q->memory == VB2_MEMORY_DMABUF)
				pdst->m.fd = psrc->m.fd;
			pdst->data_offset = psrc->data_offset;
			memset(pdst->reserved, 0, sizeof(pdst->reserved));
		}
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->planes[0].length;
		b->bytesused = vb->planes[0].bytesused;
		if (q->memory == VB2_MEMORY_MMAP)
			b->m.offset = vb->planes[0].m.offset;
		else if (q->memory == VB2_MEMORY_USERPTR)
			b->m.userptr = vb->planes[0].m.userptr;
		else if (q->memory == VB2_MEMORY_DMABUF)
			b->m.fd = vb->planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if (!q->copy_timestamp) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_IN_REQUEST:
		b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
	     vb->state == VB2_BUF_STATE_IN_REQUEST) &&
	    vb->synced && vb->prepared)
		b->flags |= V4L2_BUF_FLAG_PREPARED;

	if (vb2_buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
	if (vbuf->request_fd >= 0) {
		b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
		b->request_fd = vbuf->request_fd;
	}
}

/*
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. It also verifies that struct
 * v4l2_buffer has a valid number of planes.
 */
static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	unsigned int plane;

	if (!vb->vb2_queue->copy_timestamp)
		vb->timestamp = 0;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
			planes[plane].m = vbuf->planes[plane].m;
			planes[plane].length = vbuf->planes[plane].length;
		}
		planes[plane].bytesused = vbuf->planes[plane].bytesused;
		planes[plane].data_offset = vbuf->planes[plane].data_offset;
	}
	return 0;
}

static const struct vb2_buf_ops v4l2_buf_ops = {
	.verify_planes_array	= __verify_planes_array_core,
	.init_buffer		= __init_vb2_v4l2_buffer,
	.fill_user_buffer	= __fill_v4l2_buffer,
	.fill_vb2_buffer	= __fill_vb2_buffer,
	.copy_timestamp		= __copy_timestamp,
};

int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
		       unsigned int start_idx)
{
	unsigned int i;

	for (i = start_idx; i < q->num_buffers; i++)
		if (q->bufs[i]->copied_timestamp &&
		    q->bufs[i]->timestamp == timestamp)
			return i;
	return -1;
}
EXPORT_SYMBOL_GPL(vb2_find_timestamp);
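
/*
 * Illustrative sketch, not taken from an in-tree driver: stateless codec
 * drivers typically use vb2_find_timestamp() to map a reference frame's
 * timestamp back to the capture buffer that carries it ("cap_q" and "ref_ts"
 * are hypothetical names):
 *
 *	int idx = vb2_find_timestamp(cap_q, ref_ts, 0);
 *
 *	if (idx < 0)
 *		return -EINVAL;
 *
 * A negative result means no buffer in the queue has copied that timestamp.
 */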

/*
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(q, 1, "wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(q, 1, "buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		vb2_core_querybuf(q, b->index, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);

static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
{
	*caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
	if (q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
	if (q->io_modes & VB2_USERPTR)
		*caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
	if (q->io_modes & VB2_DMABUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
	if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
		*caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
	if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
		*caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
	if (q->supports_requests)
		*caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
#endif
}

int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = vb2_verify_memory_type(q, req->memory, req->type);

	fill_buf_caps(q, &req->capabilities);
	return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
		    struct v4l2_buffer *b)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
		return -EINVAL;

	ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);

	return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned requested_planes = 1;
	unsigned requested_sizes[VIDEO_MAX_PLANES];
	struct v4l2_format *f = &create->format;
	int ret = vb2_verify_memory_type(q, create->memory, f->type);
	unsigned i;

	fill_buf_caps(q, &create->capabilities);
	create->index = q->num_buffers;
	if (create->count == 0)
		return ret != -EBUSY ? ret : 0;

	switch (f->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
		requested_planes = f->fmt.pix_mp.num_planes;
		if (requested_planes == 0 ||
		    requested_planes > VIDEO_MAX_PLANES)
			return -EINVAL;
		for (i = 0; i < requested_planes; i++)
			requested_sizes[i] =
				f->fmt.pix_mp.plane_fmt[i].sizeimage;
		break;
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		requested_sizes[0] = f->fmt.pix.sizeimage;
		break;
	case V4L2_BUF_TYPE_VBI_CAPTURE:
	case V4L2_BUF_TYPE_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.vbi.samples_per_line *
			(f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
		break;
	case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
	case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
		requested_sizes[0] = f->fmt.sliced.io_size;
		break;
	case V4L2_BUF_TYPE_SDR_CAPTURE:
	case V4L2_BUF_TYPE_SDR_OUTPUT:
		requested_sizes[0] = f->fmt.sdr.buffersize;
		break;
	case V4L2_BUF_TYPE_META_CAPTURE:
	case V4L2_BUF_TYPE_META_OUTPUT:
		requested_sizes[0] = f->fmt.meta.buffersize;
		break;
	default:
		return -EINVAL;
	}
	for (i = 0; i < requested_planes; i++)
		if (requested_sizes[i] == 0)
			return -EINVAL;
	return ret ? ret : vb2_core_create_bufs(q, create->memory,
						&create->count,
						requested_planes,
						requested_sizes);
}
EXPORT_SYMBOL_GPL(vb2_create_bufs);

int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
	     struct v4l2_buffer *b)
{
	struct media_request *req = NULL;
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
	if (ret)
		return ret;
	ret = vb2_core_qbuf(q, b->index, b, req);
	if (req)
		media_request_put(req);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	int ret;

	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(q, 1, "invalid buffer type\n");
		return -EINVAL;
	}

	ret = vb2_core_dqbuf(q, NULL, b, nonblocking);

	if (!q->is_output &&
	    b->flags & V4L2_BUF_FLAG_DONE &&
	    b->flags & V4L2_BUF_FLAG_LAST)
		q->last_buffer_dequeued = true;

	/*
	 *  After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
	 *  cleared.
	 */
	b->flags &= ~V4L2_BUF_FLAG_DONE;

	return ret;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamon(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamon);

int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (vb2_fileio_is_active(q)) {
		dprintk(q, 1, "file io in progress\n");
		return -EBUSY;
	}
	return vb2_core_streamoff(q, type);
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
				eb->plane, eb->flags);
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

int vb2_queue_init_name(struct vb2_queue *q, const char *name)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q)			  ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	/* Warn that vb2_memory should match with v4l2_memory */
	if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
		|| WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
		|| WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
		return -EINVAL;

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);

	q->buf_ops = &v4l2_buf_ops;
	q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
	q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
	q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
			== V4L2_BUF_FLAG_TIMESTAMP_COPY;
	/*
	 * For compatibility with vb1: if QBUF hasn't been called yet, then
	 * return EPOLLERR as well. This only affects capture queues, output
	 * queues will always initialize waiting_for_buffers to false.
	 */
	q->quirk_poll_must_check_waiting_for_buffers = true;

	if (name)
		strscpy(q->name, name, sizeof(q->name));
	else
		q->name[0] = '\0';

	return vb2_core_queue_init(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_init_name);

int vb2_queue_init(struct vb2_queue *q)
{
	return vb2_queue_init_name(q, NULL);
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
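
/*
 * Illustrative sketch of the minimal setup a driver performs before calling
 * vb2_queue_init(); the "foo" names and the choice of vb2_vmalloc_memops are
 * placeholders, not a recommendation:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->drv_priv = foo;
 *	q->buf_struct_size = sizeof(struct foo_buffer);
 *	q->ops = &foo_qops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &foo->lock;
 *	ret = vb2_queue_init(q);
 */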

void vb2_queue_release(struct vb2_queue *q)
{
	vb2_core_queue_release(q);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	__poll_t res;

	res = vb2_core_poll(q, file, wait);

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		poll_wait(file, &fh->wait, wait);
		if (v4l2_event_pending(fh))
			res |= EPOLLPRI;
	}

	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);

/*
 * The following functions are not part of the vb2 core API, but are helper
 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
 * and struct vb2_ops.
 * They contain boilerplate code that most if not all drivers have to do
 * and so they simplify the driver code.
 */
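
/*
 * Illustrative sketch (hypothetical "foo" driver): these helpers are meant
 * to be plugged directly into the driver's v4l2_ioctl_ops and
 * v4l2_file_operations:
 *
 *	static const struct v4l2_ioctl_ops foo_ioctl_ops = {
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 */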

/* The queue is busy if there is an owner and you are not that owner. */
static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
{
	return vdev->queue->owner && vdev->queue->owner != file->private_data;
}

/* vb2 ioctl helpers */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);

	fill_buf_caps(vdev->queue, &p->capabilities);
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
	/* If count == 0, then the owner has released all buffers and is
	   no longer the owner of the queue. Otherwise we have a new owner. */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = vb2_verify_memory_type(vdev->queue, p->memory,
			p->format.type);

	p->index = vdev->queue->num_buffers;
	fill_buf_caps(vdev->queue, &p->capabilities);
	/*
	 * If count == 0, then just check if memory and type are valid.
	 * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
	 */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;

	res = vb2_create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_mmap(vdev->queue, vma);
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int _vb2_fop_release(struct file *file, struct mutex *lock)
{
	struct video_device *vdev = video_devdata(file);

	if (lock)
		mutex_lock(lock);
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	if (lock)
		mutex_unlock(lock);
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(_vb2_fop_release);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;

	return _vb2_fop_release(file, lock);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_WRITE))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (!(vdev->queue->io_modes & VB2_READ))
		return -EINVAL;
	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	__poll_t res;
	void *fileio;

	/*
	 * If this helper doesn't know how to lock, then you shouldn't be using
	 * it but you should write your own.
	 */
	WARN_ON(!lock);

	if (lock && mutex_lock_interruptible(lock))
		return EPOLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (!fileio && q->fileio)
		q->owner = file->private_data;
	if (lock)
		mutex_unlock(lock);
	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);

	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

/* vb2_ops helpers. Only use if vq->lock is non-NULL. */

void vb2_ops_wait_prepare(struct vb2_queue *vq)
{
	mutex_unlock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);

void vb2_ops_wait_finish(struct vb2_queue *vq)
{
	mutex_lock(vq->lock);
}
EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
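
/*
 * Illustrative sketch: with vq->lock set, the two helpers above can be used
 * directly in the driver's vb2_ops; the remaining "foo_*" callbacks are
 * placeholders for the driver's own implementations:
 *
 *	static const struct vb2_ops foo_qops = {
 *		.queue_setup		= foo_queue_setup,
 *		.buf_prepare		= foo_buf_prepare,
 *		.buf_queue		= foo_buf_queue,
 *		.start_streaming	= foo_start_streaming,
 *		.stop_streaming		= foo_stop_streaming,
 *		.wait_prepare		= vb2_ops_wait_prepare,
 *		.wait_finish		= vb2_ops_wait_finish,
 *	};
 */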

/*
 * Note that this function is called during validation time and
 * thus the req_queue_mutex is held to ensure no request objects
 * can be added or deleted while validating. So there is no need
 * to protect the objects list.
 */
int vb2_request_validate(struct media_request *req)
{
	struct media_request_object *obj;
	int ret = 0;

	if (!vb2_request_buffer_cnt(req))
		return -ENOENT;

	list_for_each_entry(obj, &req->objects, list) {
		if (!obj->ops->prepare)
			continue;

		ret = obj->ops->prepare(obj);
		if (ret)
			break;
	}

	if (ret) {
		list_for_each_entry_continue_reverse(obj, &req->objects, list)
			if (obj->ops->unprepare)
				obj->ops->unprepare(obj);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_request_validate);

void vb2_request_queue(struct media_request *req)
{
	struct media_request_object *obj, *obj_safe;

	/*
	 * Queue all objects. Note that buffer objects are at the end of the
	 * objects list, after all other object types. Once buffer objects
	 * are queued, the driver might delete them immediately (if the driver
	 * processes the buffer at once), so we have to use
	 * list_for_each_entry_safe() to handle the case where the object we
	 * queue is deleted.
	 */
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
		if (obj->ops->queue)
			obj->ops->queue(obj);
}
EXPORT_SYMBOL_GPL(vb2_request_queue);
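
/*
 * Illustrative sketch: a driver that supports requests typically points its
 * media_device_ops at the two helpers above ("foo" is a placeholder):
 *
 *	static const struct media_device_ops foo_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= vb2_request_queue,
 *	};
 */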

MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
MODULE_LICENSE("GPL");