Commit d790b7ed authored by Hans Verkuil, committed by Mauro Carvalho Chehab

[media] vb2-dma-sg: move dma_(un)map_sg here

This moves dma_(un)map_sg to the get_userptr/put_userptr and alloc/put
memops of videobuf2-dma-sg.c and adds dma_sync_sg_for_device/cpu to the
prepare/finish memops.

Now that vb2-dma-sg syncs the buffers in the prepare/finish memops, we can
drop those mapping and syncing calls from the drivers that use dma-sg.
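
For reference, the sync pair that vb2 now runs around every transfer looks
like this; it is condensed from the videobuf2-dma-sg.c hunk at the end of
this patch (.prepare runs before the driver starts DMA, .finish after DMA
completes):

    static void vb2_dma_sg_prepare(void *buf_priv)
    {
            struct vb2_dma_sg_buf *buf = buf_priv;
            struct sg_table *sgt = &buf->sg_table;

            /* hand the buffer to the device: flush pending CPU writes */
            dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
    }

    static void vb2_dma_sg_finish(void *buf_priv)
    {
            struct vb2_dma_sg_buf *buf = buf_priv;
            struct sg_table *sgt = &buf->sg_table;

            /* hand the buffer back to the CPU: drop stale cache lines */
            dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
    }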

For the solo6x10 driver this was a bit more involved: that driver needs to
copy JPEG or MPEG headers into the buffer before returning it to userspace,
and this cannot be done in the old place, since there the buffer is still
set up for DMA access, not for CPU access. The buf_finish op, however, is
the ideal place for it: by the time buf_finish is called the buffer is
available for CPU access, so copying into the buffer is fine.
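
To make the ordering concrete: on dequeue the vb2 core first runs the
allocator's finish memop (the new dma_sync_sg_for_cpu in vb2_dma_sg_finish)
and only then calls the driver's buf_finish op. A condensed, JPEG-only
sketch of the solo6x10 op added by this patch (the full version in the diff
below also handles the MPEG VOP header):

    static void solo_enc_buf_finish(struct vb2_buffer *vb)
    {
            struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
            struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);

            /* vb2 has already synced the buffer for CPU access at this
             * point, so writing the JPEG header into the scatterlist
             * is safe */
            sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
                            solo_enc->jpeg_header, solo_enc->jpeg_len);
    }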

[mchehab@osg.samsung.com: Fix a compilation breakage:
 drivers/media/v4l2-core/videobuf2-dma-sg.c:150:19: error: 'struct vb2_dma_sg_buf' has no member named 'dma_sgt']
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
parent 0c3a14c1
@@ -1167,11 +1167,8 @@ static void buffer_finish(struct vb2_buffer *vb)
 	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
 	struct cx23885_buffer *buf = container_of(vb,
 			struct cx23885_buffer, vb);
-	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 
 	cx23885_free_buffer(dev, buf);
-
-	dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 }
 
 static void buffer_queue(struct vb2_buffer *vb)
...
@@ -1453,17 +1453,12 @@ int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
 	struct cx23885_dev *dev = port->dev;
 	int size = port->ts_packet_size * port->ts_packet_count;
 	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
-	int rc;
 
 	dprintk(1, "%s: %p\n", __func__, buf);
 	if (vb2_plane_size(&buf->vb, 0) < size)
 		return -EINVAL;
 	vb2_set_plane_payload(&buf->vb, 0, size);
 
-	rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
-	if (!rc)
-		return -EIO;
-
 	cx23885_risc_databuffer(dev->pci, &buf->risc,
 			sgt->sgl,
 			port->ts_packet_size, port->ts_packet_count, 0);
...
@@ -123,11 +123,8 @@ static void buffer_finish(struct vb2_buffer *vb)
 	struct cx23885_dev *dev = port->dev;
 	struct cx23885_buffer *buf = container_of(vb,
 			struct cx23885_buffer, vb);
-	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 
 	cx23885_free_buffer(dev, buf);
-
-	dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 }
 
 static void buffer_queue(struct vb2_buffer *vb)
...
@@ -143,7 +143,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
 			struct cx23885_buffer, vb);
 	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 	unsigned lines = VBI_PAL_LINE_COUNT;
-	int ret;
 
 	if (dev->tvnorm & V4L2_STD_525_60)
 		lines = VBI_NTSC_LINE_COUNT;
@@ -152,10 +151,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
 		return -EINVAL;
 	vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);
 
-	ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
-	if (!ret)
-		return -EIO;
-
 	cx23885_risc_vbibuffer(dev->pci, &buf->risc,
 			sgt->sgl,
 			0, VBI_LINE_LENGTH * lines,
@@ -166,14 +161,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
 static void buffer_finish(struct vb2_buffer *vb)
 {
-	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
 	struct cx23885_buffer *buf = container_of(vb,
 			struct cx23885_buffer, vb);
-	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 
 	cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
-
-	dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 }
 
 /*
...
@@ -335,7 +335,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
 	u32 line0_offset, line1_offset;
 	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 	int field_tff;
-	int ret;
 
 	buf->bpl = (dev->width * dev->fmt->depth) >> 3;
@@ -343,10 +342,6 @@ static int buffer_prepare(struct vb2_buffer *vb)
 		return -EINVAL;
 	vb2_set_plane_payload(vb, 0, dev->height * buf->bpl);
 
-	ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
-	if (!ret)
-		return -EIO;
-
 	switch (dev->field) {
 	case V4L2_FIELD_TOP:
 		cx23885_risc_buffer(dev->pci, &buf->risc,
@@ -414,14 +409,10 @@ static int buffer_prepare(struct vb2_buffer *vb)
 static void buffer_finish(struct vb2_buffer *vb)
 {
-	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
 	struct cx23885_buffer *buf = container_of(vb,
 			struct cx23885_buffer, vb);
-	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 
 	cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
-
-	dma_unmap_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 }
 
 /*
...
@@ -96,7 +96,6 @@ static struct vb2_ops saa7134_empress_qops = {
 	.queue_setup	= saa7134_ts_queue_setup,
 	.buf_init	= saa7134_ts_buffer_init,
 	.buf_prepare	= saa7134_ts_buffer_prepare,
-	.buf_finish	= saa7134_ts_buffer_finish,
 	.buf_queue	= saa7134_vb2_buffer_queue,
 	.wait_prepare	= vb2_ops_wait_prepare,
 	.wait_finish	= vb2_ops_wait_finish,
...
@@ -94,7 +94,6 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
 	struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
 	struct sg_table *dma = vb2_dma_sg_plane_desc(vb2, 0);
 	unsigned int lines, llength, size;
-	int ret;
 
 	dprintk("buffer_prepare [%p]\n", buf);
@@ -108,25 +107,11 @@ int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2)
 	vb2_set_plane_payload(vb2, 0, size);
 	vb2->v4l2_buf.field = dev->field;
 
-	ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-	if (!ret)
-		return -EIO;
 	return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
 				    saa7134_buffer_startpage(buf));
 }
 EXPORT_SYMBOL_GPL(saa7134_ts_buffer_prepare);
 
-void saa7134_ts_buffer_finish(struct vb2_buffer *vb2)
-{
-	struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
-	struct saa7134_dev *dev = dmaq->dev;
-	struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
-	struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
-
-	dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-}
-EXPORT_SYMBOL_GPL(saa7134_ts_buffer_finish);
-
 int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
 			   unsigned int *nbuffers, unsigned int *nplanes,
 			   unsigned int sizes[], void *alloc_ctxs[])
@@ -188,7 +173,6 @@ struct vb2_ops saa7134_ts_qops = {
 	.queue_setup	= saa7134_ts_queue_setup,
 	.buf_init	= saa7134_ts_buffer_init,
 	.buf_prepare	= saa7134_ts_buffer_prepare,
-	.buf_finish	= saa7134_ts_buffer_finish,
 	.buf_queue	= saa7134_vb2_buffer_queue,
 	.wait_prepare	= vb2_ops_wait_prepare,
 	.wait_finish	= vb2_ops_wait_finish,
...
@@ -120,7 +120,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
 	struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
 	struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
 	unsigned int size;
-	int ret;
 
 	if (dma->sgl->offset) {
 		pr_err("The buffer is not page-aligned\n");
@@ -132,9 +131,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
 	vb2_set_plane_payload(vb2, 0, size);
 
-	ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-	if (!ret)
-		return -EIO;
 	return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
 				    saa7134_buffer_startpage(buf));
 }
@@ -170,21 +166,10 @@ static int buffer_init(struct vb2_buffer *vb2)
 	return 0;
 }
 
-static void buffer_finish(struct vb2_buffer *vb2)
-{
-	struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
-	struct saa7134_dev *dev = dmaq->dev;
-	struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
-	struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
-
-	dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-}
-
 struct vb2_ops saa7134_vbi_qops = {
 	.queue_setup	= queue_setup,
 	.buf_init	= buffer_init,
 	.buf_prepare	= buffer_prepare,
-	.buf_finish	= buffer_finish,
 	.buf_queue	= saa7134_vb2_buffer_queue,
 	.wait_prepare	= vb2_ops_wait_prepare,
 	.wait_finish	= vb2_ops_wait_finish,
...
@@ -883,7 +883,6 @@ static int buffer_prepare(struct vb2_buffer *vb2)
 	struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
 	struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
 	unsigned int size;
-	int ret;
 
 	if (dma->sgl->offset) {
 		pr_err("The buffer is not page-aligned\n");
@@ -896,23 +895,10 @@ static int buffer_prepare(struct vb2_buffer *vb2)
 	vb2_set_plane_payload(vb2, 0, size);
 	vb2->v4l2_buf.field = dev->field;
 
-	ret = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-	if (!ret)
-		return -EIO;
 	return saa7134_pgtable_build(dev->pci, &dmaq->pt, dma->sgl, dma->nents,
 				    saa7134_buffer_startpage(buf));
 }
 
-static void buffer_finish(struct vb2_buffer *vb2)
-{
-	struct saa7134_dmaqueue *dmaq = vb2->vb2_queue->drv_priv;
-	struct saa7134_dev *dev = dmaq->dev;
-	struct saa7134_buf *buf = container_of(vb2, struct saa7134_buf, vb2);
-	struct sg_table *dma = vb2_dma_sg_plane_desc(&buf->vb2, 0);
-
-	dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-}
-
 static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
 		       unsigned int *nbuffers, unsigned int *nplanes,
 		       unsigned int sizes[], void *alloc_ctxs[])
@@ -1005,7 +991,6 @@ static struct vb2_ops vb2_qops = {
 	.queue_setup	= queue_setup,
 	.buf_init	= buffer_init,
 	.buf_prepare	= buffer_prepare,
-	.buf_finish	= buffer_finish,
 	.buf_queue	= saa7134_vb2_buffer_queue,
 	.wait_prepare	= vb2_ops_wait_prepare,
 	.wait_finish	= vb2_ops_wait_finish,
...
@@ -815,7 +815,6 @@ void saa7134_video_fini(struct saa7134_dev *dev);
 int saa7134_ts_buffer_init(struct vb2_buffer *vb2);
 int saa7134_ts_buffer_prepare(struct vb2_buffer *vb2);
-void saa7134_ts_buffer_finish(struct vb2_buffer *vb2);
 int saa7134_ts_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt,
 			   unsigned int *nbuffers, unsigned int *nplanes,
 			   unsigned int sizes[], void *alloc_ctxs[]);
...
@@ -463,7 +463,6 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
 	struct solo_dev *solo_dev = solo_enc->solo_dev;
 	struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
 	int frame_size;
-	int ret;
 
 	vb->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
@@ -473,22 +472,10 @@ static int solo_fill_jpeg(struct solo_enc_dev *solo_enc,
 	frame_size = ALIGN(vop_jpeg_size(vh) + solo_enc->jpeg_len, DMA_ALIGN);
 	vb2_set_plane_payload(vb, 0, vop_jpeg_size(vh) + solo_enc->jpeg_len);
 
-	/* may discard all previous data in vbuf->sgl */
-	if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
-			DMA_FROM_DEVICE))
-		return -ENOMEM;
-	ret = solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
+	return solo_send_desc(solo_enc, solo_enc->jpeg_len, vbuf,
 			vop_jpeg_offset(vh) - SOLO_JPEG_EXT_ADDR(solo_dev),
 			frame_size, SOLO_JPEG_EXT_ADDR(solo_dev),
 			SOLO_JPEG_EXT_SIZE(solo_dev));
-	dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
-			DMA_FROM_DEVICE);
-
-	/* add the header only after dma_unmap_sg() */
-	sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
-			solo_enc->jpeg_header, solo_enc->jpeg_len);
-	return ret;
 }
 
 static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
@@ -498,7 +485,6 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
 	struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
 	int frame_off, frame_size;
 	int skip = 0;
-	int ret;
 
 	if (vb2_plane_size(vb, 0) < vop_mpeg_size(vh))
 		return -EIO;
@@ -521,21 +507,9 @@ static int solo_fill_mpeg(struct solo_enc_dev *solo_enc,
 			sizeof(*vh)) % SOLO_MP4E_EXT_SIZE(solo_dev);
 	frame_size = ALIGN(vop_mpeg_size(vh) + skip, DMA_ALIGN);
 
-	/* may discard all previous data in vbuf->sgl */
-	if (!dma_map_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
-			DMA_FROM_DEVICE))
-		return -ENOMEM;
-	ret = solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
+	return solo_send_desc(solo_enc, skip, vbuf, frame_off, frame_size,
 			SOLO_MP4E_EXT_ADDR(solo_dev),
 			SOLO_MP4E_EXT_SIZE(solo_dev));
-	dma_unmap_sg(&solo_dev->pdev->dev, vbuf->sgl, vbuf->nents,
-			DMA_FROM_DEVICE);
-
-	/* add the header only after dma_unmap_sg() */
-	if (!vop_type(vh))
-		sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
-				solo_enc->vop, solo_enc->vop_len);
-	return ret;
 }
 
 static int solo_enc_fillbuf(struct solo_enc_dev *solo_enc,
@@ -793,9 +767,29 @@ static void solo_enc_stop_streaming(struct vb2_queue *q)
 	spin_unlock_irqrestore(&solo_enc->av_lock, flags);
 }
 
+static void solo_enc_buf_finish(struct vb2_buffer *vb)
+{
+	struct solo_enc_dev *solo_enc = vb2_get_drv_priv(vb->vb2_queue);
+	struct sg_table *vbuf = vb2_dma_sg_plane_desc(vb, 0);
+
+	switch (solo_enc->fmt) {
+	case V4L2_PIX_FMT_MPEG4:
+	case V4L2_PIX_FMT_H264:
+		if (vb->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME)
+			sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+					solo_enc->vop, solo_enc->vop_len);
+		break;
+	default: /* V4L2_PIX_FMT_MJPEG */
+		sg_copy_from_buffer(vbuf->sgl, vbuf->nents,
+				solo_enc->jpeg_header, solo_enc->jpeg_len);
+		break;
+	}
+}
+
 static struct vb2_ops solo_enc_video_qops = {
 	.queue_setup	= solo_enc_queue_setup,
 	.buf_queue	= solo_enc_buf_queue,
+	.buf_finish	= solo_enc_buf_finish,
 	.start_streaming	= solo_enc_start_streaming,
 	.stop_streaming	= solo_enc_stop_streaming,
 	.wait_prepare	= vb2_ops_wait_prepare,
...
@@ -462,17 +462,12 @@ static int tw68_buf_prepare(struct vb2_buffer *vb)
 	struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
 	struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
 	unsigned size, bpl;
-	int rc;
 
 	size = (dev->width * dev->height * dev->fmt->depth) >> 3;
 	if (vb2_plane_size(vb, 0) < size)
 		return -EINVAL;
 	vb2_set_plane_payload(vb, 0, size);
 
-	rc = dma_map_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-	if (!rc)
-		return -EIO;
-
 	bpl = (dev->width * dev->fmt->depth) >> 3;
 	switch (dev->field) {
 	case V4L2_FIELD_TOP:
@@ -506,11 +501,8 @@ static void tw68_buf_finish(struct vb2_buffer *vb)
 {
 	struct vb2_queue *vq = vb->vb2_queue;
 	struct tw68_dev *dev = vb2_get_drv_priv(vq);
-	struct sg_table *dma = vb2_dma_sg_plane_desc(vb, 0);
 	struct tw68_buf *buf = container_of(vb, struct tw68_buf, vb);
 
-	dma_unmap_sg(&dev->pci->dev, dma->sgl, dma->nents, DMA_FROM_DEVICE);
-
 	pci_free_consistent(dev->pci, buf->size, buf->cpu, buf->dma);
 }
...
@@ -1220,17 +1220,12 @@ static int mcam_vb_sg_buf_init(struct vb2_buffer *vb)
 static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
 {
 	struct mcam_vb_buffer *mvb = vb_to_mvb(vb);
-	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
 	struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
 	struct mcam_dma_desc *desc = mvb->dma_desc;
 	struct scatterlist *sg;
 	int i;
 
-	mvb->dma_desc_nent = dma_map_sg(cam->dev, sg_table->sgl,
-			sg_table->nents, DMA_FROM_DEVICE);
-	if (mvb->dma_desc_nent <= 0)
-		return -EIO; /* Not sure what's right here */
-	for_each_sg(sg_table->sgl, sg, mvb->dma_desc_nent, i) {
+	for_each_sg(sg_table->sgl, sg, sg_table->nents, i) {
 		desc->dma_addr = sg_dma_address(sg);
 		desc->segment_len = sg_dma_len(sg);
 		desc++;
@@ -1238,16 +1233,6 @@ static int mcam_vb_sg_buf_prepare(struct vb2_buffer *vb)
 	return 0;
 }
 
-static void mcam_vb_sg_buf_finish(struct vb2_buffer *vb)
-{
-	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
-	struct sg_table *sg_table = vb2_dma_sg_plane_desc(vb, 0);
-
-	if (sg_table)
-		dma_unmap_sg(cam->dev, sg_table->sgl,
-				sg_table->nents, DMA_FROM_DEVICE);
-}
-
 static void mcam_vb_sg_buf_cleanup(struct vb2_buffer *vb)
 {
 	struct mcam_camera *cam = vb2_get_drv_priv(vb->vb2_queue);
@@ -1264,7 +1249,6 @@ static const struct vb2_ops mcam_vb2_sg_ops = {
 	.buf_init = mcam_vb_sg_buf_init,
 	.buf_prepare = mcam_vb_sg_buf_prepare,
 	.buf_queue = mcam_vb_buf_queue,
-	.buf_finish = mcam_vb_sg_buf_finish,
 	.buf_cleanup = mcam_vb_sg_buf_cleanup,
 	.start_streaming = mcam_vb_start_streaming,
 	.stop_streaming = mcam_vb_stop_streaming,
...
@@ -96,6 +96,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 {
 	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
+	struct sg_table *sgt;
 	int ret;
 	int num_pages;
@@ -128,6 +129,12 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 	/* Prevent the device from being released while the buffer is used */
 	buf->dev = get_device(conf->dev);
 
+	sgt = &buf->sg_table;
+	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+		goto fail_map;
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_dma_sg_put;
 	buf->handler.arg = buf;
@@ -138,6 +145,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 		__func__, buf->num_pages);
 	return buf;
 
+fail_map:
+	put_device(buf->dev);
+	sg_free_table(sgt);
 fail_table_alloc:
 	num_pages = buf->num_pages;
 	while (num_pages--)
@@ -152,11 +162,13 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 static void vb2_dma_sg_put(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
 
 	if (atomic_dec_and_test(&buf->refcount)) {
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
+		dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(&buf->sg_table);
@@ -168,6 +180,22 @@ static void vb2_dma_sg_put(void *buf_priv)
 	}
 }
 
+static void vb2_dma_sg_prepare(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
+
+	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
+static void vb2_dma_sg_finish(void *buf_priv)
+{
+	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
+
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+}
+
 static inline int vma_is_io(struct vm_area_struct *vma)
 {
 	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
@@ -177,16 +205,19 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 		unsigned long size,
 		enum dma_data_direction dma_dir)
 {
+	struct vb2_dma_sg_conf *conf = alloc_ctx;
 	struct vb2_dma_sg_buf *buf;
 	unsigned long first, last;
 	int num_pages_from_user;
 	struct vm_area_struct *vma;
+	struct sg_table *sgt;
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
 		return NULL;
 
 	buf->vaddr = NULL;
+	buf->dev = conf->dev;
 	buf->dma_dir = dma_dir;
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
@@ -246,8 +277,14 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 					buf->num_pages, buf->offset, size, 0))
 		goto userptr_fail_alloc_table_from_pages;
 
+	sgt = &buf->sg_table;
+	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+		goto userptr_fail_map;
+	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 	return buf;
 
+userptr_fail_map:
+	sg_free_table(&buf->sg_table);
 userptr_fail_alloc_table_from_pages:
 userptr_fail_get_user_pages:
 	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
@@ -270,10 +307,12 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 static void vb2_dma_sg_put_userptr(void *buf_priv)
 {
 	struct vb2_dma_sg_buf *buf = buf_priv;
+	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
+	dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(&buf->sg_table);
@@ -360,6 +399,8 @@ const struct vb2_mem_ops vb2_dma_sg_memops = {
 	.put		= vb2_dma_sg_put,
 	.get_userptr	= vb2_dma_sg_get_userptr,
 	.put_userptr	= vb2_dma_sg_put_userptr,
+	.prepare	= vb2_dma_sg_prepare,
+	.finish		= vb2_dma_sg_finish,
 	.vaddr		= vb2_dma_sg_vaddr,
 	.mmap		= vb2_dma_sg_mmap,
 	.num_users	= vb2_dma_sg_num_users,
...