Commit 23543b3c authored by Dmitry Osipenko

media: videobuf2: Stop using internal dma-buf lock

All drivers that use dma-bufs have been moved to the updated locking
specification, and the dma-buf reservation lock is now guaranteed to be
held by importers during the mapping operations. There is no need to
take the internal dma-buf lock anymore, so remove the locking from the
videobuf2 memory allocators.
Acked-by: Tomasz Figa <tfiga@chromium.org>
Acked-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221017172229.42269-21-dmitry.osipenko@collabora.com
parent ae2e7f28
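
For context, under the updated locking specification the serialization that the removed mutex used to provide now comes from the dma-buf reservation lock, which the importer holds across the mapping call. Below is a minimal importer-side sketch; example_map_locked() is a hypothetical helper written for illustration, not part of this patch:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

/*
 * Hypothetical importer helper: the importer takes the dma-buf
 * reservation lock around the mapping call, so exporter callbacks
 * such as vb2_dc_dmabuf_ops_map() below already run serialized and
 * need no private mutex.
 */
static struct sg_table *example_map_locked(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct sg_table *sgt;

	dma_resv_lock(attach->dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, dir);
	dma_resv_unlock(attach->dmabuf->resv);

	return sgt;
}

Importers that do not need to hold the reservation lock across other work can instead call the dma_buf_map_attachment_unlocked() wrapper added earlier in this series, which takes and drops the lock internally.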
@@ -382,18 +382,12 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
 {
 	struct vb2_dc_attachment *attach = db_attach->priv;
-	/* stealing dmabuf mutex to serialize map/unmap operations */
-	struct mutex *lock = &db_attach->dmabuf->lock;
 	struct sg_table *sgt;

-	mutex_lock(lock);
-
 	sgt = &attach->sgt;
 	/* return previously mapped sg table */
-	if (attach->dma_dir == dma_dir) {
-		mutex_unlock(lock);
+	if (attach->dma_dir == dma_dir)
 		return sgt;
-	}

 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
@@ -409,14 +403,11 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
-		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
 	}

 	attach->dma_dir = dma_dir;

-	mutex_unlock(lock);
-
 	return sgt;
 }
@@ -424,18 +424,12 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
 {
 	struct vb2_dma_sg_attachment *attach = db_attach->priv;
-	/* stealing dmabuf mutex to serialize map/unmap operations */
-	struct mutex *lock = &db_attach->dmabuf->lock;
 	struct sg_table *sgt;

-	mutex_lock(lock);
-
 	sgt = &attach->sgt;
 	/* return previously mapped sg table */
-	if (attach->dma_dir == dma_dir) {
-		mutex_unlock(lock);
+	if (attach->dma_dir == dma_dir)
 		return sgt;
-	}

 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
@@ -446,14 +440,11 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 	/* mapping to the client with new direction */
 	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
-		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
 	}

 	attach->dma_dir = dma_dir;

-	mutex_unlock(lock);
-
 	return sgt;
 }
@@ -267,18 +267,12 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
 	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
 {
 	struct vb2_vmalloc_attachment *attach = db_attach->priv;
-	/* stealing dmabuf mutex to serialize map/unmap operations */
-	struct mutex *lock = &db_attach->dmabuf->lock;
 	struct sg_table *sgt;

-	mutex_lock(lock);
-
 	sgt = &attach->sgt;
 	/* return previously mapped sg table */
-	if (attach->dma_dir == dma_dir) {
-		mutex_unlock(lock);
+	if (attach->dma_dir == dma_dir)
 		return sgt;
-	}

 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
@@ -289,14 +283,11 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
 	/* mapping to the client with new direction */
 	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
-		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
 	}

 	attach->dma_dir = dma_dir;

-	mutex_unlock(lock);
-
 	return sgt;
 }