Commit a7b362fb authored by Inki Dae, committed by Inki Dae

drm/exynos: add dmabuf attach/detach callbacks.

With this patch, when dma_buf_unmap_attachment() is called, the pages of
the sgt are not unmapped from the iommu table. Instead, that is done when
dma_buf_detach() is called.

This patch also removes the exynos_get_sgt() function, which was used to
get a cloned sgt, and uses the attachment's own sgt instead. This resolves
a performance deterioration issue seen when a v4l2-based driver uses a
buffer imported from gem.

This change is derived from videobuf2-dma-contig.c
Signed-off-by: Inki Dae <inki.dae@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
parent ae9dace2
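
For context on the behaviour described in the commit message above, the sketch below illustrates the map-once, unmap-at-detach caching pattern this commit adopts (the same idea used by videobuf2-dma-contig): the scatter table is mapped on the first map_dma_buf call and cached in an attachment-private structure, unmap_dma_buf becomes a no-op, and the mapping is only torn down in detach. This is an illustrative sketch only; the example_* identifiers are hypothetical and are not part of the driver, and the real exynos implementation is the diff that follows.

/*
 * Illustrative sketch only -- the example_* identifiers are hypothetical
 * and do not exist in the kernel tree; the kernel APIs used here
 * (dma_map_sg, sg_alloc_table, sg_set_page, ...) are real.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Hypothetical helper: returns the exporter's own sg_table for @dmabuf. */
struct sg_table *example_exporter_sgt(struct dma_buf *dmabuf);

struct example_attachment {
	struct sg_table sgt;		/* per-attachment clone of the exporter's sgt */
	enum dma_data_direction dir;	/* DMA_NONE until the first map call */
};

static int example_attach(struct dma_buf *dmabuf, struct device *dev,
			  struct dma_buf_attachment *attach)
{
	struct example_attachment *a = kzalloc(sizeof(*a), GFP_KERNEL);

	if (!a)
		return -ENOMEM;

	a->dir = DMA_NONE;
	attach->priv = a;
	return 0;
}

static struct sg_table *example_map(struct dma_buf_attachment *attach,
				    enum dma_data_direction dir)
{
	struct example_attachment *a = attach->priv;
	struct sg_table *src = example_exporter_sgt(attach->dmabuf);
	struct scatterlist *rd, *wr;
	unsigned int i;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	/* Already mapped for this direction: hand back the cached table. */
	if (a->dir == dir)
		return &a->sgt;

	/* Mapping the same attachment for another direction is not supported. */
	if (WARN_ON(a->dir != DMA_NONE))
		return ERR_PTR(-EBUSY);

	if (sg_alloc_table(&a->sgt, src->orig_nents, GFP_KERNEL))
		return ERR_PTR(-ENOMEM);

	/* Clone the exporter's pages into the attachment-private table. */
	rd = src->sgl;
	wr = a->sgt.sgl;
	for (i = 0; i < a->sgt.orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	/* Map once; the mapping stays alive until detach. */
	if (!dma_map_sg(attach->dev, a->sgt.sgl, a->sgt.orig_nents, dir)) {
		sg_free_table(&a->sgt);
		return ERR_PTR(-EIO);
	}

	a->dir = dir;
	return &a->sgt;
}

static void example_unmap(struct dma_buf_attachment *attach,
			  struct sg_table *sgt, enum dma_data_direction dir)
{
	/* Nothing to do: the mapping is reused until detach. */
}

static void example_detach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attach)
{
	struct example_attachment *a = attach->priv;

	if (!a)
		return;

	if (a->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, a->sgt.sgl, a->sgt.nents, a->dir);

	sg_free_table(&a->sgt);	/* safe even if the table was never allocated */
	kfree(a);
	attach->priv = NULL;
}
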
@@ -30,63 +30,107 @@
 #include <linux/dma-buf.h>
 
-static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
-					struct exynos_drm_gem_buf *buf)
+struct exynos_drm_dmabuf_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
+					struct device *dev,
+					struct dma_buf_attachment *attach)
 {
-	struct sg_table *sgt = NULL;
-	int ret;
+	struct exynos_drm_dmabuf_attachment *exynos_attach;
 
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt)
-		goto out;
+	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
+	if (!exynos_attach)
+		return -ENOMEM;
 
-	ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
-				buf->dma_addr, buf->size);
-	if (ret < 0) {
-		DRM_ERROR("failed to get sgtable.\n");
-		goto err_free_sgt;
-	}
+	exynos_attach->dir = DMA_NONE;
+	attach->priv = exynos_attach;
 
-	return sgt;
+	return 0;
+}
 
-err_free_sgt:
-	kfree(sgt);
-	sgt = NULL;
-out:
-	return NULL;
+static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
+					struct dma_buf_attachment *attach)
+{
+	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
+	struct sg_table *sgt;
+
+	if (!exynos_attach)
+		return;
+
+	sgt = &exynos_attach->sgt;
+
+	if (exynos_attach->dir != DMA_NONE)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
+				exynos_attach->dir);
+
+	sg_free_table(sgt);
+	kfree(exynos_attach);
+	attach->priv = NULL;
 }
 
 static struct sg_table *
 		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
 					enum dma_data_direction dir)
 {
+	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
 	struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
 	struct drm_device *dev = gem_obj->base.dev;
 	struct exynos_drm_gem_buf *buf;
+	struct scatterlist *rd, *wr;
 	struct sg_table *sgt = NULL;
-	int nents;
+	unsigned int i;
+	int nents, ret;
 
 	DRM_DEBUG_PRIME("%s\n", __FILE__);
 
+	if (WARN_ON(dir == DMA_NONE))
+		return ERR_PTR(-EINVAL);
+
+	/* just return current sgt if already requested. */
+	if (exynos_attach->dir == dir)
+		return &exynos_attach->sgt;
+
+	/* reattaching is not allowed. */
+	if (WARN_ON(exynos_attach->dir != DMA_NONE))
+		return ERR_PTR(-EBUSY);
+
 	buf = gem_obj->buffer;
 	if (!buf) {
 		DRM_ERROR("buffer is null.\n");
-		return sgt;
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sgt = &exynos_attach->sgt;
+
+	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
+	if (ret) {
+		DRM_ERROR("failed to alloc sgt.\n");
+		return ERR_PTR(-ENOMEM);
 	}
 
 	mutex_lock(&dev->struct_mutex);
 
-	sgt = exynos_get_sgt(dev, buf);
-	if (!sgt)
-		goto err_unlock;
+	rd = buf->sgt->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
 
-	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+	nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
 	if (!nents) {
 		DRM_ERROR("failed to map sgl with iommu.\n");
-		sgt = NULL;
+		sgt = ERR_PTR(-EIO);
 		goto err_unlock;
 	}
 
+	exynos_attach->dir = dir;
+	attach->priv = exynos_attach;
+
 	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);
 
 err_unlock:
@@ -98,11 +142,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
 					struct sg_table *sgt,
 					enum dma_data_direction dir)
 {
-	dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
-
-	sg_free_table(sgt);
-	kfree(sgt);
-	sgt = NULL;
+	/* Nothing to do. */
 }
 
 static void exynos_dmabuf_release(struct dma_buf *dmabuf)
@@ -164,6 +204,8 @@ static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 }
 
 static struct dma_buf_ops exynos_dmabuf_ops = {
+	.attach		= exynos_gem_attach_dma_buf,
+	.detach		= exynos_gem_detach_dma_buf,
 	.map_dma_buf		= exynos_gem_map_dma_buf,
 	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
 	.kmap			= exynos_gem_dmabuf_kmap,
...