Commit 97817fd4 authored by Laurent Pinchart, committed by Tomi Valkeinen

drm: omapdrm: Map pages for DMA in DMA_TO_DEVICE direction

The display engine only reads from memory, there's no need to use
bidirectional DMA mappings. Use DMA_TO_DEVICE instead.
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
parent 930dc19c
...@@ -254,7 +254,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) ...@@ -254,7 +254,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
addrs[i] = dma_map_page(dev->dev, pages[i], addrs[i] = dma_map_page(dev->dev, pages[i],
0, PAGE_SIZE, DMA_BIDIRECTIONAL); 0, PAGE_SIZE, DMA_TO_DEVICE);
if (dma_mapping_error(dev->dev, addrs[i])) { if (dma_mapping_error(dev->dev, addrs[i])) {
dev_warn(dev->dev, dev_warn(dev->dev,
...@@ -262,7 +262,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj) ...@@ -262,7 +262,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
for (i = i - 1; i >= 0; --i) { for (i = i - 1; i >= 0; --i) {
dma_unmap_page(dev->dev, addrs[i], dma_unmap_page(dev->dev, addrs[i],
PAGE_SIZE, DMA_BIDIRECTIONAL); PAGE_SIZE, DMA_TO_DEVICE);
} }
ret = -ENOMEM; ret = -ENOMEM;
...@@ -322,7 +322,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj) ...@@ -322,7 +322,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
for (i = 0; i < npages; i++) { for (i = 0; i < npages; i++) {
if (omap_obj->dma_addrs[i]) if (omap_obj->dma_addrs[i])
dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i], dma_unmap_page(obj->dev->dev, omap_obj->dma_addrs[i],
PAGE_SIZE, DMA_BIDIRECTIONAL); PAGE_SIZE, DMA_TO_DEVICE);
} }
kfree(omap_obj->dma_addrs); kfree(omap_obj->dma_addrs);
...@@ -744,7 +744,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff) ...@@ -744,7 +744,7 @@ void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff)
if (omap_obj->dma_addrs[pgoff]) { if (omap_obj->dma_addrs[pgoff]) {
dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff], dma_unmap_page(dev->dev, omap_obj->dma_addrs[pgoff],
PAGE_SIZE, DMA_BIDIRECTIONAL); PAGE_SIZE, DMA_TO_DEVICE);
omap_obj->dma_addrs[pgoff] = 0; omap_obj->dma_addrs[pgoff] = 0;
} }
} }
...@@ -767,8 +767,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj, ...@@ -767,8 +767,7 @@ void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
dma_addr_t addr; dma_addr_t addr;
addr = dma_map_page(dev->dev, pages[i], 0, addr = dma_map_page(dev->dev, pages[i], 0,
PAGE_SIZE, DMA_BIDIRECTIONAL); PAGE_SIZE, dir);
if (dma_mapping_error(dev->dev, addr)) { if (dma_mapping_error(dev->dev, addr)) {
dev_warn(dev->dev, "%s: failed to map page\n", dev_warn(dev->dev, "%s: failed to map page\n",
__func__); __func__);
......
...@@ -210,7 +210,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, ...@@ -210,7 +210,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf); get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
if (IS_ERR(sgt)) { if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt); ret = PTR_ERR(sgt);
goto fail_detach; goto fail_detach;
...@@ -227,7 +227,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, ...@@ -227,7 +227,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
return obj; return obj;
fail_unmap: fail_unmap:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL); dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
fail_detach: fail_detach:
dma_buf_detach(dma_buf, attach); dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf); dma_buf_put(dma_buf);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment