Commit f2d8e15b authored by Dmitry Osipenko

drm/i915: Prepare to dynamic dma-buf locking specification

Prepare the i915 driver for the common dynamic dma-buf locking convention
by starting to use the unlocked versions of the dma-buf API functions and
by handling the cases where the importer now holds the reservation lock.
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221017172229.42269-7-dmitry.osipenko@collabora.com
parent e4ea5428
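For context on the convention being adopted: dma-buf entry points are split into locked variants, which require the caller to already hold the buffer's reservation lock and are what exporter callbacks can now rely on, and _unlocked wrappers, which take the lock themselves and are what importers call. A minimal importer-side sketch of the pattern the selftest hunks below switch to (the function name and error paths are illustrative only, not part of this patch):

	#include <linux/dma-buf.h>

	/* Hypothetical importer: attach, map, use, unmap, detach. */
	static int example_import_and_map(struct dma_buf *dmabuf, struct device *dev)
	{
		struct dma_buf_attachment *attach;
		struct sg_table *sgt;

		attach = dma_buf_attach(dmabuf, dev);
		if (IS_ERR(attach))
			return PTR_ERR(attach);

		/* The _unlocked wrapper takes and drops the reservation lock. */
		sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR(sgt)) {
			dma_buf_detach(dmabuf, attach);
			return PTR_ERR(sgt);
		}

		/* ... program the device with the scatterlist ... */

		dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
		dma_buf_detach(dmabuf, attach);
		return 0;
	}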
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -72,7 +72,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	void *vaddr;
 
-	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 	if (IS_ERR(vaddr))
 		return PTR_ERR(vaddr);
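The hunk above is the exporter side of the split: i915's vmap callback is now entered with the reservation lock already held by the dma-buf core or the importer, so the locked pin helper replaces the _unlocked one. The wrappers that provide this guarantee are, roughly, thin lock/unlock shells; a simplified sketch of their shape (the real dma-buf core adds sanity checks such as clearing the map and WARN_ONs):

	#include <linux/dma-buf.h>
	#include <linux/dma-resv.h>

	int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
	{
		int ret;

		/* Take the lock on behalf of the importer, then call the
		 * locked entry point, which reaches the exporter's vmap
		 * callback (such as i915_gem_dmabuf_vmap() above) with the
		 * lock held.
		 */
		dma_resv_lock(dmabuf->resv, NULL);
		ret = dma_buf_vmap(dmabuf, map);
		dma_resv_unlock(dmabuf->resv);

		return ret;
	}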
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -290,7 +290,21 @@ void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
 	__i915_gem_object_free_mmaps(obj);
 
 	atomic_set(&obj->mm.pages_pin_count, 0);
+
+	/*
+	 * dma_buf_unmap_attachment() requires reservation to be
+	 * locked. The imported GEM shouldn't share reservation lock
+	 * and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
+	 * dma-buf, so it's safe to take the lock.
+	 */
+	if (obj->base.import_attach)
+		i915_gem_object_lock(obj, NULL);
+
 	__i915_gem_object_put_pages(obj);
+
+	if (obj->base.import_attach)
+		i915_gem_object_unlock(obj);
+
 	GEM_BUG_ON(i915_gem_object_has_pages(obj));
 }
 
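The comment in this hunk carries the safety argument for taking the lock during teardown: for imported objects, __i915_gem_object_put_pages() ends up in dma_buf_unmap_attachment(), whose locked variant now expects the caller to hold the reservation lock. A simplified sketch of that expectation (the real dma-buf core also validates its arguments and handles pinning for dynamic importers):

	#include <linux/dma-buf.h>
	#include <linux/dma-resv.h>

	void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				      struct sg_table *sg_table,
				      enum dma_data_direction direction)
	{
		/* The caller must hold the buffer's reservation lock; the
		 * conditional i915_gem_object_lock() in the hunk above is
		 * what provides it on the imported-object teardown path.
		 */
		dma_resv_assert_held(attach->dmabuf->resv);

		attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
	}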
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -213,7 +213,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
 		goto out_import;
 	}
 
-	st = dma_buf_map_attachment(import_attach, DMA_BIDIRECTIONAL);
+	st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL);
 	if (IS_ERR(st)) {
 		err = PTR_ERR(st);
 		goto out_detach;
@@ -226,7 +226,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
 		timeout = -ETIME;
 	}
 	err = timeout > 0 ? 0 : timeout;
-	dma_buf_unmap_attachment(import_attach, st, DMA_BIDIRECTIONAL);
+	dma_buf_unmap_attachment_unlocked(import_attach, st, DMA_BIDIRECTIONAL);
 out_detach:
 	dma_buf_detach(dmabuf, import_attach);
 out_import:
@@ -296,7 +296,7 @@ static int igt_dmabuf_import(void *arg)
 		goto out_obj;
 	}
 
-	err = dma_buf_vmap(dmabuf, &map);
+	err = dma_buf_vmap_unlocked(dmabuf, &map);
 	dma_map = err ? NULL : map.vaddr;
 	if (!dma_map) {
 		pr_err("dma_buf_vmap failed\n");
@@ -337,7 +337,7 @@ static int igt_dmabuf_import(void *arg)
 
 	err = 0;
 out_dma_map:
-	dma_buf_vunmap(dmabuf, &map);
+	dma_buf_vunmap_unlocked(dmabuf, &map);
 out_obj:
 	i915_gem_object_put(obj);
 out_dmabuf:
@@ -358,7 +358,7 @@ static int igt_dmabuf_import_ownership(void *arg)
 	if (IS_ERR(dmabuf))
 		return PTR_ERR(dmabuf);
 
-	err = dma_buf_vmap(dmabuf, &map);
+	err = dma_buf_vmap_unlocked(dmabuf, &map);
 	ptr = err ? NULL : map.vaddr;
 	if (!ptr) {
 		pr_err("dma_buf_vmap failed\n");
@@ -367,7 +367,7 @@ static int igt_dmabuf_import_ownership(void *arg)
 	}
 
 	memset(ptr, 0xc5, PAGE_SIZE);
-	dma_buf_vunmap(dmabuf, &map);
+	dma_buf_vunmap_unlocked(dmabuf, &map);
 
 	obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
 	if (IS_ERR(obj)) {
@@ -418,7 +418,7 @@ static int igt_dmabuf_export_vmap(void *arg)
 	}
 	i915_gem_object_put(obj);
 
-	err = dma_buf_vmap(dmabuf, &map);
+	err = dma_buf_vmap_unlocked(dmabuf, &map);
 	ptr = err ? NULL : map.vaddr;
 	if (!ptr) {
 		pr_err("dma_buf_vmap failed\n");
@@ -435,7 +435,7 @@ static int igt_dmabuf_export_vmap(void *arg)
 	memset(ptr, 0xc5, dmabuf->size);
 
 	err = 0;
-	dma_buf_vunmap(dmabuf, &map);
+	dma_buf_vunmap_unlocked(dmabuf, &map);
 out:
 	dma_buf_put(dmabuf);
 	return err;