Commit 9764951e authored by Chris Wilson

drm/i915: Pin the pages first in shmem prepare read/write

There is an improbable, but not impossible, case that if we leave the
pages unpinned as we operate on the object, then somebody via the shrinker
may steal the lock (which lock? right now, it is struct_mutex, THE lock)
and change the cache domains after we have already inspected them.

(Whilst here, avail ourselves of the opportunity to take a couple of
steps to make the two functions look more similar.)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-11-chris@chris-wilson.co.uk
parent 3b5724d7
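For orientation, the read helper's post-patch flow can be condensed as
below. This is a simplified sketch assembled from the hunks that follow,
not the verbatim source: the domain-inspection logic is elided and the
exact type of the needs_clflush parameter is assumed, as the diff does
not show the full signature.

	int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
					    unsigned int *needs_clflush)
	{
		int ret;

		/* Acquire and pin the backing pages *before* inspecting the
		 * cache domains, so that a shrinker which steals the lock
		 * (struct_mutex) cannot change the domains behind our back.
		 */
		ret = i915_gem_object_get_pages(obj);
		if (ret)
			return ret;

		i915_gem_object_pin_pages(obj);

		i915_gem_object_flush_gtt_write_domain(obj);

		/* ... inspect cache domains, compute *needs_clflush ... */

		if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
			ret = i915_gem_object_set_to_cpu_domain(obj, false);
			if (ret)
				goto err_unpin;

			*needs_clflush = 0;
		}

		/* success: return with the pages pinned for the caller */
		return 0;

	err_unpin:
		i915_gem_object_unpin_pages(obj);
		return ret;
	}

The write helper follows the same shape, with set_to_cpu_domain(obj, true)
and the frontbuffer invalidation before the pinned return.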
@@ -622,6 +622,12 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're not in the cpu read domain, set ourself into the gtt
@@ -633,22 +639,20 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
 							obj->cache_level);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
-
-	i915_gem_object_pin_pages(obj);
-
 	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, false);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
+
 		*needs_clflush = 0;
 	}
 
+	/* return with the pages pinned */
 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+	return ret;
 }
 
 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
@@ -664,6 +668,12 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
+	ret = i915_gem_object_get_pages(obj);
+	if (ret)
+		return ret;
+
+	i915_gem_object_pin_pages(obj);
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're not in the cpu write domain, set ourself into the
@@ -681,18 +691,11 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
 							 obj->cache_level);
 
-	ret = i915_gem_object_get_pages(obj);
-	if (ret)
-		return ret;
-
-	i915_gem_object_pin_pages(obj);
-
 	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
 		ret = i915_gem_object_set_to_cpu_domain(obj, true);
-		if (ret) {
-			i915_gem_object_unpin_pages(obj);
-			return ret;
-		}
+		if (ret)
+			goto err_unpin;
+
 		*needs_clflush = 0;
 	}
 
@@ -701,7 +704,12 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
 	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
 	obj->dirty = 1;
 
+	/* return with the pages pinned */
 	return 0;
+
+err_unpin:
+	i915_gem_object_unpin_pages(obj);
+	return ret;
 }
 
 /* Per-page copy function for the shmem pread fastpath.
...