Commit 86a1ee26 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Only pwrite through the GTT if there is space in the aperture

Avoid stalling and waiting for the GPU by checking to see if there is
sufficient inactive space in the aperture for us to bind the buffer
prior to writing through the GTT. If there is inadequate space we will
have to stall waiting for the GPU, and incur overheads moving objects
about. Instead, only incur the clflush overhead on the target object by
writing through shmem.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 53621860
...@@ -1307,7 +1307,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, ...@@ -1307,7 +1307,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
void i915_gem_free_object(struct drm_gem_object *obj); void i915_gem_free_object(struct drm_gem_object *obj);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment, uint32_t alignment,
bool map_and_fenceable); bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj); void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj); void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
...@@ -1454,7 +1455,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev, ...@@ -1454,7 +1455,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev,
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, unsigned alignment,
unsigned cache_level, unsigned cache_level,
bool mappable); bool mappable,
bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev); int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */ /* i915_gem_stolen.c */
......
...@@ -41,7 +41,8 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o ...@@ -41,7 +41,8 @@ static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *o
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment, unsigned alignment,
bool map_and_fenceable); bool map_and_fenceable,
bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev, static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj, struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
...@@ -609,7 +610,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, ...@@ -609,7 +610,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data; char __user *user_data;
int page_offset, page_length, ret; int page_offset, page_length, ret;
ret = i915_gem_object_pin(obj, 0, true); ret = i915_gem_object_pin(obj, 0, true, true);
if (ret) if (ret)
goto out; goto out;
...@@ -925,10 +926,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ...@@ -925,10 +926,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out; goto out;
} }
if (obj->gtt_space && if (obj->cache_level == I915_CACHE_NONE &&
obj->cache_level == I915_CACHE_NONE &&
obj->tiling_mode == I915_TILING_NONE && obj->tiling_mode == I915_TILING_NONE &&
obj->map_and_fenceable &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) { obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user /* Note that the gtt paths might fail with non-page-backed user
...@@ -936,7 +935,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ...@@ -936,7 +935,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* textures). Fallback to the shmem path in that case. */ * textures). Fallback to the shmem path in that case. */
} }
if (ret == -EFAULT) if (ret == -EFAULT || ret == -ENOSPC)
ret = i915_gem_shmem_pwrite(dev, obj, args, file); ret = i915_gem_shmem_pwrite(dev, obj, args, file);
out: out:
...@@ -1115,7 +1114,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1115,7 +1114,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock; goto unlock;
} }
if (!obj->gtt_space) { if (!obj->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true); ret = i915_gem_object_bind_to_gtt(obj, 0, true, false);
if (ret) if (ret)
goto unlock; goto unlock;
...@@ -2772,7 +2771,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev) ...@@ -2772,7 +2771,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
static int static int
i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment, unsigned alignment,
bool map_and_fenceable) bool map_and_fenceable,
bool nonblocking)
{ {
struct drm_device *dev = obj->base.dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
...@@ -2848,7 +2848,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, ...@@ -2848,7 +2848,8 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (obj->gtt_space == NULL) { if (obj->gtt_space == NULL) {
ret = i915_gem_evict_something(dev, size, alignment, ret = i915_gem_evict_something(dev, size, alignment,
obj->cache_level, obj->cache_level,
map_and_fenceable); map_and_fenceable,
nonblocking);
if (ret) if (ret)
return ret; return ret;
...@@ -3188,7 +3189,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ...@@ -3188,7 +3189,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* (e.g. libkms for the bootup splash), we have to ensure that we * (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers. * always use map_and_fenceable for all scanout buffers.
*/ */
ret = i915_gem_object_pin(obj, alignment, true); ret = i915_gem_object_pin(obj, alignment, true, false);
if (ret) if (ret)
return ret; return ret;
...@@ -3325,7 +3326,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) ...@@ -3325,7 +3326,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int int
i915_gem_object_pin(struct drm_i915_gem_object *obj, i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment, uint32_t alignment,
bool map_and_fenceable) bool map_and_fenceable,
bool nonblocking)
{ {
int ret; int ret;
...@@ -3349,7 +3351,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, ...@@ -3349,7 +3351,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
if (obj->gtt_space == NULL) { if (obj->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment, ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable); map_and_fenceable,
nonblocking);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -3407,7 +3410,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, ...@@ -3407,7 +3410,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
obj->user_pin_count++; obj->user_pin_count++;
obj->pin_filp = file; obj->pin_filp = file;
if (obj->user_pin_count == 1) { if (obj->user_pin_count == 1) {
ret = i915_gem_object_pin(obj, args->alignment, true); ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret) if (ret)
goto out; goto out;
} }
......
...@@ -221,7 +221,7 @@ static int create_default_context(struct drm_i915_private *dev_priv) ...@@ -221,7 +221,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context. * default context.
*/ */
dev_priv->ring[RCS].default_context = ctx; dev_priv->ring[RCS].default_context = ctx;
ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false); ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) if (ret)
goto err_destroy; goto err_destroy;
...@@ -374,7 +374,7 @@ static int do_switch(struct i915_hw_context *to) ...@@ -374,7 +374,7 @@ static int do_switch(struct i915_hw_context *to)
if (from_obj == to->obj) if (from_obj == to->obj)
return 0; return 0;
ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false); ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret) if (ret)
return ret; return ret;
......
...@@ -45,7 +45,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) ...@@ -45,7 +45,7 @@ mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
int int
i915_gem_evict_something(struct drm_device *dev, int min_size, i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment, unsigned cache_level, unsigned alignment, unsigned cache_level,
bool mappable) bool mappable, bool nonblocking)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list; struct list_head eviction_list, unwind_list;
...@@ -92,12 +92,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, ...@@ -92,12 +92,16 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto found; goto found;
} }
if (nonblocking)
goto none;
/* Now merge in the soon-to-be-expired objects... */ /* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (mark_free(obj, &unwind_list)) if (mark_free(obj, &unwind_list))
goto found; goto found;
} }
none:
/* Nothing found, clean up and bail out! */ /* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) { while (!list_empty(&unwind_list)) {
obj = list_first_entry(&unwind_list, obj = list_first_entry(&unwind_list,
......
...@@ -354,7 +354,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj, ...@@ -354,7 +354,7 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE; obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj); need_mappable = need_fence || need_reloc_mappable(obj);
ret = i915_gem_object_pin(obj, entry->alignment, need_mappable); ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
if (ret) if (ret)
return ret; return ret;
......
...@@ -1383,7 +1383,7 @@ void intel_setup_overlay(struct drm_device *dev) ...@@ -1383,7 +1383,7 @@ void intel_setup_overlay(struct drm_device *dev)
} }
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else { } else {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) { if (ret) {
DRM_ERROR("failed to pin overlay register bo\n"); DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo; goto out_free_bo;
......
...@@ -2138,7 +2138,7 @@ intel_alloc_context_page(struct drm_device *dev) ...@@ -2138,7 +2138,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL; return NULL;
} }
ret = i915_gem_object_pin(ctx, 4096, true); ret = i915_gem_object_pin(ctx, 4096, true, false);
if (ret) { if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret); DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref; goto err_unref;
......
...@@ -391,7 +391,7 @@ init_pipe_control(struct intel_ring_buffer *ring) ...@@ -391,7 +391,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true); ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret) if (ret)
goto err_unref; goto err_unref;
...@@ -979,7 +979,7 @@ static int init_status_page(struct intel_ring_buffer *ring) ...@@ -979,7 +979,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true); ret = i915_gem_object_pin(obj, 4096, true, false);
if (ret != 0) { if (ret != 0) {
goto err_unref; goto err_unref;
} }
...@@ -1036,7 +1036,7 @@ static int intel_init_ring_buffer(struct drm_device *dev, ...@@ -1036,7 +1036,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj; ring->obj = obj;
ret = i915_gem_object_pin(obj, PAGE_SIZE, true); ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
if (ret) if (ret)
goto err_unref; goto err_unref;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment