Commit fe3db79b authored by Chris Wilson, committed by Joonas Lahtinen

drm/i915: Propagate error from drm_gem_object_init()

Propagate the real error from drm_gem_object_init(). Note this also
fixes some confusion in the error return from i915_gem_alloc_object...
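
In short, i915_gem_object_create() now returns an ERR_PTR-encoded errno instead of NULL on failure, so callers test the result with IS_ERR() and recover the error with PTR_ERR() (both from <linux/err.h>). A minimal caller-side sketch of that convention; the wrapper function below is hypothetical and not part of the patch:

    #include <linux/err.h>

    /* Hypothetical caller, for illustration only: forward the allocator's errno. */
    static int example_create_and_use(struct drm_device *dev, u64 size)
    {
            struct drm_i915_gem_object *obj;

            obj = i915_gem_object_create(dev, size);
            if (IS_ERR(obj))
                    return PTR_ERR(obj);    /* e.g. -ENOMEM from the allocator */

            /* ... use obj, then drop the reference ... */
            return 0;
    }

Callers that themselves return a pointer either hand the ERR_PTR straight back (as i915_gem_object_create_from_data() does) or convert it with ERR_CAST() when the pointer type differs (as intel_framebuffer_create_for_mode() does below). Checks using IS_ERR_OR_NULL() become plain IS_ERR(), since NULL is no longer a possible return value.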

v2:
(Matthew Auld)
  - updated new users of gem_alloc_object from latest drm-nightly
  - replaced occurrences of IS_ERR_OR_NULL() with IS_ERR()
v3:
(Joonas Lahtinen)
  - fix double "From:" in commit message
  - add goto teardown path
v4:
(Matthew Auld)
  - rebase with i915_gem_alloc_object name change
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1461587533-8841-1-git-send-email-matthew.auld@intel.com
[Joonas: Removed spurious " = NULL" from _init() function]
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
parent bcbdb6d0
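
The "goto teardown path" mentioned in v3 is the error unwind added to i915_gem_object_create() itself. Condensed from the hunks below, with the middle of the function elided and the type of the size parameter assumed, the reworked allocator is shaped roughly like this:

    struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                       size_t size)
    {
            struct drm_i915_gem_object *obj;
            int ret;

            obj = i915_gem_object_alloc(dev);
            if (obj == NULL)
                    return ERR_PTR(-ENOMEM);        /* slab failure has no errno to forward */

            ret = drm_gem_object_init(dev, &obj->base, size);
            if (ret)
                    goto fail;                      /* propagate the real error */

            /* ... gfp mask setup, tracing, etc. elided ... */

            return obj;

    fail:
            i915_gem_object_free(obj);
            return ERR_PTR(ret);
    }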
@@ -382,8 +382,8 @@ i915_gem_create(struct drm_file *file,
 	/* Allocate the new object */
 	obj = i915_gem_object_create(dev, size);
-	if (obj == NULL)
-		return -ENOMEM;
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	/* drop reference from allocate - handle holds it now */
@@ -4501,15 +4501,15 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
 	struct drm_i915_gem_object *obj;
 	struct address_space *mapping;
 	gfp_t mask;
+	int ret;
 	obj = i915_gem_object_alloc(dev);
 	if (obj == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
-	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-		i915_gem_object_free(obj);
-		return NULL;
-	}
+	ret = drm_gem_object_init(dev, &obj->base, size);
+	if (ret)
+		goto fail;
 	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
 	if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4546,6 +4546,11 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
 	trace_i915_gem_object_create(obj);
 	return obj;
+fail:
+	i915_gem_object_free(obj);
+	return ERR_PTR(ret);
 }
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@ -5351,7 +5356,7 @@ i915_gem_object_create_from_data(struct drm_device *dev,
 	int ret;
 	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
-	if (IS_ERR_OR_NULL(obj))
+	if (IS_ERR(obj))
 		return obj;
 	ret = i915_gem_object_set_to_cpu_domain(obj, true);
......
@@ -135,8 +135,8 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	int ret;
 	obj = i915_gem_object_create(pool->dev, size);
-	if (obj == NULL)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(obj))
+		return obj;
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
......
@@ -179,8 +179,8 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	int ret;
 	obj = i915_gem_object_create(dev, size);
-	if (obj == NULL)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(obj))
+		return obj;
 	/*
 	 * Try to make the context utilize L3 as well as LLC.
......
@@ -58,8 +58,8 @@ static int render_state_init(struct render_state *so, struct drm_device *dev)
 		return -EINVAL;
 	so->obj = i915_gem_object_create(dev, 4096);
-	if (so->obj == NULL)
-		return -ENOMEM;
+	if (IS_ERR(so->obj))
+		return PTR_ERR(so->obj);
 	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
 	if (ret)
......
@@ -588,7 +588,7 @@ static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev,
 	struct drm_i915_gem_object *obj;
 	obj = i915_gem_object_create(dev, size);
-	if (!obj)
+	if (IS_ERR(obj))
 		return NULL;
 	if (i915_gem_object_get_pages(obj)) {
......
@@ -10321,8 +10321,8 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
 	obj = i915_gem_object_create(dev,
 				    intel_framebuffer_size_for_mode(mode, bpp));
-	if (obj == NULL)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(obj))
+		return ERR_CAST(obj);
 	mode_cmd.width = mode->hdisplay;
 	mode_cmd.height = mode->vdisplay;
......
@@ -151,9 +151,9 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	obj = i915_gem_object_create_stolen(dev, size);
 	if (obj == NULL)
 		obj = i915_gem_object_create(dev, size);
-	if (!obj) {
+	if (IS_ERR(obj)) {
 		DRM_ERROR("failed to allocate framebuffer\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(obj);
 		goto out;
 	}
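
Note that i915_gem_object_create_stolen() keeps its NULL-on-failure convention, so after the fallback above obj holds either a valid pointer or an ERR_PTR from i915_gem_object_create(), never NULL, and a single IS_ERR() test suffices; the same pattern recurs in intel_setup_overlay() and intel_alloc_ringbuffer_obj() further down. A hypothetical helper, not part of the patch, spelling out the mixed conventions:

    /* Illustration only: try stolen memory first (NULL on failure),
     * then fall back to a shmem-backed object (ERR_PTR on failure).
     */
    static struct drm_i915_gem_object *
    example_alloc_stolen_or_shmem(struct drm_device *dev, u64 size)
    {
            struct drm_i915_gem_object *obj;

            obj = i915_gem_object_create_stolen(dev, size);
            if (obj == NULL)
                    obj = i915_gem_object_create(dev, size);

            return obj;     /* valid pointer, or ERR_PTR from the shmem path */
    }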
......
@@ -1474,9 +1474,11 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 	engine->wa_ctx.obj = i915_gem_object_create(engine->dev,
 						   PAGE_ALIGN(size));
-	if (!engine->wa_ctx.obj) {
+	if (IS_ERR(engine->wa_ctx.obj)) {
 		DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
-		return -ENOMEM;
+		ret = PTR_ERR(engine->wa_ctx.obj);
+		engine->wa_ctx.obj = NULL;
+		return ret;
 	}
 	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
@@ -2666,9 +2668,9 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	context_size += PAGE_SIZE * LRC_PPHWSP_PN;
 	ctx_obj = i915_gem_object_create(dev, context_size);
-	if (!ctx_obj) {
+	if (IS_ERR(ctx_obj)) {
 		DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
-		return -ENOMEM;
+		return PTR_ERR(ctx_obj);
 	}
 	ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE);
......
@@ -1397,7 +1397,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
 	if (reg_bo == NULL)
 		reg_bo = i915_gem_object_create(dev, PAGE_SIZE);
-	if (reg_bo == NULL)
+	if (IS_ERR(reg_bo))
 		goto out_free;
 	overlay->reg_bo = reg_bo;
......
@@ -672,9 +672,10 @@ intel_init_pipe_control(struct intel_engine_cs *engine)
 	WARN_ON(engine->scratch.obj);
 	engine->scratch.obj = i915_gem_object_create(engine->dev, 4096);
-	if (engine->scratch.obj == NULL) {
+	if (IS_ERR(engine->scratch.obj)) {
 		DRM_ERROR("Failed to allocate seqno page\n");
-		ret = -ENOMEM;
+		ret = PTR_ERR(engine->scratch.obj);
+		engine->scratch.obj = NULL;
 		goto err;
 	}
@@ -2033,9 +2034,9 @@ static int init_status_page(struct intel_engine_cs *engine)
 	int ret;
 	obj = i915_gem_object_create(engine->dev, 4096);
-	if (obj == NULL) {
+	if (IS_ERR(obj)) {
 		DRM_ERROR("Failed to allocate status page\n");
-		return -ENOMEM;
+		return PTR_ERR(obj);
 	}
 	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2174,8 +2175,8 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	obj = i915_gem_object_create_stolen(dev, ringbuf->size);
 	if (obj == NULL)
 		obj = i915_gem_object_create(dev, ringbuf->size);
-	if (obj == NULL)
-		return -ENOMEM;
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
@@ -2787,7 +2788,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 8) {
 		if (i915_semaphore_is_enabled(dev)) {
 			obj = i915_gem_object_create(dev, 4096);
-			if (obj == NULL) {
+			if (IS_ERR(obj)) {
 				DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
 				i915.semaphores = 0;
 			} else {
@@ -2896,9 +2897,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	/* Workaround batchbuffer to combat CS tlb bug. */
 	if (HAS_BROKEN_CS_TLB(dev)) {
 		obj = i915_gem_object_create(dev, I830_WA_SIZE);
-		if (obj == NULL) {
+		if (IS_ERR(obj)) {
 			DRM_ERROR("Failed to allocate batch bo\n");
-			return -ENOMEM;
+			return PTR_ERR(obj);
 		}
 		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
......