Commit 26827088 authored by Dave Gordon, committed by Daniel Vetter

drm/i915: simplify allocation of driver-internal requests

There are a number of places where the driver needs a request, but isn't
working on behalf of any specific user or in a specific context. At
present, we associate them with the per-engine default context. A future
patch will abolish those per-engine context pointers; but we can already
eliminate a lot of the references to them, just by making the allocator
allow NULL as a shorthand for "an appropriate context for this ring",
which will mean that the callers don't need to know anything about how
the "appropriate context" is found (e.g. per-ring vs per-device, etc).

So this patch renames the existing i915_gem_request_alloc() and makes
it local (static inline), then replaces it with a wrapper that supplies
a default context when NULL is passed and has a nicer calling
convention (no pointer to an output parameter required). All callers
are then converted to the new convention:
OLD:
	err = i915_gem_request_alloc(ring, user_ctx, &req);
	if (err) ...
NEW:
	req = i915_gem_request_alloc(ring, user_ctx);
	if (IS_ERR(req)) ...
OLD:
	err = i915_gem_request_alloc(ring, ring->default_context, &req);
	if (err) ...
NEW:
	req = i915_gem_request_alloc(ring, NULL);
	if (IS_ERR(req)) ...
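
The wrapper that implements this (added in full in the diff below) simply
substitutes the engine's default context when ctx is NULL and folds the
old int error code into the kernel's ERR_PTR convention:

	struct drm_i915_gem_request *
	i915_gem_request_alloc(struct intel_engine_cs *engine,
			       struct intel_context *ctx)
	{
		struct drm_i915_gem_request *req;
		int err;

		/* NULL means "an appropriate context for this engine" */
		if (ctx == NULL)
			ctx = engine->default_context;

		/* The old allocator keeps its int-returning convention... */
		err = __i915_gem_request_alloc(engine, ctx, &req);

		/* ...which is converted into an ERR_PTR-or-pointer return. */
		return err ? ERR_PTR(err) : req;
	}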

v4:	Rebased
Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Nick Hoath <nicholas.hoath@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1453230175-19330-2-git-send-email-david.s.gordon@intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent e7941294
@@ -2268,9 +2268,9 @@ struct drm_i915_gem_request {
 };
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
...
@@ -2690,9 +2690,10 @@ void i915_gem_request_free(struct kref *req_ref)
 	kmem_cache_free(req->i915->requests, req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out)
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
+			 struct intel_context *ctx,
+			 struct drm_i915_gem_request **req_out)
 {
 	struct drm_i915_private *dev_priv = to_i915(ring->dev);
 	struct drm_i915_gem_request *req;
@@ -2755,6 +2756,31 @@ int i915_gem_request_alloc(struct intel_engine_cs *ring,
 	return ret;
 }
 
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct intel_context *ctx)
+{
+	struct drm_i915_gem_request *req;
+	int err;
+
+	if (ctx == NULL)
+		ctx = engine->default_context;
+	err = __i915_gem_request_alloc(engine, ctx, &req);
+	return err ? ERR_PTR(err) : req;
+}
+
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
 	intel_ring_reserved_space_cancel(req->ringbuf);
@@ -3172,9 +3198,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (*to_req == NULL) {
-		ret = i915_gem_request_alloc(to, to->default_context, to_req);
-		if (ret)
-			return ret;
+		struct drm_i915_gem_request *req;
+
+		req = i915_gem_request_alloc(to, NULL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+
+		*to_req = req;
 	}
 
 	trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3374,9 +3404,9 @@ int i915_gpu_idle(struct drm_device *dev)
 		if (!i915.enable_execlists) {
 			struct drm_i915_gem_request *req;
 
-			ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-			if (ret)
-				return ret;
+			req = i915_gem_request_alloc(ring, NULL);
+			if (IS_ERR(req))
+				return PTR_ERR(req);
 
 			ret = i915_switch_context(req);
 			if (ret) {
@@ -4871,10 +4901,9 @@ i915_gem_init_hw(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i) {
 		struct drm_i915_gem_request *req;
 
-		WARN_ON(!ring->default_context);
-
-		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-		if (ret) {
+		req = i915_gem_request_alloc(ring, NULL);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			i915_gem_cleanup_ringbuffer(dev);
 			goto out;
 		}
...
@@ -1381,6 +1381,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_i915_gem_exec_object2 *exec)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1602,11 +1603,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	ret = i915_gem_request_alloc(ring, ctx, &params->request);
-	if (ret)
+	req = i915_gem_request_alloc(ring, ctx);
+	if (IS_ERR(req)) {
+		ret = PTR_ERR(req);
 		goto err_batch_unpin;
+	}
 
-	ret = i915_gem_request_add_to_client(params->request, file);
+	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
 		goto err_batch_unpin;
@@ -1622,6 +1625,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->dispatch_flags = dispatch_flags;
 	params->batch_obj = batch_obj;
 	params->ctx = ctx;
+	params->request = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
@@ -1645,8 +1649,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * must be freed again. If it was submitted then it is being tracked
 	 * on the active request list and no clean up is required here.
 	 */
-	if (ret && params->request)
-		i915_gem_request_cancel(params->request);
+	if (ret && req)
+		i915_gem_request_cancel(req);
 
 	mutex_unlock(&dev->struct_mutex);
...
@@ -11690,9 +11690,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 					obj->last_write_req);
 	} else {
 		if (!request) {
-			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
-			if (ret)
+			request = i915_gem_request_alloc(ring, NULL);
+			if (IS_ERR(request)) {
+				ret = PTR_ERR(request);
 				goto cleanup_unpin;
+			}
 		}
 
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
...
@@ -2520,11 +2520,10 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	if (ctx != ring->default_context && ring->init_context) {
 		struct drm_i915_gem_request *req;
 
-		ret = i915_gem_request_alloc(ring,
-			ctx, &req);
-		if (ret) {
-			DRM_ERROR("ring create req: %d\n",
-				ret);
+		req = i915_gem_request_alloc(ring, ctx);
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
+			DRM_ERROR("ring create req: %d\n", ret);
 			goto error_ringbuf;
 		}
...
@@ -240,9 +240,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-	if (ret)
-		return ret;
+	req = i915_gem_request_alloc(ring, NULL);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 4);
 	if (ret) {
@@ -283,9 +283,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-	if (ret)
-		return ret;
+	req = i915_gem_request_alloc(ring, NULL);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 2);
 	if (ret) {
@@ -349,9 +349,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-	if (ret)
-		return ret;
+	req = i915_gem_request_alloc(ring, NULL);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
 
 	ret = intel_ring_begin(req, 6);
 	if (ret) {
@@ -423,9 +423,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
 
-		ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-		if (ret)
-			return ret;
+		req = i915_gem_request_alloc(ring, NULL);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
 
 		ret = intel_ring_begin(req, 2);
 		if (ret) {
...