Commit 3fbbbef4 authored by Chris Wilson

drm/i915/gt: Convert the final GEM_TRACE to GT_TRACE and co

Convert the few remaining GEM_TRACE() used for debugging over to the
appropriate GT_TRACE or RQ_TRACE.

References: 639f2f24 ("drm/i915: Introduce new macros for tracing")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Venkata Sandeep Dhanalakota <venkata.s.dhanalakota@intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200106114234.2529613-4-chris@chris-wilson.co.uk
parent e1c31fb5
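
For reference, the GT_TRACE()/CE_TRACE()/RQ_TRACE() helpers introduced by the referenced commit 639f2f24 are thin wrappers around GEM_TRACE() that prefix each message with the identity of the object being traced (device, context or request), so the call sites below no longer format engine->name or the fence context:seqno by hand. The sketch below shows only the idea; the format strings and field accesses are illustrative, not the verbatim kernel definitions:

/*
 * Simplified illustration only -- see the macros added by commit 639f2f24
 * for the real definitions; format strings and fields here are indicative.
 */
#define GT_TRACE(gt, fmt, ...) \
	GEM_TRACE("%s " fmt, dev_name((gt)->i915->drm.dev), ##__VA_ARGS__)

#define CE_TRACE(ce, fmt, ...) \
	GEM_TRACE("%s: " fmt, (ce)->engine->name, ##__VA_ARGS__)

#define RQ_TRACE(rq, fmt, ...) \
	GEM_TRACE("%s fence %llx:%lld: " fmt, (rq)->engine->name, \
		  (rq)->fence.context, (rq)->fence.seqno, ##__VA_ARGS__)
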
@@ -152,6 +152,8 @@ static int __intel_context_active(struct i915_active *active)
 	struct intel_context *ce = container_of(active, typeof(*ce), active);
 	int err;
 
+	CE_TRACE(ce, "active\n");
+
 	intel_context_get(ce);
 
 	err = intel_ring_pin(ce->ring);
@@ -147,11 +147,7 @@ static void mark_innocent(struct i915_request *rq)
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
 {
-	GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
-		  rq->engine->name,
-		  rq->fence.context,
-		  rq->fence.seqno,
-		  yesno(guilty));
+	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
 
 	GEM_BUG_ON(i915_request_completed(rq));
@@ -624,7 +620,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
 	 */
 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
-		GEM_TRACE("engine_mask=%x\n", engine_mask);
+		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
 		preempt_disable();
 		ret = reset(gt, engine_mask, retry);
 		preempt_enable();
@@ -784,8 +780,7 @@ static void nop_submit_request(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	unsigned long flags;
 
-	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
-		  engine->name, request->fence.context, request->fence.seqno);
+	RQ_TRACE(request, "-EIO\n");
 	dma_fence_set_error(&request->fence, -EIO);
 
 	spin_lock_irqsave(&engine->active.lock, flags);
@@ -812,7 +807,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 			intel_engine_dump(engine, &p, "%s\n", engine->name);
 	}
 
-	GEM_TRACE("start\n");
+	GT_TRACE(gt, "start\n");
 
 	/*
 	 * First, stop submission to hw, but do not yet complete requests by
@@ -843,7 +838,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 
 	reset_finish(gt, awake);
 
-	GEM_TRACE("end\n");
+	GT_TRACE(gt, "end\n");
 }
 
 void intel_gt_set_wedged(struct intel_gt *gt)
@@ -869,7 +864,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	if (test_bit(I915_WEDGED_ON_INIT, &gt->reset.flags))
 		return false;
 
-	GEM_TRACE("start\n");
+	GT_TRACE(gt, "start\n");
 
 	/*
 	 * Before unwedging, make sure that all pending operations
@@ -931,7 +926,7 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	 */
 	intel_engines_reset_default_submission(gt);
 
-	GEM_TRACE("end\n");
+	GT_TRACE(gt, "end\n");
 
 	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
 	clear_bit(I915_WEDGED, &gt->reset.flags);
@@ -1006,7 +1001,7 @@ void intel_gt_reset(struct intel_gt *gt,
 	intel_engine_mask_t awake;
 	int ret;
 
-	GEM_TRACE("flags=%lx\n", gt->reset.flags);
+	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
 
 	might_sleep();
 	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
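
A brief usage sketch of the converted style follows; the wrapper functions are hypothetical and exist only to show the calling convention, while the macro invocations themselves mirror the hunks above:

/* Hypothetical helpers illustrating the post-conversion tracing style. */
static void example_trace_guilty(struct i915_request *rq, bool guilty)
{
	/* Message is tagged with the request's engine and fence context:seqno. */
	RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
}

static void example_trace_reset(struct intel_gt *gt)
{
	/* Message is tagged with the GT's owning device instead of a bare string. */
	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
}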