Commit 6a96be24 authored by Michał Winiarski, committed by Chris Wilson

drm/i915/guc: Get rid of GuC log runtime

Runtime is not a very good name. Let's also move the relay-overflow
counter inside the relay struct.

v2: Rename things rather than remove the struct (Chris)
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Reviewed-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20180319095348.9716-7-michal.winiarski@intel.com
parent b8299c71
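For quick reference, below is a condensed sketch of the logging state as it looks after this patch, reconstructed from the intel_guc_log.h hunk at the end of the diff: everything that only exists while relay logging is enabled now sits under a `relay` sub-struct, and the old top-level `capture_miss_count` becomes `relay.full_count`. The includes, the `i915_vma` forward declaration, and the comments are added here purely to make the excerpt self-contained; the trailing stats members are elided.

```c
#include <linux/mutex.h>
#include <linux/relay.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct i915_vma;

/*
 * Condensed view of struct intel_guc_log after the runtime -> relay rename
 * (abridged; see the header hunk at the end of this diff).
 */
struct intel_guc_log {
	u32 flags;
	struct i915_vma *vma;
	struct {
		void *buf_addr;			/* mapped GuC log buffer, set in guc_log_map() */
		struct workqueue_struct *flush_wq;
		struct work_struct flush_work;
		struct rchan *channel;		/* was runtime.relay_chan */
		struct mutex lock;		/* was runtime.lock */
		u32 full_count;			/* was top-level capture_miss_count */
	} relay;
	u32 flush_interrupt_count;
	/* prev/total overflow counters follow, unchanged by this patch */
};
```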
@@ -2347,8 +2347,8 @@ static void i915_guc_log_info(struct seq_file *m,
 	seq_printf(m, "\tTotal flush interrupt count: %u\n",
 		   guc->log.flush_interrupt_count);
-	seq_printf(m, "\tCapture miss count: %u\n",
-		   guc->log.capture_miss_count);
+	seq_printf(m, "\tRelay full count: %u\n",
+		   guc->log.relay.full_count);
 }
 static void i915_guc_client_info(struct seq_file *m,
...
@@ -87,9 +87,10 @@ int intel_guc_init_wq(struct intel_guc *guc)
	 * or scheduled later on resume. This way the handling of work
	 * item can be kept same between system suspend & rpm suspend.
	 */
-	guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
-						WQ_HIGHPRI | WQ_FREEZABLE);
-	if (!guc->log.runtime.flush_wq) {
+	guc->log.relay.flush_wq =
+		alloc_ordered_workqueue("i915-guc_log",
+					WQ_HIGHPRI | WQ_FREEZABLE);
+	if (!guc->log.relay.flush_wq) {
 		DRM_ERROR("Couldn't allocate workqueue for GuC log\n");
 		return -ENOMEM;
 	}
@@ -112,7 +113,7 @@ int intel_guc_init_wq(struct intel_guc *guc)
 	guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
						   WQ_HIGHPRI);
 	if (!guc->preempt_wq) {
-		destroy_workqueue(guc->log.runtime.flush_wq);
+		destroy_workqueue(guc->log.relay.flush_wq);
 		DRM_ERROR("Couldn't allocate workqueue for GuC "
			  "preemption\n");
 		return -ENOMEM;
@@ -130,7 +131,7 @@ void intel_guc_fini_wq(struct intel_guc *guc)
 	    USES_GUC_SUBMISSION(dev_priv))
 		destroy_workqueue(guc->preempt_wq);
-	destroy_workqueue(guc->log.runtime.flush_wq);
+	destroy_workqueue(guc->log.relay.flush_wq);
 }
 static int guc_shared_data_create(struct intel_guc *guc)
@@ -390,8 +391,8 @@ void intel_guc_to_host_event_handler(struct intel_guc *guc)
 	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
 		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)) {
-		queue_work(guc->log.runtime.flush_wq,
-			   &guc->log.runtime.flush_work);
+		queue_work(guc->log.relay.flush_wq,
+			   &guc->log.relay.flush_work);
 		guc->log.flush_interrupt_count++;
 	}
...
@@ -171,10 +171,10 @@ static void guc_move_to_next_buf(struct intel_guc_log *log)
 	smp_wmb();
 	/* All data has been written, so now move the offset of sub buffer. */
-	relay_reserve(log->runtime.relay_chan, log->vma->obj->base.size);
+	relay_reserve(log->relay.channel, log->vma->obj->base.size);
 	/* Switch to the next sub buffer */
-	relay_flush(log->runtime.relay_chan);
+	relay_flush(log->relay.channel);
 }
 static void *guc_get_write_buffer(struct intel_guc_log *log)
@@ -188,7 +188,7 @@ static void *guc_get_write_buffer(struct intel_guc_log *log)
	 * done without using relay_reserve() along with relay_write(). So its
	 * better to use relay_reserve() alone.
	 */
-	return relay_reserve(log->runtime.relay_chan, 0);
+	return relay_reserve(log->relay.channel, 0);
 }
 static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
@@ -239,13 +239,13 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 	void *src_data, *dst_data;
 	bool new_overflow;
-	mutex_lock(&log->runtime.lock);
-	if (WARN_ON(!log->runtime.buf_addr))
+	mutex_lock(&log->relay.lock);
+	if (WARN_ON(!log->relay.buf_addr))
 		goto out_unlock;
 	/* Get the pointer to shared GuC log buffer */
-	log_buf_state = src_data = log->runtime.buf_addr;
+	log_buf_state = src_data = log->relay.buf_addr;
 	/* Get the pointer to local buffer to store the logs */
 	log_buf_snapshot_state = dst_data = guc_get_write_buffer(log);
@@ -256,7 +256,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
	 * getting consumed by User at a slow rate.
	 */
 	DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
-	log->capture_miss_count++;
+	log->relay.full_count++;
 	goto out_unlock;
 	}
@@ -330,20 +330,20 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
 	guc_move_to_next_buf(log);
 out_unlock:
-	mutex_unlock(&log->runtime.lock);
+	mutex_unlock(&log->relay.lock);
 }
 static void capture_logs_work(struct work_struct *work)
 {
 	struct intel_guc_log *log =
-		container_of(work, struct intel_guc_log, runtime.flush_work);
+		container_of(work, struct intel_guc_log, relay.flush_work);
 	guc_log_capture_logs(log);
 }
-static bool guc_log_has_runtime(struct intel_guc_log *log)
+static bool guc_log_relay_enabled(struct intel_guc_log *log)
 {
-	return log->runtime.buf_addr;
+	return log->relay.buf_addr;
 }
 static int guc_log_map(struct intel_guc_log *log)
@@ -353,7 +353,7 @@ static int guc_log_map(struct intel_guc_log *log)
 	void *vaddr;
 	int ret;
-	lockdep_assert_held(&log->runtime.lock);
+	lockdep_assert_held(&log->relay.lock);
 	if (!log->vma)
 		return -ENODEV;
@@ -375,23 +375,23 @@ static int guc_log_map(struct intel_guc_log *log)
 		return PTR_ERR(vaddr);
 	}
-	log->runtime.buf_addr = vaddr;
+	log->relay.buf_addr = vaddr;
 	return 0;
 }
 static void guc_log_unmap(struct intel_guc_log *log)
 {
-	lockdep_assert_held(&log->runtime.lock);
+	lockdep_assert_held(&log->relay.lock);
 	i915_gem_object_unpin_map(log->vma->obj);
-	log->runtime.buf_addr = NULL;
+	log->relay.buf_addr = NULL;
 }
 void intel_guc_log_init_early(struct intel_guc_log *log)
 {
-	mutex_init(&log->runtime.lock);
-	INIT_WORK(&log->runtime.flush_work, capture_logs_work);
+	mutex_init(&log->relay.lock);
+	INIT_WORK(&log->relay.flush_work, capture_logs_work);
 }
 static int guc_log_relay_create(struct intel_guc_log *log)
@@ -402,7 +402,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
 	size_t n_subbufs, subbuf_size;
 	int ret;
-	lockdep_assert_held(&log->runtime.lock);
+	lockdep_assert_held(&log->relay.lock);
 	/* Keep the size of sub buffers same as shared log buffer */
 	subbuf_size = GUC_LOG_SIZE;
@@ -427,17 +427,17 @@ static int guc_log_relay_create(struct intel_guc_log *log)
 	}
 	GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
-	log->runtime.relay_chan = guc_log_relay_chan;
+	log->relay.channel = guc_log_relay_chan;
 	return 0;
 }
 static void guc_log_relay_destroy(struct intel_guc_log *log)
 {
-	lockdep_assert_held(&log->runtime.lock);
-	relay_close(log->runtime.relay_chan);
-	log->runtime.relay_chan = NULL;
+	lockdep_assert_held(&log->relay.lock);
+	relay_close(log->relay.channel);
+	log->relay.channel = NULL;
 }
 static void guc_log_capture_logs(struct intel_guc_log *log)
@@ -557,9 +557,9 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
 {
 	int ret;
-	mutex_lock(&log->runtime.lock);
-	if (guc_log_has_runtime(log)) {
+	mutex_lock(&log->relay.lock);
+	if (guc_log_relay_enabled(log)) {
 		ret = -EEXIST;
 		goto out_unlock;
 	}
@@ -582,7 +582,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
 	if (ret)
 		goto out_relay;
-	mutex_unlock(&log->runtime.lock);
+	mutex_unlock(&log->relay.lock);
 	guc_flush_log_msg_enable(log_to_guc(log));
@@ -591,14 +591,14 @@ int intel_guc_log_relay_open(struct intel_guc_log *log)
	 * the flush notification. This means that we need to unconditionally
	 * flush on relay enabling, since GuC only notifies us once.
	 */
-	queue_work(log->runtime.flush_wq, &log->runtime.flush_work);
+	queue_work(log->relay.flush_wq, &log->relay.flush_work);
 	return 0;
 out_relay:
 	guc_log_relay_destroy(log);
 out_unlock:
-	mutex_unlock(&log->runtime.lock);
+	mutex_unlock(&log->relay.lock);
 	return ret;
 }
@@ -612,7 +612,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
	 * Before initiating the forceful flush, wait for any pending/ongoing
	 * flush to complete otherwise forceful flush may not actually happen.
	 */
-	flush_work(&log->runtime.flush_work);
+	flush_work(&log->relay.flush_work);
 	intel_runtime_pm_get(i915);
 	guc_log_flush(guc);
@@ -625,11 +625,11 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 void intel_guc_log_relay_close(struct intel_guc_log *log)
 {
 	guc_flush_log_msg_disable(log_to_guc(log));
-	flush_work(&log->runtime.flush_work);
-	mutex_lock(&log->runtime.lock);
-	GEM_BUG_ON(!guc_log_has_runtime(log));
+	flush_work(&log->relay.flush_work);
+	mutex_lock(&log->relay.lock);
+	GEM_BUG_ON(!guc_log_relay_enabled(log));
 	guc_log_unmap(log);
 	guc_log_relay_destroy(log);
-	mutex_unlock(&log->runtime.lock);
+	mutex_unlock(&log->relay.lock);
 }
@@ -43,16 +43,15 @@ struct intel_guc;
 struct intel_guc_log {
 	u32 flags;
 	struct i915_vma *vma;
-	/* The runtime stuff gets created only when GuC logging gets enabled */
 	struct {
 		void *buf_addr;
 		struct workqueue_struct *flush_wq;
 		struct work_struct flush_work;
-		struct rchan *relay_chan;
+		struct rchan *channel;
 		struct mutex lock;
-	} runtime;
+		u32 full_count;
+	} relay;
 	/* logging related stats */
-	u32 capture_miss_count;
 	u32 flush_interrupt_count;
 	u32 prev_overflow_count[GUC_MAX_LOG_BUFFER];
 	u32 total_overflow_count[GUC_MAX_LOG_BUFFER];
...