Commit 1bef3409 authored by Robert Bragg, committed by Ben Widawsky

drm/i915/perf: remove perf.hook_lock

In earlier iterations of the i915-perf driver we had a number of
callbacks/hooks from other parts of the i915 driver to e.g. notify us
when a legacy context was pinned and these could run asynchronously with
respect to the stream file operations and might also run in atomic
context.

dev_priv->perf.hook_lock had been for serialising access to state needed
within these callbacks, but as the code has evolved some of the hooks
have gone away or are implemented to avoid needing to lock any state.

The remaining use of this lock was actually redundant considering how
the gen7 oacontrol state used to be updated as part of a context pin
hook.
Signed-off-by: Robert Bragg <robert@sixbynine.org>
Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
parent 155e941f
...@@ -2396,8 +2396,6 @@ struct drm_i915_private { ...@@ -2396,8 +2396,6 @@ struct drm_i915_private {
struct mutex lock; struct mutex lock;
struct list_head streams; struct list_head streams;
spinlock_t hook_lock;
struct { struct {
struct i915_perf_stream *exclusive_stream; struct i915_perf_stream *exclusive_stream;
......
...@@ -1836,9 +1836,18 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv) ...@@ -1836,9 +1836,18 @@ static void gen8_disable_metric_set(struct drm_i915_private *dev_priv)
gen8_configure_all_contexts(dev_priv, false); gen8_configure_all_contexts(dev_priv, false);
} }
static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv) static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{ {
lockdep_assert_held(&dev_priv->perf.hook_lock); /*
* Reset buf pointers so we don't forward reports from before now.
*
* Think carefully if considering trying to avoid this, since it
* also ensures status flags and the buffer itself are cleared
* in error paths, and we have checks for invalid reports based
* on the assumption that certain fields are written to zeroed
* memory which this helps maintains.
*/
gen7_init_oa_buffer(dev_priv);
if (dev_priv->perf.oa.exclusive_stream->enabled) { if (dev_priv->perf.oa.exclusive_stream->enabled) {
struct i915_gem_context *ctx = struct i915_gem_context *ctx =
...@@ -1861,25 +1870,6 @@ static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv) ...@@ -1861,25 +1870,6 @@ static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_OACONTROL, 0); I915_WRITE(GEN7_OACONTROL, 0);
} }
/*
 * Enable OA unit counter reporting on gen7 hardware (pre-removal version;
 * this commit deletes the hook_lock serialisation shown below).
 *
 * Re-initialises the OA buffer and then programs OACONTROL under
 * perf.hook_lock, which serialised OACONTROL updates against the legacy
 * context-pin hooks that could run in atomic context.
 */
static void gen7_oa_enable(struct drm_i915_private *dev_priv)
{
unsigned long flags;
/* Reset buf pointers so we don't forward reports from before now.
*
* Think carefully if considering trying to avoid this, since it
* also ensures status flags and the buffer itself are cleared
* in error paths, and we have checks for invalid reports based
* on the assumption that certain fields are written to zeroed
* memory which this helps maintain.
*/
gen7_init_oa_buffer(dev_priv);
/* irqsave: the context-pin hooks this lock guarded could run in
 * atomic/interrupt context, so interrupts must be disabled here. */
spin_lock_irqsave(&dev_priv->perf.hook_lock, flags);
gen7_update_oacontrol_locked(dev_priv);
spin_unlock_irqrestore(&dev_priv->perf.hook_lock, flags);
}
static void gen8_oa_enable(struct drm_i915_private *dev_priv) static void gen8_oa_enable(struct drm_i915_private *dev_priv)
{ {
u32 report_format = dev_priv->perf.oa.oa_buffer.format; u32 report_format = dev_priv->perf.oa.oa_buffer.format;
...@@ -3098,7 +3088,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv) ...@@ -3098,7 +3088,6 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->perf.streams); INIT_LIST_HEAD(&dev_priv->perf.streams);
mutex_init(&dev_priv->perf.lock); mutex_init(&dev_priv->perf.lock);
spin_lock_init(&dev_priv->perf.hook_lock);
spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock); spin_lock_init(&dev_priv->perf.oa.oa_buffer.ptr_lock);
oa_sample_rate_hard_limit = oa_sample_rate_hard_limit =
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment