Commit b5add959 authored by Chris Wilson

drm/i915: Make fb_tracking.lock a spinlock

We only need a very lightweight mechanism here as the locking is only
used for co-ordinating a bitfield.
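
The critical sections guarded by this lock only set and clear bits in an
unsigned bitmask and never sleep, so a spinlock suffices and is cheaper than
a sleeping mutex. The pattern, as it appears in the patch below (comments
added here for illustration):

	spin_lock(&dev_priv->fb_tracking.lock);
	dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;	/* mark these planes busy */
	dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;	/* drop any pending flip state */
	spin_unlock(&dev_priv->fb_tracking.lock);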

v2: Move the cheap unlikely tests into the caller
v3: Move the kerneldoc into the header (now separated out into
intel_frontbuffer.h for better kerneldoc and readability)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1470324762-2545-20-git-send-email-chris@chris-wilson.co.uk
parent 5d723d7a
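
The v2 point is worth spelling out: the common case is an object with no
frontbuffer bits at all, so the new static inline wrappers in
intel_frontbuffer.h (shown at the end of this diff) do that cheap test in
the caller and only make the out-of-line call when tracking work is needed:

	static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
						   enum fb_op_origin origin)
	{
		if (!obj->frontbuffer_bits)
			return;

		__intel_fb_obj_invalidate(obj, origin);
	}
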
drivers/gpu/drm/i915/i915_drv.h
@@ -1669,7 +1669,7 @@ struct intel_pipe_crc {
 };
 
 struct i915_frontbuffer_tracking {
-	struct mutex lock;
+	spinlock_t lock;
 
 	/*
 	 * Tracking bits for delayed frontbuffer flushing du to gpu activity or
drivers/gpu/drm/i915/i915_gem.c
@@ -4455,7 +4455,7 @@ i915_gem_load_init(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	mutex_init(&dev_priv->fb_tracking.lock);
+	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -66,35 +66,19 @@
 #include "intel_frontbuffer.h"
 #include "i915_drv.h"
 
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	if (origin == ORIGIN_CS) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits
-			|= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits
-			&= ~obj->frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
 	intel_psr_invalidate(dev, obj->frontbuffer_bits);
@@ -121,9 +105,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Delay flushing when rings are still busy.*/
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	if (!frontbuffer_bits)
 		return;
@@ -133,18 +117,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -152,21 +127,18 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	frontbuffer_bits = obj->frontbuffer_bits;
 
 	if (retire) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
 		/* Filter out new bits since rendering started. */
 		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
 		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -186,11 +158,11 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
@@ -210,13 +182,14 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
 	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
@@ -235,10 +208,10 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -28,15 +28,57 @@ struct drm_device;
 struct drm_i915_private;
 struct drm_i915_gem_object;
 
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits);
 void intel_frontbuffer_flip(struct drm_device *dev,
 			    unsigned frontbuffer_bits);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-			enum fb_op_origin origin);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+					   enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_invalidate(obj, origin);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+				      bool retire,
+				      enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_flush(obj, retire, origin);
+}
+
 #endif /* __INTEL_FRONTBUFFER_H__ */