Commit d91e6578 authored by Chris Wilson

drm/i915: Introduce struct intel_wakeref

For controlling runtime pm of the GT and engines, we would like to have
a callback to do extra work the first time we wake up and the last time
we drop the wakeref. This first/last access needs serialisation, and so
we pair a mutex with the regular intel_wakeref_t tracker.

v2: Drop the _once naming and report the errors.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190424200717.1686-1-chris@chris-wilson.co.uk
parent 112ed2d3
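
To make the intended usage concrete, here is a minimal sketch against the
API added below. Everything except the intel_wakeref calls (struct
my_engine, engine_wake, engine_sleep) is invented for illustration and is
not part of this patch:

/* Hypothetical caller; only the intel_wakeref_* API is from this patch. */
#include "intel_wakeref.h"

struct my_engine {
	struct drm_i915_private *i915;
	struct intel_wakeref wakeref;
};

static int engine_wake(struct intel_wakeref *wf)
{
	/* Runs once, under wf->mutex, when the count rises 0 -> 1. */
	return 0;
}

static int engine_sleep(struct intel_wakeref *wf)
{
	/* Runs once, under wf->mutex, when the count falls 1 -> 0. */
	return 0;
}

static void my_engine_init(struct my_engine *engine)
{
	intel_wakeref_init(&engine->wakeref);
}

static int my_engine_pm_get(struct my_engine *engine)
{
	return intel_wakeref_get(engine->i915, &engine->wakeref, engine_wake);
}

static int my_engine_pm_put(struct my_engine *engine)
{
	return intel_wakeref_put(engine->i915, &engine->wakeref, engine_sleep);
}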
drivers/gpu/drm/i915/Makefile
@@ -50,6 +50,7 @@ i915-y += i915_drv.o \
 	  intel_device_info.o \
 	  intel_pm.o \
 	  intel_runtime_pm.o \
+	  intel_wakeref.o \
 	  intel_uncore.o

 # core library code
drivers/gpu/drm/i915/Makefile.header-test
@@ -31,7 +31,8 @@ header_test := \
 	intel_psr.h \
 	intel_sdvo.h \
 	intel_sprite.h \
-	intel_tv.h
+	intel_tv.h \
+	intel_wakeref.h

 quiet_cmd_header_test = HDRTEST $@
 cmd_header_test = echo "\#include \"$(<F)\"" > $@
drivers/gpu/drm/i915/i915_drv.h
@@ -74,6 +74,7 @@
 #include "intel_opregion.h"
 #include "intel_uc.h"
 #include "intel_uncore.h"
+#include "intel_wakeref.h"
 #include "intel_wopcm.h"

 #include "i915_gem.h"
@@ -134,8 +135,6 @@ bool i915_error_injected(void);
 	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
 		      fmt, ##__VA_ARGS__)

-typedef depot_stack_handle_t intel_wakeref_t;
-
 enum hpd_pin {
 	HPD_NONE = 0,
 	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
drivers/gpu/drm/i915/intel_wakeref.c (new file)

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "intel_drv.h"
#include "intel_wakeref.h"
int __intel_wakeref_get_first(struct drm_i915_private *i915,
			      struct intel_wakeref *wf,
			      int (*fn)(struct intel_wakeref *wf))
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		wf->wakeref = intel_runtime_pm_get(i915);

		err = fn(wf);
		if (unlikely(err)) {
			intel_runtime_pm_put(i915, wf->wakeref);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	return 0;
}
int __intel_wakeref_put_last(struct drm_i915_private *i915,
			     struct intel_wakeref *wf,
			     int (*fn)(struct intel_wakeref *wf))
{
	int err;

	err = fn(wf);
	if (likely(!err))
		intel_runtime_pm_put(i915, wf->wakeref);
	else
		atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	return err;
}

void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
{
	__mutex_init(&wf->mutex, "wakeref", key);
	atomic_set(&wf->count, 0);
}
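
One subtlety worth spelling out (an editorial reading of the barrier, not
from the commit message): the smp_mb__before_atomic() above pairs with the
lock-free atomic_inc_not_zero() fast path in intel_wakeref_get() below, so
that any state set up by fn() during the 0 -> 1 transition is published
before wf->count becomes nonzero. Condensed, the two sides are:

/* Slow path (intel_wakeref.c above), under wf->mutex: */
wf->wakeref = intel_runtime_pm_get(i915);
err = fn(wf);			/* set up state for the awake device */
smp_mb__before_atomic();	/* publish that state before ... */
atomic_inc(&wf->count);		/* ... the count becomes visible */

/* Fast path (intel_wakeref.h below), no lock taken: */
if (atomic_inc_not_zero(&wf->count))	/* sees a nonzero count ... */
	return 0;		/* ... and hence the published state */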
drivers/gpu/drm/i915/intel_wakeref.h (new file)

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/stackdepot.h>

struct drm_i915_private;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;
	intel_wakeref_t wakeref;
};
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct lock_class_key *key);
#define intel_wakeref_init(wf) do {					\
	static struct lock_class_key __key;				\
									\
	__intel_wakeref_init((wf), &__key);				\
} while (0)

int __intel_wakeref_get_first(struct drm_i915_private *i915,
			      struct intel_wakeref *wf,
			      int (*fn)(struct intel_wakeref *wf));
int __intel_wakeref_put_last(struct drm_i915_private *i915,
			     struct intel_wakeref *wf,
			     int (*fn)(struct intel_wakeref *wf));
/**
 * intel_wakeref_get: Acquire the wakeref
 * @i915: the drm_i915_private device
 * @wf: the wakeref
 * @fn: callback invoked when the wakeref is acquired; called only on the
 *      first acquire
 *
 * Acquire a hold on the wakeref. The first user to do so will acquire
 * the runtime pm wakeref and then call @fn underneath the wakeref
 * mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * is released, the acquisition unwound, and an error reported.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_get(struct drm_i915_private *i915,
		  struct intel_wakeref *wf,
		  int (*fn)(struct intel_wakeref *wf))
{
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(i915, wf, fn);

	return 0;
}
/**
 * intel_wakeref_put: Release the wakeref
 * @i915: the drm_i915_private device
 * @wf: the wakeref
 * @fn: callback for releasing the wakeref, called only on final release
 *
 * Release our hold on the wakeref. When there are no more users,
 * the runtime pm wakeref will be released after the @fn callback is called
 * underneath the wakeref mutex.
 *
 * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 * is retained and an error reported.
 *
 * Returns: 0 if the wakeref was released successfully, or a negative error
 * code otherwise.
 */
static inline int
intel_wakeref_put(struct drm_i915_private *i915,
		  struct intel_wakeref *wf,
		  int (*fn)(struct intel_wakeref *wf))
{
	if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
		return __intel_wakeref_put_last(i915, wf, fn);

	return 0;
}
/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and callback)
 * cannot be acquired or released.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

/**
 * intel_wakeref_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_active(struct intel_wakeref *wf)
{
	return atomic_read(&wf->count);
}

#endif /* INTEL_WAKEREF_H */
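
Finally, a sketch of how the lock/unlock and active helpers compose (the
surrounding my_engine context is hypothetical, as above): holding the mutex
via intel_wakeref_lock() blocks the first-acquire and last-release
transitions, so a query of intel_wakeref_active() under the lock cannot
race with the callbacks.

/* Hypothetical inspection site, e.g. a debugfs or idle handler. */
static bool my_engine_is_awake(struct my_engine *engine)
{
	bool active;

	intel_wakeref_lock(&engine->wakeref);	/* block 0 <-> 1 transitions */
	active = intel_wakeref_active(&engine->wakeref);
	intel_wakeref_unlock(&engine->wakeref);

	return active;
}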