Commit b49e894c authored by Andrzej Hajda

drm/i915: Replace custom intel runtime_pm tracker with ref_tracker library

Besides reusing existing code, the main advantage of ref_tracker is
that it tracks every wakeref instance separately. It also allows
catching double puts. On the other hand, we lose information about the
first acquire and the last release, but the advantages outweigh that.
Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231030-ref_tracker_i915-v1-1-006fe6b96421@intel.com
parent dfed6b58
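
For context, the ref_tracker pattern this patch adopts looks roughly as follows. This is a minimal sketch against the <linux/ref_tracker.h> API only; the "foo" object and its init/get/put/fini helpers are hypothetical stand-ins for intel_runtime_pm and its wakeref helpers, not code from the patch.

/* Hedged sketch: a made-up "foo" object tracked with ref_tracker.
 * The ref_tracker_* calls are the real <linux/ref_tracker.h> API
 * (CONFIG_REF_TRACKER); everything named foo_* is illustrative.
 */
#include <linux/ref_tracker.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
        struct ref_tracker_dir dir;     /* one tracking dir per object */
        refcount_t refs;
};

static void foo_init(struct foo *f)
{
        /* quarantine up to 16 "dead" trackers so a double put can still
         * be reported; "foo" is the name printed in leak reports */
        ref_tracker_dir_init(&f->dir, 16, "foo");
        refcount_set(&f->refs, 1);
}

/* each get returns a cookie identifying this particular reference */
static struct ref_tracker *foo_get(struct foo *f)
{
        struct ref_tracker *t = NULL;

        refcount_inc(&f->refs);
        ref_tracker_alloc(&f->dir, &t, GFP_KERNEL); /* records a stack trace */
        return t;
}

/* putting twice with the same cookie is caught by ref_tracker_free() */
static void foo_put(struct foo *f, struct ref_tracker *t)
{
        ref_tracker_free(&f->dir, &t);
        refcount_dec(&f->refs);
}

static void foo_fini(struct foo *f)
{
        /* prints any references that were never put, with their stacks */
        ref_tracker_dir_exit(&f->dir);
}

Because every get hands back an opaque cookie identifying that particular reference, the library can flag an unbalanced or double put at the exact call site, which is what the commit message means by per-instance tracking.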
@@ -24,7 +24,9 @@ config DRM_I915_DEBUG
 	select DEBUG_FS
 	select PREEMPT_COUNT
 	select I2C_CHARDEV
+	select REF_TRACKER
 	select STACKDEPOT
+	select STACKTRACE
 	select DRM_DP_AUX_CHARDEV
 	select X86_MSR # used by igt/pm_rpm
 	select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
@@ -231,7 +233,9 @@ config DRM_I915_DEBUG_RUNTIME_PM
 	bool "Enable extra state checking for runtime PM"
 	depends on DRM_I915
 	default n
+	select REF_TRACKER
 	select STACKDEPOT
+	select STACKTRACE
 	help
 	  Choose this option to turn on extra state checking for the
 	  runtime PM functionality. This may introduce overhead during
......
@@ -405,7 +405,7 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
 						     struct drm_i915_private,
 						     display.power.domains);

-	drm_dbg(&i915->drm, "async_put_wakeref %u\n",
+	drm_dbg(&i915->drm, "async_put_wakeref %lu\n",
 		power_domains->async_put_wakeref);

 	print_power_domains(power_domains, "async_put_domains[0]",
......
@@ -1037,7 +1037,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
 	intel_power_domains_driver_remove(i915);
 	enable_rpm_wakeref_asserts(&i915->runtime_pm);

-	intel_runtime_pm_driver_release(&i915->runtime_pm);
+	intel_runtime_pm_driver_last_release(&i915->runtime_pm);
 }

 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
......
@@ -52,182 +52,37 @@
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

-#include <linux/sort.h>
-
-#define STACKDEPTH 8
-
-static noinline depot_stack_handle_t __save_depot_stack(void)
-{
-	unsigned long entries[STACKDEPTH];
-	unsigned int n;
-
-	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
-	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
-}
-
 static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
-	spin_lock_init(&rpm->debug.lock);
-	stack_depot_init();
+	ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
 }

-static noinline depot_stack_handle_t
+static intel_wakeref_t
 track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
-	depot_stack_handle_t stack, *stacks;
-	unsigned long flags;
-
-	if (rpm->no_wakeref_tracking)
-		return -1;
-
-	stack = __save_depot_stack();
-	if (!stack)
+	if (!rpm->available || rpm->no_wakeref_tracking)
 		return -1;

-	spin_lock_irqsave(&rpm->debug.lock, flags);
-
-	if (!rpm->debug.count)
-		rpm->debug.last_acquire = stack;
-
-	stacks = krealloc(rpm->debug.owners,
-			  (rpm->debug.count + 1) * sizeof(*stacks),
-			  GFP_NOWAIT | __GFP_NOWARN);
-	if (stacks) {
-		stacks[rpm->debug.count++] = stack;
-		rpm->debug.owners = stacks;
-	} else {
-		stack = -1;
-	}
-
-	spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
-	return stack;
+	return intel_ref_tracker_alloc(&rpm->debug);
 }

 static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
-					     depot_stack_handle_t stack)
-{
-	struct drm_i915_private *i915 = container_of(rpm,
-						     struct drm_i915_private,
-						     runtime_pm);
-	unsigned long flags, n;
-	bool found = false;
-
-	if (unlikely(stack == -1))
-		return;
-
-	spin_lock_irqsave(&rpm->debug.lock, flags);
-	for (n = rpm->debug.count; n--; ) {
-		if (rpm->debug.owners[n] == stack) {
-			memmove(rpm->debug.owners + n,
-				rpm->debug.owners + n + 1,
-				(--rpm->debug.count - n) * sizeof(stack));
-			found = true;
-			break;
-		}
-	}
-	spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
-	if (drm_WARN(&i915->drm, !found,
-		     "Unmatched wakeref (tracking %lu), count %u\n",
-		     rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
-		char *buf;
-
-		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
-		if (!buf)
-			return;
-
-		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
-		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
-
-		stack = READ_ONCE(rpm->debug.last_release);
-		if (stack) {
-			stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
-			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
-		}
-
-		kfree(buf);
-	}
-}
-
-static int cmphandle(const void *_a, const void *_b)
-{
-	const depot_stack_handle_t * const a = _a, * const b = _b;
-
-	if (*a < *b)
-		return -1;
-	else if (*a > *b)
-		return 1;
-	else
-		return 0;
-}
-
-static void
-__print_intel_runtime_pm_wakeref(struct drm_printer *p,
-				 const struct intel_runtime_pm_debug *dbg)
+					     intel_wakeref_t wakeref)
 {
-	unsigned long i;
-	char *buf;
-
-	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
-	if (!buf)
+	if (!rpm->available || rpm->no_wakeref_tracking)
 		return;

-	if (dbg->last_acquire) {
-		stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
-		drm_printf(p, "Wakeref last acquired:\n%s", buf);
-	}
-
-	if (dbg->last_release) {
-		stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
-		drm_printf(p, "Wakeref last released:\n%s", buf);
-	}
-
-	drm_printf(p, "Wakeref count: %lu\n", dbg->count);
-
-	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
-
-	for (i = 0; i < dbg->count; i++) {
-		depot_stack_handle_t stack = dbg->owners[i];
-		unsigned long rep;
-
-		rep = 1;
-		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
-			rep++, i++;
-		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
-		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
-	}
-
-	kfree(buf);
-}
-
-static noinline void
-__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
-		       struct intel_runtime_pm_debug *saved)
-{
-	*saved = *debug;
-
-	debug->owners = NULL;
-	debug->count = 0;
-	debug->last_release = __save_depot_stack();
+	intel_ref_tracker_free(&rpm->debug, wakeref);
 }

-static void
-dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
+static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
 {
-	if (debug->count) {
-		struct drm_printer p = drm_debug_printer("i915");
-
-		__print_intel_runtime_pm_wakeref(&p, debug);
-	}
-
-	kfree(debug->owners);
+	ref_tracker_dir_exit(&rpm->debug);
 }

 static noinline void
 __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
 {
-	struct intel_runtime_pm_debug dbg = {};
 	unsigned long flags;

 	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
@@ -235,60 +90,14 @@ __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
 					 flags))
 		return;

-	__untrack_all_wakerefs(&rpm->debug, &dbg);
+	ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
 	spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
-	dump_and_free_wakeref_tracking(&dbg);
-}
-
-static noinline void
-untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
-{
-	struct intel_runtime_pm_debug dbg = {};
-	unsigned long flags;
-
-	spin_lock_irqsave(&rpm->debug.lock, flags);
-	__untrack_all_wakerefs(&rpm->debug, &dbg);
-	spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
-	dump_and_free_wakeref_tracking(&dbg);
 }

 void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
 				    struct drm_printer *p)
 {
-	struct intel_runtime_pm_debug dbg = {};
-
-	do {
-		unsigned long alloc = dbg.count;
-		depot_stack_handle_t *s;
-
-		spin_lock_irq(&rpm->debug.lock);
-		dbg.count = rpm->debug.count;
-		if (dbg.count <= alloc) {
-			memcpy(dbg.owners,
-			       rpm->debug.owners,
-			       dbg.count * sizeof(*s));
-		}
-		dbg.last_acquire = rpm->debug.last_acquire;
-		dbg.last_release = rpm->debug.last_release;
-		spin_unlock_irq(&rpm->debug.lock);
-
-		if (dbg.count <= alloc)
-			break;
-
-		s = krealloc(dbg.owners,
-			     dbg.count * sizeof(*s),
-			     GFP_NOWAIT | __GFP_NOWARN);
-		if (!s)
-			goto out;
-
-		dbg.owners = s;
-	} while (1);
-
-	__print_intel_runtime_pm_wakeref(p, &dbg);
-
-out:
-	kfree(dbg.owners);
+	intel_ref_tracker_show(&rpm->debug, p);
 }

 #else
@@ -297,14 +106,14 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
 }

-static depot_stack_handle_t
+static intel_wakeref_t
 track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
 	return -1;
 }

 static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
-					     intel_wakeref_t wref)
+					     intel_wakeref_t wakeref)
 {
 }
@@ -639,7 +448,11 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
 		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
 		 intel_rpm_raw_wakeref_count(count),
 		 intel_rpm_wakelock_count(count));
+}

+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
+{
+	intel_runtime_pm_driver_release(rpm);
 	untrack_all_intel_runtime_pm_wakerefs(rpm);
 }
......
@@ -77,15 +77,7 @@ struct intel_runtime_pm {
 	 * paired rpm_put) we can remove corresponding pairs of and keep
 	 * the array trimmed to active wakerefs.
 	 */
-	struct intel_runtime_pm_debug {
-		spinlock_t lock;
-		depot_stack_handle_t last_acquire;
-		depot_stack_handle_t last_release;
-		depot_stack_handle_t *owners;
-		unsigned long count;
-	} debug;
+	struct ref_tracker_dir debug;
 #endif
 };
@@ -189,6 +181,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
 void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
 void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
 void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);

 intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
......
@@ -191,3 +191,31 @@ void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
 	intel_wakeref_auto(wf, 0);
 	INTEL_WAKEREF_BUG_ON(wf->wakeref);
 }
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+			    struct drm_printer *p)
+{
+	const size_t buf_size = PAGE_SIZE;
+	char *buf, *sb, *se;
+	size_t count;
+
+	buf = kmalloc(buf_size, GFP_NOWAIT);
+	if (!buf)
+		return;
+
+	count = ref_tracker_dir_snprint(dir, buf, buf_size);
+	if (!count)
+		goto free;
+	/* printk does not like big buffers, so we split it */
+	for (sb = buf; *sb; sb = se + 1) {
+		se = strchrnul(sb, '\n');
+		drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
+		if (!*se)
+			break;
+	}
+	if (count >= buf_size)
+		drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
+			   count + 1 - buf_size);
+free:
+	kfree(buf);
+}
@@ -7,16 +7,25 @@
 #ifndef INTEL_WAKEREF_H
 #define INTEL_WAKEREF_H

+#include <drm/drm_print.h>
+
 #include <linux/atomic.h>
 #include <linux/bitfield.h>
 #include <linux/bits.h>
 #include <linux/lockdep.h>
 #include <linux/mutex.h>
 #include <linux/refcount.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
 #include <linux/stackdepot.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>

+typedef unsigned long intel_wakeref_t;
+
+#define INTEL_REFTRACK_DEAD_COUNT 16
+#define INTEL_REFTRACK_PRINT_LIMIT 16
+
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 #define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
 #else
@@ -26,8 +35,6 @@
 struct intel_runtime_pm;
 struct intel_wakeref;

-typedef depot_stack_handle_t intel_wakeref_t;
-
 struct intel_wakeref_ops {
 	int (*get)(struct intel_wakeref *wf);
 	int (*put)(struct intel_wakeref *wf);
@@ -261,6 +268,30 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf)
  */
 int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

+#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1))
+
+static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
+{
+	struct ref_tracker *user = NULL;
+
+	ref_tracker_alloc(dir, &user, GFP_NOWAIT);
+
+	return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF;
+}
+
+static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
+					  intel_wakeref_t handle)
+{
+	struct ref_tracker *user;
+
+	user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle;
+
+	ref_tracker_free(dir, &user);
+}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+			    struct drm_printer *p);
+
 struct intel_wakeref_auto {
 	struct drm_i915_private *i915;
 	struct timer_list timer;
......
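
The new intel_ref_tracker_alloc()/intel_ref_tracker_free() inline helpers above encode the cookie convention: the struct ref_tracker pointer is folded into the opaque intel_wakeref_t that intel_runtime_pm_get() already returns, and unfolded again on put. A hedged sketch of a caller follows; intel_runtime_pm_get()/intel_runtime_pm_put() are the existing i915 API, while my_i915_do_mmio() and its argument are purely illustrative.

/* Illustrative only: how a driver path holds a tracked wakeref. */
static void my_i915_do_mmio(struct intel_runtime_pm *rpm)
{
        intel_wakeref_t wakeref;

        /* with DRM_I915_DEBUG_RUNTIME_PM, this allocates a ref_tracker entry */
        wakeref = intel_runtime_pm_get(rpm);

        /* ... touch hardware while the device is guaranteed awake ... */

        /* frees exactly this entry; a second put with the same cookie,
         * or a cookie that was never handed out, is reported */
        intel_runtime_pm_put(rpm, wakeref);
}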