Commit 656f72bb authored by Rafael J. Wysocki

Merge back earlier PM core material for v5.1.

parents 74fb4486 85945c28
@@ -427,6 +427,7 @@ __cpu_device_create(struct device *parent, void *drvdata,
 	dev->parent = parent;
 	dev->groups = groups;
 	dev->release = device_create_release;
+	device_set_pm_not_required(dev);
 	dev_set_drvdata(dev, drvdata);
 
 	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
......
@@ -65,10 +65,15 @@ static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
 	if (IS_ERR(ce->clk)) {
 		ce->status = PCE_STATUS_ERROR;
 	} else {
-		clk_prepare(ce->clk);
-		ce->status = PCE_STATUS_ACQUIRED;
-		dev_dbg(dev, "Clock %pC con_id %s managed by runtime PM.\n",
-			ce->clk, ce->con_id);
+		if (clk_prepare(ce->clk)) {
+			ce->status = PCE_STATUS_ERROR;
+			dev_err(dev, "clk_prepare() failed\n");
+		} else {
+			ce->status = PCE_STATUS_ACQUIRED;
+			dev_dbg(dev,
+				"Clock %pC con_id %s managed by runtime PM.\n",
+				ce->clk, ce->con_id);
+		}
 	}
 }
......
@@ -124,6 +124,10 @@ void device_pm_unlock(void)
  */
 void device_pm_add(struct device *dev)
 {
+	/* Skip PM setup/initialization. */
+	if (device_pm_not_required(dev))
+		return;
+
 	pr_debug("PM: Adding info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	device_pm_check_callbacks(dev);
@@ -142,6 +146,9 @@ void device_pm_add(struct device *dev)
  */
 void device_pm_remove(struct device *dev)
 {
+	if (device_pm_not_required(dev))
+		return;
+
 	pr_debug("PM: Removing info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
@@ -1741,8 +1748,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.direct_complete) {
 		if (pm_runtime_status_suspended(dev)) {
 			pm_runtime_disable(dev);
-			if (pm_runtime_status_suspended(dev))
+			if (pm_runtime_status_suspended(dev)) {
+				pm_dev_dbg(dev, state, "direct-complete ");
 				goto Complete;
+			}
 
 			pm_runtime_enable(dev);
 		}
......
@@ -66,20 +66,30 @@ static int rpm_suspend(struct device *dev, int rpmflags);
  */
 void update_pm_runtime_accounting(struct device *dev)
 {
-	unsigned long now = jiffies;
-	unsigned long delta;
-
-	delta = now - dev->power.accounting_timestamp;
+	u64 now, last, delta;
+
+	if (dev->power.disable_depth > 0)
+		return;
+
+	last = dev->power.accounting_timestamp;
 
+	now = ktime_get_mono_fast_ns();
 	dev->power.accounting_timestamp = now;
 
-	if (dev->power.disable_depth > 0)
+	/*
+	 * Because ktime_get_mono_fast_ns() is not monotonic during
+	 * timekeeping updates, ensure that 'now' is after the last saved
+	 * timestamp.
+	 */
+	if (now < last)
 		return;
 
+	delta = now - last;
+
 	if (dev->power.runtime_status == RPM_SUSPENDED)
-		dev->power.suspended_jiffies += delta;
+		dev->power.suspended_time += delta;
 	else
-		dev->power.active_jiffies += delta;
+		dev->power.active_time += delta;
 }
 
 static void __update_runtime_status(struct device *dev, enum rpm_status status)
@@ -88,6 +98,22 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status)
 	dev->power.runtime_status = status;
 }
 
+u64 pm_runtime_suspended_time(struct device *dev)
+{
+	u64 time;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->power.lock, flags);
+
+	update_pm_runtime_accounting(dev);
+	time = dev->power.suspended_time;
+
+	spin_unlock_irqrestore(&dev->power.lock, flags);
+
+	return time;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
+
 /**
  * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
  * @dev: Device to handle.
@@ -129,24 +155,21 @@ static void pm_runtime_cancel_pending(struct device *dev)
 u64 pm_runtime_autosuspend_expiration(struct device *dev)
 {
 	int autosuspend_delay;
-	u64 last_busy, expires = 0;
-	u64 now = ktime_get_mono_fast_ns();
+	u64 expires;
 
 	if (!dev->power.use_autosuspend)
-		goto out;
+		return 0;
 
 	autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
 	if (autosuspend_delay < 0)
-		goto out;
-
-	last_busy = READ_ONCE(dev->power.last_busy);
+		return 0;
 
-	expires = last_busy + (u64)autosuspend_delay * NSEC_PER_MSEC;
-	if (expires <= now)
-		expires = 0;	/* Already expired. */
+	expires  = READ_ONCE(dev->power.last_busy);
+	expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
+	if (expires > ktime_get_mono_fast_ns())
+		return expires;	/* Expires in the future */
 
- out:
-	return expires;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
@@ -1276,6 +1299,9 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
 		pm_runtime_put_noidle(dev);
 	}
 
+	/* Update time accounting before disabling PM-runtime. */
+	update_pm_runtime_accounting(dev);
+
 	if (!dev->power.disable_depth++)
 		__pm_runtime_barrier(dev);
@@ -1294,10 +1320,15 @@ void pm_runtime_enable(struct device *dev)
 	spin_lock_irqsave(&dev->power.lock, flags);
 
-	if (dev->power.disable_depth > 0)
+	if (dev->power.disable_depth > 0) {
 		dev->power.disable_depth--;
-	else
+
+		/* About to enable runtime pm, set accounting_timestamp to now */
+		if (!dev->power.disable_depth)
+			dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
+	} else {
 		dev_warn(dev, "Unbalanced %s!\n", __func__);
+	}
 
 	WARN(!dev->power.disable_depth &&
 	     dev->power.runtime_status == RPM_SUSPENDED &&
@@ -1494,7 +1525,6 @@ void pm_runtime_init(struct device *dev)
 	dev->power.request_pending = false;
 	dev->power.request = RPM_REQ_NONE;
 	dev->power.deferred_resume = false;
-	dev->power.accounting_timestamp = jiffies;
 	INIT_WORK(&dev->power.work, pm_runtime_work);
 
 	dev->power.timer_expires = 0;
......
@@ -125,9 +125,12 @@ static ssize_t runtime_active_time_show(struct device *dev,
 					struct device_attribute *attr, char *buf)
 {
 	int ret;
+	u64 tmp;
 	spin_lock_irq(&dev->power.lock);
 	update_pm_runtime_accounting(dev);
-	ret = sprintf(buf, "%i\n", jiffies_to_msecs(dev->power.active_jiffies));
+	tmp = dev->power.active_time;
+	do_div(tmp, NSEC_PER_MSEC);
+	ret = sprintf(buf, "%llu\n", tmp);
 	spin_unlock_irq(&dev->power.lock);
 	return ret;
 }
@@ -138,10 +141,12 @@ static ssize_t runtime_suspended_time_show(struct device *dev,
 					struct device_attribute *attr, char *buf)
 {
 	int ret;
+	u64 tmp;
 	spin_lock_irq(&dev->power.lock);
 	update_pm_runtime_accounting(dev);
-	ret = sprintf(buf, "%i\n",
-		jiffies_to_msecs(dev->power.suspended_jiffies));
+	tmp = dev->power.suspended_time;
+	do_div(tmp, NSEC_PER_MSEC);
+	ret = sprintf(buf, "%llu\n", tmp);
 	spin_unlock_irq(&dev->power.lock);
 	return ret;
 }
@@ -648,6 +653,10 @@ int dpm_sysfs_add(struct device *dev)
 {
 	int rc;
 
+	/* No need to create PM sysfs if explicitly disabled. */
+	if (device_pm_not_required(dev))
+		return 0;
+
 	rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
 	if (rc)
 		return rc;
@@ -727,6 +736,8 @@ void rpm_sysfs_remove(struct device *dev)
 void dpm_sysfs_remove(struct device *dev)
 {
+	if (device_pm_not_required(dev))
+		return;
 	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
 	dev_pm_qos_constraints_destroy(dev);
 	rpm_sysfs_remove(dev);
......
@@ -5,6 +5,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/pm_runtime.h>
 #include "i915_pmu.h"
 #include "intel_ringbuffer.h"
 #include "i915_drv.h"
@@ -478,7 +479,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		 * counter value.
 		 */
 		spin_lock_irqsave(&i915->pmu.lock, flags);
-		spin_lock(&kdev->power.lock);
 
 		/*
 		 * After the above branch intel_runtime_pm_get_if_in_use failed
@@ -491,16 +491,13 @@ static u64 get_rc6(struct drm_i915_private *i915)
 		 * suspended and if not we cannot do better than report the last
 		 * known RC6 value.
 		 */
-		if (kdev->power.runtime_status == RPM_SUSPENDED) {
-			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
-				i915->pmu.suspended_jiffies_last =
-						  kdev->power.suspended_jiffies;
+		if (pm_runtime_status_suspended(kdev)) {
+			val = pm_runtime_suspended_time(kdev);
 
-			val = kdev->power.suspended_jiffies -
-			      i915->pmu.suspended_jiffies_last;
-			val += jiffies - kdev->power.accounting_timestamp;
+			if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+				i915->pmu.suspended_time_last = val;
 
-			val = jiffies_to_nsecs(val);
+			val -= i915->pmu.suspended_time_last;
 			val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 
 			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
@@ -510,7 +507,6 @@ static u64 get_rc6(struct drm_i915_private *i915)
 			val = i915->pmu.sample[__I915_SAMPLE_RC6].cur;
 		}
 
-		spin_unlock(&kdev->power.lock);
 		spin_unlock_irqrestore(&i915->pmu.lock, flags);
 	}
......
@@ -97,9 +97,9 @@ struct i915_pmu {
 	 */
 	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
 	/**
-	 * @suspended_jiffies_last: Cached suspend time from PM core.
+	 * @suspended_time_last: Cached suspend time from PM core.
 	 */
-	unsigned long suspended_jiffies_last;
+	u64 suspended_time_last;
 	/**
 	 * @i915_attr: Memory block holding device attributes.
 	 */
......
@@ -1165,6 +1165,16 @@ static inline bool device_async_suspend_enabled(struct device *dev)
 	return !!dev->power.async_suspend;
 }
 
+static inline bool device_pm_not_required(struct device *dev)
+{
+	return dev->power.no_pm;
+}
+
+static inline void device_set_pm_not_required(struct device *dev)
+{
+	dev->power.no_pm = true;
+}
+
 static inline void dev_pm_syscore_device(struct device *dev, bool val)
 {
 #ifdef CONFIG_PM_SLEEP
......
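For illustration only (hypothetical driver code, not part of this commit): the two inline helpers above gate the new dev_pm_info.no_pm flag added in the pm.h hunk below, and device_pm_add(), device_pm_remove(), dpm_sysfs_add() and dpm_sysfs_remove() check it. A minimal sketch of the intended usage, mirroring what __cpu_device_create() does in the first hunk; the flag has to be set before the device is added:

/* Hypothetical example only -- not taken from this commit. */
#include <linux/device.h>

static int example_register_no_pm_dev(struct device *dev)
{
	device_initialize(dev);

	/* Opt this device out of PM-core bookkeeping and PM sysfs. */
	device_set_pm_not_required(dev);

	/* device_pm_add() and dpm_sysfs_add() will see the flag and bail out. */
	return device_add(dev);
}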
@@ -592,6 +592,7 @@
 	bool			is_suspended:1;	/* Ditto */
 	bool			is_noirq_suspended:1;
 	bool			is_late_suspended:1;
+	bool			no_pm:1;
 	bool			early_init:1;	/* Owned by the PM core */
 	bool			direct_complete:1;	/* Owned by the PM core */
 	u32			driver_flags;
@@ -633,9 +634,9 @@ struct dev_pm_info {
 	int			runtime_error;
 	int			autosuspend_delay;
 	u64			last_busy;
-	unsigned long		active_jiffies;
-	unsigned long		suspended_jiffies;
-	unsigned long		accounting_timestamp;
+	u64			active_time;
+	u64			suspended_time;
+	u64			accounting_timestamp;
 #endif
 	struct pm_subsys_data	*subsys_data;  /* Owned by the subsystem. */
 	void (*set_latency_tolerance)(struct device *, s32);
......
@@ -113,6 +113,8 @@ static inline bool pm_runtime_is_irq_safe(struct device *dev)
 	return dev->power.irq_safe;
 }
 
+extern u64 pm_runtime_suspended_time(struct device *dev);
+
 #else /* !CONFIG_PM */
 
 static inline bool queue_pm_work(struct work_struct *work) { return false; }
......
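For illustration only (hypothetical consumer code, not part of this commit): pm_runtime_suspended_time(), declared above under CONFIG_PM, returns the accumulated suspended time in nanoseconds. A minimal sketch that converts it to milliseconds with do_div(), the same way runtime_suspended_time_show() does in the sysfs hunk above; the function name below is made up:

/* Hypothetical example only -- not taken from this commit. */
#include <linux/device.h>
#include <linux/math64.h>
#include <linux/pm_runtime.h>
#include <linux/time64.h>

static void example_report_suspended_time(struct device *dev)
{
	u64 ms = pm_runtime_suspended_time(dev);	/* nanoseconds */

	do_div(ms, NSEC_PER_MSEC);	/* in-place divide, remainder discarded */
	dev_info(dev, "suspended for %llu ms so far\n", ms);
}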