Commit 4650b279 authored by Rafael J. Wysocki

Merge branches 'pm-cpuidle' and 'pm-cpufreq'

* pm-cpuidle:
  cpuidle: governor: Add new governors to cpuidle_governors again
  cpuidle: menu: Avoid overflows when computing variance

* pm-cpufreq:
  cpufreq: intel_pstate: Fix up iowait_boost computation
  cpufreq: pxa2xx: remove incorrect __init annotation
  cpufreq: Improve kerneldoc comments for cpufreq_cpu_get/put()
@@ -206,17 +206,15 @@ unsigned int cpufreq_generic_get(unsigned int cpu)
 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
 
 /**
- * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
- *
- * @cpu: cpu to find policy for.
- *
- * This returns policy for 'cpu', returns NULL if it doesn't exist.
- * It also increments the kobject reference count to mark it busy and so would
- * require a corresponding call to cpufreq_cpu_put() to decrement it back.
- * If corresponding call cpufreq_cpu_put() isn't made, the policy wouldn't be
- * freed as that depends on the kobj count.
+ * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
+ * @cpu: CPU to find the policy for.
+ *
+ * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
+ * the kobject reference counter of that policy. Return a valid policy on
+ * success or NULL on failure.
  *
- * Return: A valid policy on success, otherwise NULL on failure.
+ * The policy returned by this function has to be released with the help of
+ * cpufreq_cpu_put() to balance its kobject reference counter properly.
  */
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
@@ -243,12 +241,8 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 
 /**
- * cpufreq_cpu_put: Decrements the usage count of a policy
- *
- * @policy: policy earlier returned by cpufreq_cpu_get().
- *
- * This decrements the kobject reference count incremented earlier by calling
- * cpufreq_cpu_get().
+ * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
+ * @policy: cpufreq policy returned by cpufreq_cpu_get().
  */
 void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
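The reworked kerneldoc above boils down to a simple pairing rule: every successful cpufreq_cpu_get() must be balanced by a cpufreq_cpu_put(). A minimal caller sketch illustrating that pattern (the helper function below is hypothetical and not part of this commit):

#include <linux/cpufreq.h>

/* Hypothetical example: read a CPU's current policy limit.
 * cpufreq_cpu_get() takes a reference on the policy's kobject,
 * so it must be dropped with cpufreq_cpu_put() on every path.
 */
static unsigned int example_read_policy_max(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        unsigned int max;

        if (!policy)
                return 0;               /* no policy registered for this CPU */

        max = policy->max;              /* use the policy while the reference is held */
        cpufreq_cpu_put(policy);        /* balance the reference taken above */

        return max;
}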
@@ -1762,7 +1762,7 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
                /* Start over if the CPU may have been idle. */
                if (delta_ns > TICK_NSEC) {
                        cpu->iowait_boost = ONE_EIGHTH_FP;
-               } else if (cpu->iowait_boost) {
+               } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
                        cpu->iowait_boost <<= 1;
                        if (cpu->iowait_boost > int_tofp(1))
                                cpu->iowait_boost = int_tofp(1);
@@ -143,7 +143,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
        return ret;
 }
 
-static void __init pxa_cpufreq_init_voltages(void)
+static void pxa_cpufreq_init_voltages(void)
 {
        vcc_core = regulator_get(NULL, "vcc_core");
        if (IS_ERR(vcc_core)) {
@@ -159,7 +159,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
        return 0;
 }
 
-static void __init pxa_cpufreq_init_voltages(void) { }
+static void pxa_cpufreq_init_voltages(void) { }
 #endif
 
 static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
@@ -89,6 +89,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
        mutex_lock(&cpuidle_lock);
        if (__cpuidle_find_governor(gov->name) == NULL) {
                ret = 0;
+               list_add_tail(&gov->governor_list, &cpuidle_governors);
                if (!cpuidle_curr_governor ||
                    !strncasecmp(param_governor, gov->name, CPUIDLE_NAME_LEN) ||
                    (cpuidle_curr_governor->rating < gov->rating &&
@@ -186,7 +186,7 @@ static unsigned int get_typical_interval(struct menu_device *data,
        unsigned int min, max, thresh, avg;
        uint64_t sum, variance;
 
-       thresh = UINT_MAX; /* Discard outliers above this value */
+       thresh = INT_MAX; /* Discard outliers above this value */
 
 again:
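A note on why this threshold change matters (my reading of the diff, not text from the commit): the deviations feeding the variance sum are squared in 64-bit arithmetic, and a sample retained under the old UINT_MAX threshold can produce a square close to 2^64 on its own, so accumulating even a couple of such terms can overflow. With the threshold lowered to INT_MAX, each retained square stays below 2^62, leaving headroom. A standalone sketch of the two worst cases:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Worst-case squared deviation under each discard threshold. */
        uint64_t old_sq = (uint64_t)UINT32_MAX * UINT32_MAX;   /* ~1.8e19, nearly fills 64 bits */
        uint64_t new_sq = (uint64_t)INT32_MAX * INT32_MAX;     /* ~4.6e18, just under 2^62 */

        printf("UINT_MAX^2 = %llu\n", (unsigned long long)old_sq);
        printf("INT_MAX^2  = %llu\n", (unsigned long long)new_sq);
        return 0;
}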