Commit c8541203 authored by Rafael J. Wysocki

Merge back new material for v4.7.

parents 21f8a99c f47b72a1
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 obj-y                   += core.o cpu.o
+obj-$(CONFIG_OF)        += of.o
 obj-$(CONFIG_DEBUG_FS)  += debugfs.o
@@ -18,7 +18,6 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/of.h>
 #include <linux/slab.h>
 
 #include "opp.h"
@@ -119,8 +118,66 @@ void dev_pm_opp_free_cpufreq_table(struct device *dev,
 EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
 #endif /* CONFIG_CPU_FREQ */
 
-/* Required only for V1 bindings, as v2 can manage it from DT itself */
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
+{
+        struct device *cpu_dev;
+        int cpu;
+
+        WARN_ON(cpumask_empty(cpumask));
+
+        for_each_cpu(cpu, cpumask) {
+                cpu_dev = get_cpu_device(cpu);
+                if (!cpu_dev) {
+                        pr_err("%s: failed to get cpu%d device\n", __func__,
+                               cpu);
+                        continue;
+                }
+
+                if (of)
+                        dev_pm_opp_of_remove_table(cpu_dev);
+                else
+                        dev_pm_opp_remove_table(cpu_dev);
+        }
+}
+
+/**
+ * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
+ * @cpumask: cpumask for which OPP table needs to be removed
+ *
+ * This removes the OPP tables for CPUs present in the @cpumask.
+ * This should be used to remove all the OPPs entries associated with
+ * the cpus in @cpumask.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+        _dev_pm_opp_cpumask_remove_table(cpumask, false);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
+
+/**
+ * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
+ *
+ * This marks OPP table of the @cpu_dev as shared by the CPUs present in
+ * @cpumask.
+ *
+ * Returns -ENODEV if OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
+                                const struct cpumask *cpumask)
 {
         struct opp_device *opp_dev;
         struct opp_table *opp_table;
@@ -131,7 +188,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
 
         opp_table = _find_opp_table(cpu_dev);
         if (IS_ERR(opp_table)) {
-                ret = -EINVAL;
+                ret = PTR_ERR(opp_table);
                 goto unlock;
         }
 
@@ -152,6 +209,9 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
                                 __func__, cpu);
                         continue;
                 }
+
+                /* Mark opp-table as multiple CPUs are sharing it now */
+                opp_table->shared_opp = true;
         }
 unlock:
         mutex_unlock(&opp_table_lock);
@@ -160,112 +220,47 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
 
-#ifdef CONFIG_OF
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
-{
-        struct device *cpu_dev;
-        int cpu;
-
-        WARN_ON(cpumask_empty(cpumask));
-
-        for_each_cpu(cpu, cpumask) {
-                cpu_dev = get_cpu_device(cpu);
-                if (!cpu_dev) {
-                        pr_err("%s: failed to get cpu%d device\n", __func__,
-                               cpu);
-                        continue;
-                }
-
-                dev_pm_opp_of_remove_table(cpu_dev);
-        }
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
-
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
-        struct device *cpu_dev;
-        int cpu, ret = 0;
-
-        WARN_ON(cpumask_empty(cpumask));
-
-        for_each_cpu(cpu, cpumask) {
-                cpu_dev = get_cpu_device(cpu);
-                if (!cpu_dev) {
-                        pr_err("%s: failed to get cpu%d device\n", __func__,
-                               cpu);
-                        continue;
-                }
-
-                ret = dev_pm_opp_of_add_table(cpu_dev);
-                if (ret) {
-                        pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
-                               __func__, cpu, ret);
-
-                        /* Free all other OPPs */
-                        dev_pm_opp_of_cpumask_remove_table(cpumask);
-                        break;
-                }
-        }
-
-        return ret;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
-
-/*
- * Works only for OPP v2 bindings.
+/**
+ * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
+ * @cpu_dev: CPU device for which we do this operation
+ * @cpumask: cpumask to update with information of sharing CPUs
+ *
+ * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  *
- * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ * Returns -ENODEV if OPP table isn't already present.
+ *
+ * Locking: The internal opp_table and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
  */
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-        struct device_node *np, *tmp_np;
-        struct device *tcpu_dev;
-        int cpu, ret = 0;
-
-        /* Get OPP descriptor node */
-        np = _of_get_opp_desc_node(cpu_dev);
-        if (!np) {
-                dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
-                return -ENOENT;
-        }
-
-        cpumask_set_cpu(cpu_dev->id, cpumask);
-
-        /* OPPs are shared ? */
-        if (!of_property_read_bool(np, "opp-shared"))
-                goto put_cpu_node;
-
-        for_each_possible_cpu(cpu) {
-                if (cpu == cpu_dev->id)
-                        continue;
-
-                tcpu_dev = get_cpu_device(cpu);
-                if (!tcpu_dev) {
-                        dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
-                                __func__, cpu);
-                        ret = -ENODEV;
-                        goto put_cpu_node;
-                }
-
-                /* Get OPP descriptor node */
-                tmp_np = _of_get_opp_desc_node(tcpu_dev);
-                if (!tmp_np) {
-                        dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
-                                __func__);
-                        ret = -ENOENT;
-                        goto put_cpu_node;
-                }
-
-                /* CPUs are sharing opp node */
-                if (np == tmp_np)
-                        cpumask_set_cpu(cpu, cpumask);
-
-                of_node_put(tmp_np);
+        struct opp_device *opp_dev;
+        struct opp_table *opp_table;
+        int ret = 0;
+
+        mutex_lock(&opp_table_lock);
+
+        opp_table = _find_opp_table(cpu_dev);
+        if (IS_ERR(opp_table)) {
+                ret = PTR_ERR(opp_table);
+                goto unlock;
+        }
+
+        cpumask_clear(cpumask);
+
+        if (opp_table->shared_opp) {
+                list_for_each_entry(opp_dev, &opp_table->dev_list, node)
+                        cpumask_set_cpu(opp_dev->dev->id, cpumask);
+        } else {
+                cpumask_set_cpu(cpu_dev->id, cpumask);
         }
 
-put_cpu_node:
-        of_node_put(np);
+unlock:
+        mutex_unlock(&opp_table_lock);
+
         return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
-#endif
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
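
Note: the hunks above drop the DT-only cpumask helpers from this file and add OPP-table based replacements. dev_pm_opp_set_sharing_cpus() now marks an existing OPP table as shared (and propagates PTR_ERR() from the lookup instead of a blanket -EINVAL), dev_pm_opp_get_sharing_cpus() reads that sharing information back from the table, and dev_pm_opp_cpumask_remove_table() tears the tables down for a whole cpumask. The removed dev_pm_opp_of_* helpers remain declared in pm_opp.h, presumably provided by the new CONFIG_OF object added in the Makefile hunk above. Below is a minimal sketch, with hypothetical function names and made-up frequencies and voltages, of how a platform driver that builds its OPPs at runtime might use the new helpers:

/*
 * Hypothetical setup/teardown for a driver that creates OPPs at runtime
 * (no operating-points-v2 table in DT). Names and values are illustrative.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int example_init_cpu_opps(const struct cpumask *cpus)
{
        struct device *cpu_dev = get_cpu_device(cpumask_first(cpus));
        int ret;

        if (!cpu_dev)
                return -ENODEV;

        /* Register dynamic OPPs against the first CPU of the cluster. */
        ret = dev_pm_opp_add(cpu_dev, 500000000, 900000);       /* 500 MHz, 900 mV */
        if (ret)
                return ret;

        ret = dev_pm_opp_add(cpu_dev, 1000000000, 1100000);     /* 1 GHz, 1.1 V */
        if (ret)
                goto remove;

        /* Mark every CPU in @cpus as sharing cpu_dev's OPP table. */
        ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);
        if (ret)
                goto remove;

        return 0;

remove:
        dev_pm_opp_remove_table(cpu_dev);
        return ret;
}

static void example_exit_cpu_opps(const struct cpumask *cpus)
{
        /* Drops the OPP tables of every CPU in @cpus. */
        dev_pm_opp_cpumask_remove_table(cpus);
}

After this, dev_pm_opp_get_sharing_cpus() can be called with the same CPU device to recover the shared mask, for example when filling a cpufreq policy's CPU list.
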
@@ -28,6 +28,8 @@ struct regulator;
 /* Lock to allow exclusive modification to the device and opp lists */
 extern struct mutex opp_table_lock;
 
+extern struct list_head opp_tables;
+
 /*
  * Internal data structure organization with the OPP layer library is as
  * follows:
@@ -183,6 +185,18 @@ struct opp_table {
 struct opp_table *_find_opp_table(struct device *dev);
 struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
 struct device_node *_of_get_opp_desc_node(struct device *dev);
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
+struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify);
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic);
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+
+#ifdef CONFIG_OF
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
+#else
+static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 void opp_debug_remove_one(struct dev_pm_opp *opp);
...
@@ -65,6 +65,10 @@ void dev_pm_opp_put_prop_name(struct device *dev);
 int dev_pm_opp_set_regulator(struct device *dev, const char *name);
 void dev_pm_opp_put_regulator(struct device *dev);
 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
+void dev_pm_opp_remove_table(struct device *dev);
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
 #else
 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
@@ -109,25 +113,25 @@ static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
                                         unsigned long freq, bool available)
 {
-        return ERR_PTR(-EINVAL);
+        return ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
                                         unsigned long *freq)
 {
-        return ERR_PTR(-EINVAL);
+        return ERR_PTR(-ENOTSUPP);
 }
 
 static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
                                         unsigned long *freq)
 {
-        return ERR_PTR(-EINVAL);
+        return ERR_PTR(-ENOTSUPP);
 }
 
 static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
                                         unsigned long u_volt)
 {
-        return -EINVAL;
+        return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -147,73 +151,85 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
                                                         struct device *dev)
 {
-        return ERR_PTR(-EINVAL);
+        return ERR_PTR(-ENOTSUPP);
 }
 
 static inline int dev_pm_opp_set_supported_hw(struct device *dev,
                                               const u32 *versions,
                                               unsigned int count)
 {
-        return -EINVAL;
+        return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
 
 static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 {
-        return -EINVAL;
+        return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
 
 static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
 {
-        return -EINVAL;
+        return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_put_regulator(struct device *dev) {}
 
 static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+        return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
+{
+        return -ENOTSUPP;
+}
+
+static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
         return -EINVAL;
 }
 
+static inline void dev_pm_opp_remove_table(struct device *dev)
+{
+}
+
+static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
+{
+}
+
 #endif /* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
 int dev_pm_opp_of_add_table(struct device *dev);
 void dev_pm_opp_of_remove_table(struct device *dev);
-int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask);
-void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask);
-int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
-int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
+int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
+void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
 #else
 static inline int dev_pm_opp_of_add_table(struct device *dev)
 {
-        return -EINVAL;
+        return -ENOTSUPP;
 }
 
 static inline void dev_pm_opp_of_remove_table(struct device *dev)
 {
 }
 
-static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
-{
-        return -ENOSYS;
-}
-
-static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
 {
+        return -ENOTSUPP;
 }
 
-static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
 {
-        return -ENOSYS;
 }
 
-static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
 {
-        return -ENOSYS;
+        return -ENOTSUPP;
 }
 #endif
...
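
Note: in the header hunks above, the !CONFIG_PM_OPP and !CONFIG_OF stubs now return -ENOTSUPP where they used to return a mix of -EINVAL and -ENOSYS, and the cpumask-based prototypes take const struct cpumask * instead of cpumask_var_t, so callers can pass any existing mask (a policy's cpus field, cpu_possible_mask, and so on) without allocating one. A rough sketch of the usual init/teardown sequence for the DT helpers follows; the function names are hypothetical and the error handling is indicative only:

/*
 * Hypothetical per-policy init/exit for an operating-points-v2 based
 * cpufreq driver; only the dev_pm_opp_of_* calls come from the header above.
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int example_of_init_policy(unsigned int cpu, struct cpumask *policy_cpus)
{
        struct device *cpu_dev = get_cpu_device(cpu);
        int ret;

        if (!cpu_dev)
                return -ENODEV;

        /* CPUs listed in DT as sharing cpu_dev's OPP table. */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy_cpus);
        if (ret)
                return ret;     /* e.g. no v2 bindings, or stubbed out */

        /* Parse and register the OPP tables for all CPUs of the policy. */
        return dev_pm_opp_of_cpumask_add_table(policy_cpus);
}

static void example_of_exit_policy(const struct cpumask *policy_cpus)
{
        dev_pm_opp_of_cpumask_remove_table(policy_cpus);
}
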