Commit 7a330a54 authored by Rafael J. Wysocki's avatar Rafael J. Wysocki

Merge branch 'pm-cpufreq'

* pm-cpufreq: (60 commits)
  cpufreq: pmac32-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: pmac64-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: maple-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: arm_big_little: remove device tree parsing for cpu nodes
  cpufreq: kirkwood-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: spear-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: highbank-cpufreq: remove device tree parsing for cpu nodes
  cpufreq: cpufreq-cpu0: remove device tree parsing for cpu nodes
  cpufreq: imx6q-cpufreq: remove device tree parsing for cpu nodes
  drivers/bus: arm-cci: avoid parsing DT for cpu device nodes
  ARM: mvebu: remove device tree parsing for cpu nodes
  ARM: topology: remove hwid/MPIDR dependency from cpu_capacity
  of/device: add helper to get cpu device node from logical cpu index
  driver/core: cpu: initialize of_node in cpu's device structure
  ARM: DT/kernel: define ARM specific arch_match_cpu_phys_id
  of: move of_get_cpu_node implementation to DT core library
  powerpc: refactor of_get_cpu_node to support other architectures
  openrisc: remove undefined of_get_cpu_node declaration
  microblaze: remove undefined of_get_cpu_node declaration
  cpufreq: fix bad unlock balance on !CONFIG_SMP
  ...
parents c7878810 09198f8f
@@ -50,8 +50,6 @@ What shall this struct cpufreq_driver contain?
 cpufreq_driver.name - The name of this driver.
 
-cpufreq_driver.owner - THIS_MODULE;
-
 cpufreq_driver.init - A pointer to the per-CPU initialization
     function.
...
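The documentation hunk above drops the .owner line because the core now pins drivers itself (see the cpufreq_rwsem changes later in this merge). A minimal, purely hypothetical driver skeleton after that change could register like this; the names and callbacks are invented for illustration only.

/* Hypothetical sketch, not part of this series: a cpufreq driver
 * registered without .owner. The core now protects against driver
 * unload with an rwsem instead of per-driver module refcounts. */
#include <linux/cpufreq.h>
#include <linux/module.h>

static int demo_cpufreq_init(struct cpufreq_policy *policy)
{
    /* a real driver fills in cpuinfo limits and a frequency table here */
    return 0;
}

static int demo_cpufreq_verify(struct cpufreq_policy *policy)
{
    cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                 policy->cpuinfo.max_freq);
    return 0;
}

static struct cpufreq_driver demo_driver = {
    .name   = "demo-cpufreq",   /* no .owner = THIS_MODULE any more */
    .init   = demo_cpufreq_init,
    .verify = demo_cpufreq_verify,
};

static int __init demo_register(void)
{
    return cpufreq_register_driver(&demo_driver);
}
module_init(demo_register);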
@@ -169,6 +169,11 @@ void __init arm_dt_init_cpu_maps(void)
     }
 }
 
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+    return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+}
+
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
  * @dt_phys: physical address of dt blob
...
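The helper added above is the ARM hook that the generic of_get_cpu_node() (moved into the DT core later in this series) calls to match a cpu node against a logical CPU. A hedged usage sketch follows; the caller is made up for illustration.

/* Hypothetical caller: resolve a logical CPU to its device-tree node via
 * the generic helper, which ends up using arch_match_cpu_phys_id(). */
#include <linux/of.h>
#include <linux/printk.h>

static int demo_report_cpu_node(int cpu)
{
    struct device_node *cn = of_get_cpu_node(cpu, NULL);

    if (!cn)
        return -ENODEV;
    pr_info("cpu%d -> %s\n", cpu, cn->full_name);
    of_node_put(cn);    /* of_get_cpu_node() returns a refcounted node */
    return 0;
}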
@@ -74,12 +74,8 @@ struct cpu_efficiency table_efficiency[] = {
     {NULL, },
 };
 
-struct cpu_capacity {
-    unsigned long hwid;
-    unsigned long capacity;
-};
-
-struct cpu_capacity *cpu_capacity;
+unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu)   __cpu_capacity[cpu]
 
 unsigned long middle_capacity = 1;
@@ -100,15 +96,19 @@ static void __init parse_dt_topology(void)
     unsigned long capacity = 0;
     int alloc_size, cpu = 0;
 
-    alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
-    cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
+    alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity);
+    __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
 
-    while ((cn = of_find_node_by_type(cn, "cpu"))) {
-        const u32 *rate, *reg;
+    for_each_possible_cpu(cpu) {
+        const u32 *rate;
         int len;
 
-        if (cpu >= num_possible_cpus())
-            break;
+        /* too early to use cpu->of_node */
+        cn = of_get_cpu_node(cpu, NULL);
+        if (!cn) {
+            pr_err("missing device node for CPU %d\n", cpu);
+            continue;
+        }
 
         for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
             if (of_device_is_compatible(cn, cpu_eff->compatible))
@@ -124,12 +124,6 @@ static void __init parse_dt_topology(void)
             continue;
         }
 
-        reg = of_get_property(cn, "reg", &len);
-        if (!reg || len != 4) {
-            pr_err("%s missing reg property\n", cn->full_name);
-            continue;
-        }
-
         capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
 
         /* Save min capacity of the system */
@@ -140,13 +134,9 @@ static void __init parse_dt_topology(void)
         if (capacity > max_capacity)
             max_capacity = capacity;
 
-        cpu_capacity[cpu].capacity = capacity;
-        cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
+        cpu_capacity(cpu) = capacity;
     }
 
-    if (cpu < num_possible_cpus())
-        cpu_capacity[cpu].hwid = (unsigned long)(-1);
-
     /* If min and max capacities are equals, we bypass the update of the
      * cpu_scale because all CPUs have the same capacity. Otherwise, we
      * compute a middle_capacity factor that will ensure that the capacity
@@ -154,9 +144,7 @@ static void __init parse_dt_topology(void)
      * SCHED_POWER_SCALE, which is the default value, but with the
      * constraint explained near table_efficiency[].
      */
-    if (min_capacity == max_capacity)
-        cpu_capacity[0].hwid = (unsigned long)(-1);
-    else if (4*max_capacity < (3*(max_capacity + min_capacity)))
+    if (4*max_capacity < (3*(max_capacity + min_capacity)))
         middle_capacity = (min_capacity + max_capacity)
                 >> (SCHED_POWER_SHIFT+1);
     else
@@ -170,23 +158,12 @@ static void __init parse_dt_topology(void)
  * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
  * function returns directly for SMP system.
  */
-void update_cpu_power(unsigned int cpu, unsigned long hwid)
+void update_cpu_power(unsigned int cpu)
 {
-    unsigned int idx = 0;
-
-    /* look for the cpu's hwid in the cpu capacity table */
-    for (idx = 0; idx < num_possible_cpus(); idx++) {
-        if (cpu_capacity[idx].hwid == hwid)
-            break;
-
-        if (cpu_capacity[idx].hwid == -1)
-            return;
-    }
-
-    if (idx == num_possible_cpus())
+    if (!cpu_capacity(cpu))
         return;
 
-    set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
+    set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
 
     printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
         cpu, arch_scale_freq_power(NULL, cpu));
@@ -194,7 +171,7 @@ void update_cpu_power(unsigned int cpu, unsigned long hwid)
 
 #else
 static inline void parse_dt_topology(void) {}
-static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
+static inline void update_cpu_power(unsigned int cpuid) {}
 #endif
 
 /*
@@ -281,7 +258,7 @@ void store_cpu_topology(unsigned int cpuid)
 
     update_siblings_masks(cpuid);
 
-    update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
+    update_cpu_power(cpuid);
 
     printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
         cpuid, cpu_topology[cpuid].thread_id,
...
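To make the capacity scaling in the hunks above concrete, here is an illustrative stand-alone calculation. The clock rates are invented; the efficiency constants mirror the table_efficiency[] values the hunk refers to, and only the "capacities are close enough" branch of the middle_capacity computation is shown.

/* Illustrative arithmetic only (the numbers are made up, not taken from
 * this patch): how cpu_capacity() and middle_capacity combine into
 * relative cpu_power values for a big.LITTLE pair. */
#include <stdio.h>

#define SCHED_POWER_SHIFT 10

int main(void)
{
    /* capacity = (clock-frequency >> 20) * efficiency, as in parse_dt_topology() */
    unsigned long big    = (1200000000UL >> 20) * 3891; /* Cortex-A15 class */
    unsigned long little = (1000000000UL >> 20) * 2048; /* Cortex-A7 class  */
    unsigned long middle;

    if (4 * big < 3 * (big + little)) {
        middle = (little + big) >> (SCHED_POWER_SHIFT + 1);
        printf("big    cpu_power ~ %lu\n", big / middle);    /* ~1423 */
        printf("little cpu_power ~ %lu\n", little / middle); /* ~624  */
    }
    return 0;
}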
@@ -254,13 +254,12 @@ static void __init imx6q_opp_init(struct device *cpu_dev)
 {
     struct device_node *np;
 
-    np = of_find_node_by_path("/cpus/cpu@0");
+    np = of_node_get(cpu_dev->of_node);
     if (!np) {
         pr_warn("failed to find cpu0 node\n");
         return;
     }
 
-    cpu_dev->of_node = np;
     if (of_init_opp_table(cpu_dev)) {
         pr_warn("failed to init OPP table\n");
         goto put_node;
...
@@ -29,46 +29,41 @@
 #include "pmsu.h"
 #include "coherency.h"
 
+static struct clk *__init get_cpu_clk(int cpu)
+{
+    struct clk *cpu_clk;
+    struct device_node *np = of_get_cpu_node(cpu, NULL);
+
+    if (WARN(!np, "missing cpu node\n"))
+        return NULL;
+    cpu_clk = of_clk_get(np, 0);
+    if (WARN_ON(IS_ERR(cpu_clk)))
+        return NULL;
+    return cpu_clk;
+}
+
 void __init set_secondary_cpus_clock(void)
 {
-    int thiscpu;
+    int thiscpu, cpu;
     unsigned long rate;
-    struct clk *cpu_clk = NULL;
-    struct device_node *np = NULL;
+    struct clk *cpu_clk;
 
     thiscpu = smp_processor_id();
-    for_each_node_by_type(np, "cpu") {
-        int err;
-        int cpu;
-
-        err = of_property_read_u32(np, "reg", &cpu);
-        if (WARN_ON(err))
-            return;
-
-        if (cpu == thiscpu) {
-            cpu_clk = of_clk_get(np, 0);
-            break;
-        }
-    }
-    if (WARN_ON(IS_ERR(cpu_clk)))
+    cpu_clk = get_cpu_clk(thiscpu);
+    if (!cpu_clk)
         return;
     clk_prepare_enable(cpu_clk);
     rate = clk_get_rate(cpu_clk);
 
     /* set all the other CPU clk to the same rate than the boot CPU */
-    for_each_node_by_type(np, "cpu") {
-        int err;
-        int cpu;
-
-        err = of_property_read_u32(np, "reg", &cpu);
-        if (WARN_ON(err))
-            return;
-
-        if (cpu != thiscpu) {
-            cpu_clk = of_clk_get(np, 0);
-            clk_set_rate(cpu_clk, rate);
-        }
+    for_each_possible_cpu(cpu) {
+        if (cpu == thiscpu)
+            continue;
+        cpu_clk = get_cpu_clk(cpu);
+        if (!cpu_clk)
+            return;
+        clk_set_rate(cpu_clk, rate);
     }
 }
 
 static void armada_xp_secondary_init(unsigned int cpu)
...
@@ -50,9 +50,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 
 extern void kdump_move_device_tree(void);
 
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
 #endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
...
@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 
 extern void kdump_move_device_tree(void);
 
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
 /* Get the MAC address */
 extern const void *of_get_mac_address(struct device_node *np);
...
@@ -43,9 +43,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
 
 extern void kdump_move_device_tree(void);
 
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
 /* cache lookup */
 struct device_node *of_find_next_cache_node(struct device_node *np);
...
@@ -865,49 +865,10 @@ static int __init prom_reconfig_setup(void)
 __initcall(prom_reconfig_setup);
 #endif
 
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-    int hardid;
-    struct device_node *np;
-
-    hardid = get_hard_smp_processor_id(cpu);
-
-    for_each_node_by_type(np, "cpu") {
-        const u32 *intserv;
-        unsigned int plen, t;
-
-        /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
-         * fallback to "reg" property and assume no threads
-         */
-        intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
-                &plen);
-        if (intserv == NULL) {
-            const u32 *reg = of_get_property(np, "reg", NULL);
-            if (reg == NULL)
-                continue;
-            if (*reg == hardid) {
-                if (thread)
-                    *thread = 0;
-                return np;
-            }
-        } else {
-            plen /= sizeof(u32);
-            for (t = 0; t < plen; t++) {
-                if (hardid == intserv[t]) {
-                    if (thread)
-                        *thread = t;
-                    return np;
-                }
-            }
-        }
-    }
-    return NULL;
+    return (int)phys_id == get_hard_smp_processor_id(cpu);
 }
-EXPORT_SYMBOL(of_get_cpu_node);
 
 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
 static struct debugfs_blob_wrapper flat_dt_blob;
...
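With the PowerPC-specific walker above deleted, the node lookup lives in the DT core and each architecture only supplies the one-line match hook. The following is a simplified sketch of the shape of that core loop, paraphrased rather than copied from drivers/of; the real code also handles #address-cells and the thread index.

/* Simplified sketch (not the actual drivers/of code): walk cpu nodes and
 * let the architecture compare each "reg" value with a logical CPU. */
#include <linux/kernel.h>
#include <linux/of.h>

static struct device_node *demo_find_cpu_node(int cpu)
{
    struct device_node *cpun;

    for_each_node_by_type(cpun, "cpu") {
        const __be32 *reg = of_get_property(cpun, "reg", NULL);

        if (reg && arch_match_cpu_phys_id(cpu, be32_to_cpup(reg)))
            return cpun;    /* caller must of_node_put() */
    }
    return NULL;
}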
@@ -942,35 +942,6 @@ extern int set_tsc_mode(unsigned int val);
 
 extern u16 amd_get_nb_id(int cpu);
 
-struct aperfmperf {
-    u64 aperf, mperf;
-};
-
-static inline void get_aperfmperf(struct aperfmperf *am)
-{
-    WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
-
-    rdmsrl(MSR_IA32_APERF, am->aperf);
-    rdmsrl(MSR_IA32_MPERF, am->mperf);
-}
-
-#define APERFMPERF_SHIFT 10
-
-static inline
-unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
-                    struct aperfmperf *new)
-{
-    u64 aperf = new->aperf - old->aperf;
-    u64 mperf = new->mperf - old->mperf;
-    unsigned long ratio = aperf;
-
-    mperf >>= APERFMPERF_SHIFT;
-    if (mperf)
-        ratio = div64_u64(aperf, mperf);
-
-    return ratio;
-}
-
 extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
...
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/percpu.h>
 #include <linux/acpi.h>
+#include <linux/of.h>
 
 #include "base.h"
@@ -289,6 +290,7 @@ int register_cpu(struct cpu *cpu, int num)
     cpu->dev.release = cpu_device_release;
     cpu->dev.offline_disabled = !cpu->hotpluggable;
     cpu->dev.offline = !cpu_online(num);
+    cpu->dev.of_node = of_get_cpu_node(num, NULL);
 #ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
     cpu->dev.bus->uevent = arch_cpu_uevent;
 #endif
...
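Once register_cpu() fills dev.of_node as above, consumers can reach the cpu node through the cpu device, or through the of_cpu_device_node_get() helper that this series adds, instead of re-parsing /cpus by hand. A hypothetical consumer, for illustration only:

/* Hypothetical snippet: read cpu0's clock-latency via the registered
 * cpu device's of_node rather than by matching "reg" properties. */
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_device.h>

static int demo_cpu0_clock_latency(u32 *latency)
{
    struct device_node *np = of_cpu_device_node_get(0);

    if (!np)
        return -ENODEV;
    of_property_read_u32(np, "clock-latency", latency);
    of_node_put(np);
    return 0;
}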
@@ -122,17 +122,8 @@ EXPORT_SYMBOL_GPL(cci_ace_get_port);
 
 static void __init cci_ace_init_ports(void)
 {
-    int port, ac, cpu;
-    u64 hwid;
-    const u32 *cell;
-    struct device_node *cpun, *cpus;
-
-    cpus = of_find_node_by_path("/cpus");
-    if (WARN(!cpus, "Missing cpus node, bailing out\n"))
-        return;
-
-    if (WARN_ON(of_property_read_u32(cpus, "#address-cells", &ac)))
-        ac = of_n_addr_cells(cpus);
+    int port, cpu;
+    struct device_node *cpun;
 
     /*
      * Port index look-up speeds up the function disabling ports by CPU,
@@ -141,18 +132,13 @@ static void __init cci_ace_init_ports(void)
      * The stashed index array is initialized for all possible CPUs
      * at probe time.
      */
-    for_each_child_of_node(cpus, cpun) {
-        if (of_node_cmp(cpun->type, "cpu"))
-            continue;
-        cell = of_get_property(cpun, "reg", NULL);
-        if (WARN(!cell, "%s: missing reg property\n", cpun->full_name))
-            continue;
+    for_each_possible_cpu(cpu) {
+        /* too early to use cpu->of_node */
+        cpun = of_get_cpu_node(cpu, NULL);
 
-        hwid = of_read_number(cell, ac);
-        cpu = get_logical_index(hwid & MPIDR_HWID_BITMASK);
-
-        if (cpu < 0 || !cpu_possible(cpu))
+        if (WARN(!cpun, "Missing cpu device node\n"))
             continue;
 
         port = __cci_ace_get_port(cpun, ACE_PORT);
         if (port < 0)
             continue;
...
@@ -17,37 +17,47 @@ config ARM_DT_BL_CPUFREQ
       big.LITTLE platform. This gets frequency tables from DT.
 
 config ARM_EXYNOS_CPUFREQ
-    bool "SAMSUNG EXYNOS SoCs"
-    depends on ARCH_EXYNOS
+    bool
     select CPU_FREQ_TABLE
-    default y
-    help
-      This adds the CPUFreq driver common part for Samsung
-      EXYNOS SoCs.
-
-      If in doubt, say N.
 
 config ARM_EXYNOS4210_CPUFREQ
-    def_bool CPU_EXYNOS4210
+    bool "SAMSUNG EXYNOS4210"
+    depends on CPU_EXYNOS4210
+    default y
+    select ARM_EXYNOS_CPUFREQ
     help
       This adds the CPUFreq driver for Samsung EXYNOS4210
       SoC (S5PV310 or S5PC210).
 
+      If in doubt, say N.
+
 config ARM_EXYNOS4X12_CPUFREQ
-    def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+    bool "SAMSUNG EXYNOS4x12"
+    depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+    default y
+    select ARM_EXYNOS_CPUFREQ
     help
       This adds the CPUFreq driver for Samsung EXYNOS4X12
       SoC (EXYNOS4212 or EXYNOS4412).
 
+      If in doubt, say N.
+
 config ARM_EXYNOS5250_CPUFREQ
-    def_bool SOC_EXYNOS5250
+    bool "SAMSUNG EXYNOS5250"
+    depends on SOC_EXYNOS5250
+    default y
+    select ARM_EXYNOS_CPUFREQ
     help
       This adds the CPUFreq driver for Samsung EXYNOS5250
       SoC.
 
+      If in doubt, say N.
+
 config ARM_EXYNOS5440_CPUFREQ
-    def_bool SOC_EXYNOS5440
+    bool "SAMSUNG EXYNOS5440"
+    depends on SOC_EXYNOS5440
     depends on HAVE_CLK && PM_OPP && OF
+    default y
     select CPU_FREQ_TABLE
     help
       This adds the CPUFreq driver for Samsung EXYNOS5440
@@ -55,6 +65,8 @@ config ARM_EXYNOS5440_CPUFREQ
       different than previous exynos controllers so not using
       the common exynos framework.
 
+      If in doubt, say N.
+
 config ARM_HIGHBANK_CPUFREQ
     tristate "Calxeda Highbank-based"
     depends on ARCH_HIGHBANK
...
@@ -23,7 +23,7 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
 # powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K8)  += powernow-k8.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)  += pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)  += powernow-k6.o
...
@@ -45,7 +45,6 @@
 #include <asm/msr.h>
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
-#include "mperf.h"
 
 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -198,7 +197,7 @@ static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
     return sprintf(buf, "%u\n", boost_enabled);
 }
 
-static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+cpufreq_freq_attr_rw(cpb);
 #endif
 
 static int check_est_cpu(unsigned int cpuid)
@@ -710,7 +709,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         return blacklisted;
 #endif
 
-    data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
+    data = kzalloc(sizeof(*data), GFP_KERNEL);
     if (!data)
         return -ENOMEM;
@@ -800,7 +799,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
         goto err_unreg;
     }
 
-    data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+    data->freq_table = kmalloc(sizeof(*data->freq_table) *
             (perf->state_count+1), GFP_KERNEL);
     if (!data->freq_table) {
         result = -ENOMEM;
@@ -861,10 +860,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
     /* notify BIOS that we exist */
     acpi_processor_notify_smm(THIS_MODULE);
 
-    /* Check for APERF/MPERF support in hardware */
-    if (boot_cpu_has(X86_FEATURE_APERFMPERF))
-        acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
-
     pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
     for (i = 0; i < perf->state_count; i++)
         pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
@@ -941,7 +936,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
     .exit       = acpi_cpufreq_cpu_exit,
     .resume     = acpi_cpufreq_resume,
     .name       = "acpi-cpufreq",
-    .owner      = THIS_MODULE,
     .attr       = acpi_cpufreq_attr,
 };
...
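The cpb attribute above now uses the cpufreq_freq_attr_rw() helper. Roughly, and paraphrased rather than quoted (the exact definition lives in include/linux/cpufreq.h), the macro builds the same read/write freq_attr that the removed __ATTR() line spelled out by hand, pairing the attribute name with show_<name> and store_<name> callbacks:

/* Approximation for illustration, not the verbatim kernel macro. */
#define demo_freq_attr_rw(_name)                        \
static struct freq_attr _name =                         \
    __ATTR(_name, 0644, show_##_name, store_##_name)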
@@ -19,12 +19,11 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/module.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/opp.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -34,27 +33,13 @@
 /* get cpu node with valid operating-points */
 static struct device_node *get_cpu_node_with_valid_op(int cpu)
 {
-    struct device_node *np = NULL, *parent;
-    int count = 0;
+    struct device_node *np = of_cpu_device_node_get(cpu);
 
-    parent = of_find_node_by_path("/cpus");
-    if (!parent) {
-        pr_err("failed to find OF /cpus\n");
-        return NULL;
-    }
-
-    for_each_child_of_node(parent, np) {
-        if (count++ != cpu)
-            continue;
-        if (!of_get_property(np, "operating-points", NULL)) {
-            of_node_put(np);
-            np = NULL;
-        }
-        break;
+    if (!of_get_property(np, "operating-points", NULL)) {
+        of_node_put(np);
+        np = NULL;
     }
 
-    of_node_put(parent);
     return np;
 }
@@ -63,11 +48,12 @@ static int dt_init_opp_table(struct device *cpu_dev)
     struct device_node *np;
     int ret;
 
-    np = get_cpu_node_with_valid_op(cpu_dev->id);
-    if (!np)
-        return -ENODATA;
+    np = of_node_get(cpu_dev->of_node);
+    if (!np) {
+        pr_err("failed to find cpu%d node\n", cpu_dev->id);
+        return -ENOENT;
+    }
 
-    cpu_dev->of_node = np;
     ret = of_init_opp_table(cpu_dev);
     of_node_put(np);
@@ -79,9 +65,11 @@ static int dt_get_transition_latency(struct device *cpu_dev)
     struct device_node *np;
     u32 transition_latency = CPUFREQ_ETERNAL;
 
-    np = get_cpu_node_with_valid_op(cpu_dev->id);
-    if (!np)
+    np = of_node_get(cpu_dev->of_node);
+    if (!np) {
+        pr_info("Failed to find cpu node. Use CPUFREQ_ETERNAL transition latency\n");
         return CPUFREQ_ETERNAL;
+    }
 
     of_property_read_u32(np, "clock-latency", &transition_latency);
     of_node_put(np);
...
@@ -108,7 +108,6 @@ static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 
 static struct cpufreq_driver at32_driver = {
     .name       = "at32ap",
-    .owner      = THIS_MODULE,
     .init       = at32_cpufreq_driver_init,
     .verify     = at32_verify_speed,
     .target     = at32_set_target,
...
@@ -225,7 +225,6 @@ static struct cpufreq_driver bfin_driver = {
     .get        = bfin_getfreq_khz,
     .init       = __bfin_cpu_init,
     .name       = "bfin cpufreq",
-    .owner      = THIS_MODULE,
     .attr       = bfin_freq_attr,
 };
...
@@ -69,7 +69,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
 
     cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 
-    if (cpu_reg) {
+    if (!IS_ERR(cpu_reg)) {
         rcu_read_lock();
         opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
         if (IS_ERR(opp)) {
@@ -90,7 +90,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
          freqs.new / 1000, volt ? volt / 1000 : -1);
 
     /* scaling up? scale voltage before frequency */
-    if (cpu_reg && freqs.new > freqs.old) {
+    if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
         ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
         if (ret) {
             pr_err("failed to scale voltage up: %d\n", ret);
@@ -102,14 +102,14 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
     ret = clk_set_rate(cpu_clk, freq_exact);
     if (ret) {
         pr_err("failed to set clock rate: %d\n", ret);
-        if (cpu_reg)
+        if (!IS_ERR(cpu_reg))
             regulator_set_voltage_tol(cpu_reg, volt_old, tol);
         freqs.new = freqs.old;
         goto post_notify;
     }
 
     /* scaling down? scale voltage after frequency */
-    if (cpu_reg && freqs.new < freqs.old) {
+    if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
         ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
         if (ret) {
             pr_err("failed to scale voltage down: %d\n", ret);
@@ -174,29 +174,17 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
 
 static int cpu0_cpufreq_probe(struct platform_device *pdev)
 {
-    struct device_node *np, *parent;
+    struct device_node *np;
     int ret;
 
-    parent = of_find_node_by_path("/cpus");
-    if (!parent) {
-        pr_err("failed to find OF /cpus\n");
-        return -ENOENT;
-    }
-
-    for_each_child_of_node(parent, np) {
-        if (of_get_property(np, "operating-points", NULL))
-            break;
-    }
+    cpu_dev = &pdev->dev;
 
+    np = of_node_get(cpu_dev->of_node);
     if (!np) {
         pr_err("failed to find cpu0 node\n");
-        ret = -ENOENT;
-        goto out_put_parent;
+        return -ENOENT;
     }
 
-    cpu_dev = &pdev->dev;
-    cpu_dev->of_node = np;
-
     cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
     if (IS_ERR(cpu_reg)) {
         /*
@@ -210,7 +198,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
         }
         pr_warn("failed to get cpu0 regulator: %ld\n",
             PTR_ERR(cpu_reg));
-        cpu_reg = NULL;
     }
 
     cpu_clk = devm_clk_get(cpu_dev, NULL);
@@ -269,15 +256,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
     }
 
     of_node_put(np);
-    of_node_put(parent);
     return 0;
 
 out_free_table:
     opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
     of_node_put(np);
-out_put_parent:
-    of_node_put(parent);
     return ret;
 }
...
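After the hunk above, cpu_reg keeps the ERR_PTR cookie returned by devm_regulator_get() instead of being reset to NULL, so every optional-voltage path tests !IS_ERR(). A generic sketch of that pattern, not code from this driver:

/* Generic illustration: treat the regulator as optional by keeping the
 * ERR_PTR and gating voltage scaling on !IS_ERR(), while still deferring
 * the probe when the regulator simply is not ready yet. */
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int demo_scale(struct device *dev, int uV, int tol)
{
    struct regulator *reg = devm_regulator_get(dev, "cpu0");

    if (IS_ERR(reg)) {
        if (PTR_ERR(reg) == -EPROBE_DEFER)
            return -EPROBE_DEFER;   /* regulator not ready yet */
        return 0;                   /* no regulator: skip voltage scaling */
    }
    return regulator_set_voltage_tol(reg, uV, tol);
}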
@@ -379,7 +379,6 @@ static struct cpufreq_driver nforce2_driver = {
     .get = nforce2_get,
     .init = nforce2_cpu_init,
     .exit = nforce2_cpu_exit,
-    .owner = THIS_MODULE,
 };
 
 #ifdef MODULE
...
@@ -17,24 +17,17 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#include <asm/cputime.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/notifier.h>
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/tick.h>
 #include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 #include <linux/syscore_ops.h>
+#include <linux/tick.h>
 #include <trace/events/power.h>
 
 /**
@@ -44,8 +37,10 @@
  */
 static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
 static DEFINE_MUTEX(cpufreq_governor_lock);
+static LIST_HEAD(cpufreq_policy_list);
 
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
@@ -69,15 +64,14 @@ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
  * - Lock should not be held across
  *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  */
-static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu) \
 static int lock_policy_rwsem_##mode(int cpu) \
 { \
-    int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
-    BUG_ON(policy_cpu == -1); \
-    down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
+    struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
+    BUG_ON(!policy); \
+    down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
 \
     return 0; \
 }
@@ -88,14 +82,20 @@ lock_policy_rwsem(write, cpu);
 #define unlock_policy_rwsem(mode, cpu) \
 static void unlock_policy_rwsem_##mode(int cpu) \
 { \
-    int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
-    BUG_ON(policy_cpu == -1); \
-    up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
+    struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
+    BUG_ON(!policy); \
+    up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
 }
 
 unlock_policy_rwsem(read, cpu);
 unlock_policy_rwsem(write, cpu);
 
+/*
+ * rwsem to guarantee that cpufreq driver module doesn't unload during critical
+ * sections
+ */
+static DECLARE_RWSEM(cpufreq_rwsem);
+
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
         unsigned int event);
@@ -183,78 +183,46 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 
-static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
-    struct cpufreq_policy *data;
+    struct cpufreq_policy *policy = NULL;
     unsigned long flags;
 
-    if (cpu >= nr_cpu_ids)
-        goto err_out;
+    if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
+        return NULL;
+
+    if (!down_read_trylock(&cpufreq_rwsem))
+        return NULL;
 
     /* get the cpufreq driver */
     read_lock_irqsave(&cpufreq_driver_lock, flags);
 
-    if (!cpufreq_driver)
-        goto err_out_unlock;
-
-    if (!try_module_get(cpufreq_driver->owner))
-        goto err_out_unlock;
-
-    /* get the CPU */
-    data = per_cpu(cpufreq_cpu_data, cpu);
-
-    if (!data)
-        goto err_out_put_module;
-
-    if (!sysfs && !kobject_get(&data->kobj))
-        goto err_out_put_module;
-
-    read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-    return data;
+    if (cpufreq_driver) {
+        /* get the CPU */
+        policy = per_cpu(cpufreq_cpu_data, cpu);
+        if (policy)
+            kobject_get(&policy->kobj);
+    }
 
-err_out_put_module:
-    module_put(cpufreq_driver->owner);
-err_out_unlock:
     read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-err_out:
-    return NULL;
-}
 
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
-{
-    if (cpufreq_disabled())
-        return NULL;
+    if (!policy)
+        up_read(&cpufreq_rwsem);
 
-    return __cpufreq_cpu_get(cpu, false);
+    return policy;
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
 
-static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
-{
-    return __cpufreq_cpu_get(cpu, true);
-}
-
-static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
-{
-    if (!sysfs)
-        kobject_put(&data->kobj);
-    module_put(cpufreq_driver->owner);
-}
-
-void cpufreq_cpu_put(struct cpufreq_policy *data)
+void cpufreq_cpu_put(struct cpufreq_policy *policy)
 {
     if (cpufreq_disabled())
         return;
 
-    __cpufreq_cpu_put(data, false);
+    kobject_put(&policy->kobj);
+    up_read(&cpufreq_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
 
-static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
-{
-    __cpufreq_cpu_put(data, true);
-}
-
 /*********************************************************************
  *                     EXTERNALLY AFFECTING FREQUENCY CHANGES        *
  *********************************************************************/
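The rewritten helpers above replace per-driver module reference counting with the global cpufreq_rwsem. From a caller's point of view the bracketing is unchanged; a hedged usage sketch:

/* Usage sketch only: cpufreq_cpu_get() now takes cpufreq_rwsem for
 * reading (instead of try_module_get()) and cpufreq_cpu_put() drops it,
 * so existing get/put pairs keep working. */
#include <linux/cpufreq.h>

static unsigned int demo_read_cur_freq(unsigned int cpu)
{
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
    unsigned int cur = 0;

    if (policy) {
        cur = policy->cur;
        cpufreq_cpu_put(policy);    /* releases kobj ref + cpufreq_rwsem */
    }
    return cur;
}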
@@ -459,8 +427,8 @@ show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
 show_one(scaling_cur_freq, cur);
 
-static int __cpufreq_set_policy(struct cpufreq_policy *data,
-                struct cpufreq_policy *policy);
+static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+                struct cpufreq_policy *new_policy);
 
 /**
  * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
@@ -699,12 +667,12 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
     struct cpufreq_policy *policy = to_policy(kobj);
     struct freq_attr *fattr = to_attr(attr);
     ssize_t ret = -EINVAL;
-    policy = cpufreq_cpu_get_sysfs(policy->cpu);
-    if (!policy)
-        goto no_policy;
+
+    if (!down_read_trylock(&cpufreq_rwsem))
+        goto exit;
 
     if (lock_policy_rwsem_read(policy->cpu) < 0)
-        goto fail;
+        goto up_read;
 
     if (fattr->show)
         ret = fattr->show(policy, buf);
@@ -712,9 +680,10 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
         ret = -EIO;
 
     unlock_policy_rwsem_read(policy->cpu);
-fail:
-    cpufreq_cpu_put_sysfs(policy);
-no_policy:
+
+up_read:
+    up_read(&cpufreq_rwsem);
+exit:
     return ret;
 }
 
@@ -724,12 +693,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
     struct cpufreq_policy *policy = to_policy(kobj);
     struct freq_attr *fattr = to_attr(attr);
     ssize_t ret = -EINVAL;
-    policy = cpufreq_cpu_get_sysfs(policy->cpu);
-    if (!policy)
-        goto no_policy;
+
+    if (!down_read_trylock(&cpufreq_rwsem))
+        goto exit;
 
     if (lock_policy_rwsem_write(policy->cpu) < 0)
-        goto fail;
+        goto up_read;
 
     if (fattr->store)
         ret = fattr->store(policy, buf, count);
@@ -737,9 +706,10 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
         ret = -EIO;
 
     unlock_policy_rwsem_write(policy->cpu);
-fail:
-    cpufreq_cpu_put_sysfs(policy);
-no_policy:
+
+up_read:
+    up_read(&cpufreq_rwsem);
+exit:
     return ret;
 }
@@ -805,41 +775,32 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr)
 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
 
 /* symlink affected CPUs */
-static int cpufreq_add_dev_symlink(unsigned int cpu,
-                   struct cpufreq_policy *policy)
+static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
 {
     unsigned int j;
     int ret = 0;
 
     for_each_cpu(j, policy->cpus) {
-        struct cpufreq_policy *managed_policy;
         struct device *cpu_dev;
 
-        if (j == cpu)
+        if (j == policy->cpu)
             continue;
 
-        pr_debug("CPU %u already managed, adding link\n", j);
-        managed_policy = cpufreq_cpu_get(cpu);
+        pr_debug("Adding link for CPU: %u\n", j);
         cpu_dev = get_cpu_device(j);
         ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                     "cpufreq");
-        if (ret) {
-            cpufreq_cpu_put(managed_policy);
-            return ret;
-        }
+        if (ret)
+            break;
     }
     return ret;
 }
 
-static int cpufreq_add_dev_interface(unsigned int cpu,
-                     struct cpufreq_policy *policy,
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
                      struct device *dev)
 {
-    struct cpufreq_policy new_policy;
     struct freq_attr **drv_attr;
-    unsigned long flags;
     int ret = 0;
-    unsigned int j;
 
     /* prepare interface data */
     ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
@@ -871,18 +832,24 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
             goto err_out_kobj_put;
     }
 
-    write_lock_irqsave(&cpufreq_driver_lock, flags);
-    for_each_cpu(j, policy->cpus) {
-        per_cpu(cpufreq_cpu_data, j) = policy;
-        per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
-    }
-    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-    ret = cpufreq_add_dev_symlink(cpu, policy);
+    ret = cpufreq_add_dev_symlink(policy);
     if (ret)
         goto err_out_kobj_put;
 
-    memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+    return ret;
+
+err_out_kobj_put:
+    kobject_put(&policy->kobj);
+    wait_for_completion(&policy->kobj_unregister);
+    return ret;
+}
+
+static void cpufreq_init_policy(struct cpufreq_policy *policy)
+{
+    struct cpufreq_policy new_policy;
+    int ret = 0;
+
+    memcpy(&new_policy, policy, sizeof(*policy));
     /* assure that the starting sequence is run in __cpufreq_set_policy */
     policy->governor = NULL;
@@ -896,72 +863,106 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
         if (cpufreq_driver->exit)
             cpufreq_driver->exit(policy);
     }
-    return ret;
-
-err_out_kobj_put:
-    kobject_put(&policy->kobj);
-    wait_for_completion(&policy->kobj_unregister);
-    return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
-                  struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+                  unsigned int cpu, struct device *dev,
+                  bool frozen)
 {
-    struct cpufreq_policy *policy;
     int ret = 0, has_target = !!cpufreq_driver->target;
     unsigned long flags;
 
-    policy = cpufreq_cpu_get(sibling);
-    WARN_ON(!policy);
-
-    if (has_target)
-        __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+    if (has_target) {
+        ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+        if (ret) {
+            pr_err("%s: Failed to stop governor\n", __func__);
+            return ret;
+        }
+    }
 
-    lock_policy_rwsem_write(sibling);
+    lock_policy_rwsem_write(policy->cpu);
 
     write_lock_irqsave(&cpufreq_driver_lock, flags);
 
     cpumask_set_cpu(cpu, policy->cpus);
-    per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
     per_cpu(cpufreq_cpu_data, cpu) = policy;
     write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-    unlock_policy_rwsem_write(sibling);
+    unlock_policy_rwsem_write(policy->cpu);
 
     if (has_target) {
-        __cpufreq_governor(policy, CPUFREQ_GOV_START);
-        __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+        if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
+            (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+            pr_err("%s: Failed to start governor\n", __func__);
+            return ret;
+        }
     }
 
-    ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-    if (ret) {
-        cpufreq_cpu_put(policy);
-        return ret;
-    }
+    /* Don't touch sysfs links during light-weight init */
+    if (!frozen)
+        ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 
-    return 0;
+    return ret;
 }
 #endif
 
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
+{
+    struct cpufreq_policy *policy;
+    unsigned long flags;
+
+    write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+    policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
+
+    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+    return policy;
+}
+
+static struct cpufreq_policy *cpufreq_policy_alloc(void)
+{
+    struct cpufreq_policy *policy;
+
+    policy = kzalloc(sizeof(*policy), GFP_KERNEL);
+    if (!policy)
+        return NULL;
+
+    if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+        goto err_free_policy;
+
+    if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+        goto err_free_cpumask;
+
+    INIT_LIST_HEAD(&policy->policy_list);
+    return policy;
+
+err_free_cpumask:
+    free_cpumask_var(policy->cpus);
+err_free_policy:
+    kfree(policy);
+
+    return NULL;
+}
+
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
+{
+    free_cpumask_var(policy->related_cpus);
+    free_cpumask_var(policy->cpus);
+    kfree(policy);
+}
+
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+                 bool frozen)
 {
     unsigned int j, cpu = dev->id;
     int ret = -ENOMEM;
     struct cpufreq_policy *policy;
     unsigned long flags;
 #ifdef CONFIG_HOTPLUG_CPU
+    struct cpufreq_policy *tpolicy;
     struct cpufreq_governor *gov;
-    int sibling;
 #endif
 
     if (cpu_is_offline(cpu))
@@ -977,43 +978,38 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         cpufreq_cpu_put(policy);
         return 0;
     }
+#endif
+
+    if (!down_read_trylock(&cpufreq_rwsem))
+        return 0;
 
 #ifdef CONFIG_HOTPLUG_CPU
     /* Check if this cpu was hot-unplugged earlier and has siblings */
     read_lock_irqsave(&cpufreq_driver_lock, flags);
-    for_each_online_cpu(sibling) {
-        struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
-        if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+    list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
+        if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
             read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-            return cpufreq_add_policy_cpu(cpu, sibling, dev);
+            ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+            up_read(&cpufreq_rwsem);
+            return ret;
         }
     }
     read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
-#endif
 
-    if (!try_module_get(cpufreq_driver->owner)) {
-        ret = -EINVAL;
-        goto module_out;
-    }
+    if (frozen)
+        /* Restore the saved policy when doing light-weight init */
+        policy = cpufreq_policy_restore(cpu);
+    else
+        policy = cpufreq_policy_alloc();
 
-    policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
     if (!policy)
         goto nomem_out;
 
-    if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
-        goto err_free_policy;
-
-    if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
-        goto err_free_cpumask;
-
     policy->cpu = cpu;
     policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
     cpumask_copy(policy->cpus, cpumask_of(cpu));
 
-    /* Initially set CPU itself as the policy_cpu */
-    per_cpu(cpufreq_policy_cpu, cpu) = cpu;
-
     init_completion(&policy->kobj_unregister);
     INIT_WORK(&policy->update, handle_update);
@@ -1050,12 +1046,26 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
     }
 #endif
 
-    ret = cpufreq_add_dev_interface(cpu, policy, dev);
-    if (ret)
-        goto err_out_unregister;
+    write_lock_irqsave(&cpufreq_driver_lock, flags);
+    for_each_cpu(j, policy->cpus)
+        per_cpu(cpufreq_cpu_data, j) = policy;
+    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+    if (!frozen) {
+        ret = cpufreq_add_dev_interface(policy, dev);
+        if (ret)
+            goto err_out_unregister;
+    }
+
+    write_lock_irqsave(&cpufreq_driver_lock, flags);
+    list_add(&policy->policy_list, &cpufreq_policy_list);
+    write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+    cpufreq_init_policy(policy);
 
     kobject_uevent(&policy->kobj, KOBJ_ADD);
-    module_put(cpufreq_driver->owner);
+    up_read(&cpufreq_rwsem);
+
     pr_debug("initialization complete\n");
 
     return 0;
@@ -1066,32 +1076,33 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         per_cpu(cpufreq_cpu_data, j) = NULL;
     write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+    kobject_put(&policy->kobj);
+    wait_for_completion(&policy->kobj_unregister);
+
 err_set_policy_cpu:
-    per_cpu(cpufreq_policy_cpu, cpu) = -1;
-    free_cpumask_var(policy->related_cpus);
-err_free_cpumask:
-    free_cpumask_var(policy->cpus);
-err_free_policy:
-    kfree(policy);
+    cpufreq_policy_free(policy);
 nomem_out:
-    module_put(cpufreq_driver->owner);
-module_out:
+    up_read(&cpufreq_rwsem);
+
     return ret;
 }
-static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-    int j;
+    return __cpufreq_add_dev(dev, sif, false);
+}
 
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
     policy->last_cpu = policy->cpu;
     policy->cpu = cpu;
 
-    for_each_cpu(j, policy->cpus)
-        per_cpu(cpufreq_policy_cpu, j) = cpu;
-
 #ifdef CONFIG_CPU_FREQ_TABLE
     cpufreq_frequency_table_update_policy_cpu(policy);
 #endif
@@ -1099,6 +1110,37 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
             CPUFREQ_UPDATE_POLICY_CPU, policy);
 }
 
+static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
+                       unsigned int old_cpu, bool frozen)
+{
+    struct device *cpu_dev;
+    int ret;
+
+    /* first sibling now owns the new sysfs dir */
+    cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
+
+    /* Don't touch sysfs files during light-weight tear-down */
+    if (frozen)
+        return cpu_dev->id;
+
+    sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+    ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+    if (ret) {
+        pr_err("%s: Failed to move kobj: %d", __func__, ret);
+
+        WARN_ON(lock_policy_rwsem_write(old_cpu));
+        cpumask_set_cpu(old_cpu, policy->cpus);
+        unlock_policy_rwsem_write(old_cpu);
+
+        ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+                    "cpufreq");
+
+        return -EINVAL;
+    }
+
+    return cpu_dev->id;
+}
+
 /**
  * __cpufreq_remove_dev - remove a CPU device
  *
...@@ -1107,111 +1149,126 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) ...@@ -1107,111 +1149,126 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
* This routine frees the rwsem before returning. * This routine frees the rwsem before returning.
*/ */
static int __cpufreq_remove_dev(struct device *dev, static int __cpufreq_remove_dev(struct device *dev,
struct subsys_interface *sif) struct subsys_interface *sif, bool frozen)
{ {
unsigned int cpu = dev->id, ret, cpus; unsigned int cpu = dev->id, cpus;
int new_cpu, ret;
unsigned long flags; unsigned long flags;
struct cpufreq_policy *data; struct cpufreq_policy *policy;
struct kobject *kobj; struct kobject *kobj;
struct completion *cmp; struct completion *cmp;
struct device *cpu_dev;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu); pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
write_lock_irqsave(&cpufreq_driver_lock, flags); write_lock_irqsave(&cpufreq_driver_lock, flags);
data = per_cpu(cpufreq_cpu_data, cpu); policy = per_cpu(cpufreq_cpu_data, cpu);
per_cpu(cpufreq_cpu_data, cpu) = NULL;
/* Save the policy somewhere when doing a light-weight tear-down */
if (frozen)
per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags); write_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!data) { if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__); pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL; return -EINVAL;
} }
if (cpufreq_driver->target) if (cpufreq_driver->target) {
__cpufreq_governor(data, CPUFREQ_GOV_STOP); ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
if (ret) {
pr_err("%s: Failed to stop governor\n", __func__);
return ret;
}
}
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
if (!cpufreq_driver->setpolicy) if (!cpufreq_driver->setpolicy)
strncpy(per_cpu(cpufreq_cpu_governor, cpu), strncpy(per_cpu(cpufreq_cpu_governor, cpu),
data->governor->name, CPUFREQ_NAME_LEN); policy->governor->name, CPUFREQ_NAME_LEN);
#endif #endif
WARN_ON(lock_policy_rwsem_write(cpu)); WARN_ON(lock_policy_rwsem_write(cpu));
cpus = cpumask_weight(data->cpus); cpus = cpumask_weight(policy->cpus);
if (cpus > 1) if (cpus > 1)
cpumask_clear_cpu(cpu, data->cpus); cpumask_clear_cpu(cpu, policy->cpus);
unlock_policy_rwsem_write(cpu); unlock_policy_rwsem_write(cpu);
if (cpu != data->cpu) { if (cpu != policy->cpu && !frozen) {
sysfs_remove_link(&dev->kobj, "cpufreq"); sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) { } else if (cpus > 1) {
/* first sibling now owns the new sysfs dir */
cpu_dev = get_cpu_device(cpumask_first(data->cpus));
sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
ret = kobject_move(&data->kobj, &cpu_dev->kobj);
if (ret) {
pr_err("%s: Failed to move kobj: %d", __func__, ret);
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
if (new_cpu >= 0) {
WARN_ON(lock_policy_rwsem_write(cpu)); WARN_ON(lock_policy_rwsem_write(cpu));
cpumask_set_cpu(cpu, data->cpus); update_policy_cpu(policy, new_cpu);
write_lock_irqsave(&cpufreq_driver_lock, flags);
per_cpu(cpufreq_cpu_data, cpu) = data;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
unlock_policy_rwsem_write(cpu); unlock_policy_rwsem_write(cpu);
ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj, if (!frozen) {
"cpufreq"); pr_debug("%s: policy Kobject moved to cpu: %d "
return -EINVAL; "from: %d\n",__func__, new_cpu, cpu);
}
} }
WARN_ON(lock_policy_rwsem_write(cpu));
update_policy_cpu(data, cpu_dev->id);
unlock_policy_rwsem_write(cpu);
pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
__func__, cpu_dev->id, cpu);
} }
/* If cpu is last user of policy, free policy */ /* If cpu is last user of policy, free policy */
if (cpus == 1) { if (cpus == 1) {
if (cpufreq_driver->target) if (cpufreq_driver->target) {
__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); ret = __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
if (ret) {
pr_err("%s: Failed to exit governor\n",
__func__);
return ret;
}
}
if (!frozen) {
lock_policy_rwsem_read(cpu); lock_policy_rwsem_read(cpu);
kobj = &data->kobj; kobj = &policy->kobj;
cmp = &data->kobj_unregister; cmp = &policy->kobj_unregister;
unlock_policy_rwsem_read(cpu); unlock_policy_rwsem_read(cpu);
kobject_put(kobj); kobject_put(kobj);
/* we need to make sure that the underlying kobj is actually /*
* not referenced anymore by anybody before we proceed with * We need to make sure that the underlying kobj is
* unloading. * actually not referenced anymore by anybody before we
* proceed with unloading.
*/ */
pr_debug("waiting for dropping of refcount\n"); pr_debug("waiting for dropping of refcount\n");
wait_for_completion(cmp); wait_for_completion(cmp);
pr_debug("wait complete\n"); pr_debug("wait complete\n");
}
/*
* Perform the ->exit() even during light-weight tear-down,
* since this is a core component, and is essential for the
* subsequent light-weight ->init() to succeed.
*/
if (cpufreq_driver->exit) if (cpufreq_driver->exit)
cpufreq_driver->exit(data); cpufreq_driver->exit(policy);
free_cpumask_var(data->related_cpus); /* Remove policy from list of active policies */
free_cpumask_var(data->cpus); write_lock_irqsave(&cpufreq_driver_lock, flags);
kfree(data); list_del(&policy->policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
if (!frozen)
cpufreq_policy_free(policy);
} else { } else {
pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
cpufreq_cpu_put(data);
if (cpufreq_driver->target) { if (cpufreq_driver->target) {
__cpufreq_governor(data, CPUFREQ_GOV_START); if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
__cpufreq_governor(data, CPUFREQ_GOV_LIMITS); (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
pr_err("%s: Failed to start governor\n",
__func__);
return ret;
}
} }
} }
per_cpu(cpufreq_policy_cpu, cpu) = -1; per_cpu(cpufreq_cpu_data, cpu) = NULL;
return 0; return 0;
} }
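
A light-weight (frozen) removal now parks the policy in cpufreq_cpu_data_fallback and keeps its kobject alive for the subsequent light-weight ->init(), while a real hot-unplug still frees everything. A minimal userspace model of those two paths, with made-up types and array sizes rather than kernel code:

/* Illustrative model only, not kernel code: a "frozen" removal parks the
 * policy for reuse on resume, a normal removal frees it. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct policy { int cpu; };

static struct policy *cpu_data[4];      /* stands in for cpufreq_cpu_data */
static struct policy *cpu_fallback[4];  /* stands in for cpufreq_cpu_data_fallback */

static void remove_dev(int cpu, bool frozen)
{
        struct policy *p = cpu_data[cpu];

        cpu_data[cpu] = NULL;
        if (!p)
                return;
        if (frozen) {
                cpu_fallback[cpu] = p;          /* keep for the light-weight re-init */
                printf("cpu%d: policy parked for resume\n", cpu);
        } else {
                free(p);                        /* full tear-down */
                printf("cpu%d: policy freed\n", cpu);
        }
}

int main(void)
{
        cpu_data[1] = calloc(1, sizeof(struct policy));

        remove_dev(1, true);                    /* suspend path: policy survives */
        cpu_data[1] = cpu_fallback[1];          /* light-weight add reclaims it */
        cpu_fallback[1] = NULL;
        remove_dev(1, false);                   /* real hot-unplug: policy is freed */
        return 0;
}
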
...@@ -1223,7 +1280,7 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) ...@@ -1223,7 +1280,7 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu)) if (cpu_is_offline(cpu))
return 0; return 0;
retval = __cpufreq_remove_dev(dev, sif); retval = __cpufreq_remove_dev(dev, sif, false);
return retval; return retval;
} }
...@@ -1344,10 +1401,9 @@ static unsigned int __cpufreq_get(unsigned int cpu) ...@@ -1344,10 +1401,9 @@ static unsigned int __cpufreq_get(unsigned int cpu)
unsigned int cpufreq_get(unsigned int cpu) unsigned int cpufreq_get(unsigned int cpu)
{ {
unsigned int ret_freq = 0; unsigned int ret_freq = 0;
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
if (!policy) if (!down_read_trylock(&cpufreq_rwsem))
goto out; return 0;
if (unlikely(lock_policy_rwsem_read(cpu))) if (unlikely(lock_policy_rwsem_read(cpu)))
goto out_policy; goto out_policy;
...@@ -1357,8 +1413,8 @@ unsigned int cpufreq_get(unsigned int cpu) ...@@ -1357,8 +1413,8 @@ unsigned int cpufreq_get(unsigned int cpu)
unlock_policy_rwsem_read(cpu); unlock_policy_rwsem_read(cpu);
out_policy: out_policy:
cpufreq_cpu_put(policy); up_read(&cpufreq_rwsem);
out:
return ret_freq; return ret_freq;
} }
EXPORT_SYMBOL(cpufreq_get); EXPORT_SYMBOL(cpufreq_get);
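
cpufreq_get() now serializes against driver unregistration by trying to take cpufreq_rwsem for reading and reporting 0 when it cannot, instead of pinning the policy with cpufreq_cpu_get(). A hedged userspace sketch of that pattern, using a pthread rwlock in place of the kernel rw_semaphore:

/* Sketch of the "bail out if the subsystem is going away" pattern. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t subsys_lock = PTHREAD_RWLOCK_INITIALIZER;

static unsigned int get_freq(unsigned int cpu)
{
        unsigned int freq;

        if (pthread_rwlock_tryrdlock(&subsys_lock))
                return 0;               /* driver being unregistered: report 0 */

        freq = 1000000;                 /* placeholder for the real per-CPU query */
        (void)cpu;
        pthread_rwlock_unlock(&subsys_lock);
        return freq;
}

int main(void)
{
        printf("cpu0: %u kHz\n", get_freq(0));
        return 0;
}
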
...@@ -1381,23 +1437,23 @@ static int cpufreq_bp_suspend(void) ...@@ -1381,23 +1437,23 @@ static int cpufreq_bp_suspend(void)
int ret = 0; int ret = 0;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct cpufreq_policy *cpu_policy; struct cpufreq_policy *policy;
pr_debug("suspending cpu %u\n", cpu); pr_debug("suspending cpu %u\n", cpu);
/* If there's no policy for the boot CPU, we have nothing to do. */ /* If there's no policy for the boot CPU, we have nothing to do. */
cpu_policy = cpufreq_cpu_get(cpu); policy = cpufreq_cpu_get(cpu);
if (!cpu_policy) if (!policy)
return 0; return 0;
if (cpufreq_driver->suspend) { if (cpufreq_driver->suspend) {
ret = cpufreq_driver->suspend(cpu_policy); ret = cpufreq_driver->suspend(policy);
if (ret) if (ret)
printk(KERN_ERR "cpufreq: suspend failed in ->suspend " printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
"step on CPU %u\n", cpu_policy->cpu); "step on CPU %u\n", policy->cpu);
} }
cpufreq_cpu_put(cpu_policy); cpufreq_cpu_put(policy);
return ret; return ret;
} }
...@@ -1419,28 +1475,28 @@ static void cpufreq_bp_resume(void) ...@@ -1419,28 +1475,28 @@ static void cpufreq_bp_resume(void)
int ret = 0; int ret = 0;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct cpufreq_policy *cpu_policy; struct cpufreq_policy *policy;
pr_debug("resuming cpu %u\n", cpu); pr_debug("resuming cpu %u\n", cpu);
/* If there's no policy for the boot CPU, we have nothing to do. */ /* If there's no policy for the boot CPU, we have nothing to do. */
cpu_policy = cpufreq_cpu_get(cpu); policy = cpufreq_cpu_get(cpu);
if (!cpu_policy) if (!policy)
return; return;
if (cpufreq_driver->resume) { if (cpufreq_driver->resume) {
ret = cpufreq_driver->resume(cpu_policy); ret = cpufreq_driver->resume(policy);
if (ret) { if (ret) {
printk(KERN_ERR "cpufreq: resume failed in ->resume " printk(KERN_ERR "cpufreq: resume failed in ->resume "
"step on CPU %u\n", cpu_policy->cpu); "step on CPU %u\n", policy->cpu);
goto fail; goto fail;
} }
} }
schedule_work(&cpu_policy->update); schedule_work(&policy->update);
fail: fail:
cpufreq_cpu_put(cpu_policy); cpufreq_cpu_put(policy);
} }
static struct syscore_ops cpufreq_syscore_ops = { static struct syscore_ops cpufreq_syscore_ops = {
...@@ -1594,18 +1650,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy, ...@@ -1594,18 +1650,6 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
} }
EXPORT_SYMBOL_GPL(cpufreq_driver_target); EXPORT_SYMBOL_GPL(cpufreq_driver_target);
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
if (cpufreq_disabled())
return 0;
if (!cpufreq_driver->getavg)
return 0;
return cpufreq_driver->getavg(policy, cpu);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/* /*
* when "event" is CPUFREQ_GOV_LIMITS * when "event" is CPUFREQ_GOV_LIMITS
*/ */
...@@ -1640,6 +1684,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, ...@@ -1640,6 +1684,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
} }
} }
if (event == CPUFREQ_GOV_POLICY_INIT)
if (!try_module_get(policy->governor->owner)) if (!try_module_get(policy->governor->owner))
return -EINVAL; return -EINVAL;
...@@ -1677,11 +1722,8 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, ...@@ -1677,11 +1722,8 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
mutex_unlock(&cpufreq_governor_lock); mutex_unlock(&cpufreq_governor_lock);
} }
/* we keep one module reference alive for if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
each CPU governed by this CPU */ ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
if ((event != CPUFREQ_GOV_START) || ret)
module_put(policy->governor->owner);
if ((event == CPUFREQ_GOV_STOP) && !ret)
module_put(policy->governor->owner); module_put(policy->governor->owner);
return ret; return ret;
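
The module reference on the governor is now taken once per policy at CPUFREQ_GOV_POLICY_INIT and dropped either when that INIT fails or at a successful CPUFREQ_GOV_POLICY_EXIT, rather than once per started CPU. A small standalone model of that lifecycle, with a plain counter standing in for try_module_get()/module_put():

/* Illustrative refcount model, not kernel code. */
#include <assert.h>
#include <stdio.h>

enum gov_event { GOV_POLICY_INIT, GOV_START, GOV_STOP, GOV_POLICY_EXIT };

static int module_refs;

static int governor(enum gov_event ev, int ret)
{
        if (ev == GOV_POLICY_INIT)
                module_refs++;                          /* try_module_get() */

        /* ... the governor callback would run here and produce ret ... */

        if ((ev == GOV_POLICY_INIT && ret) ||
            (ev == GOV_POLICY_EXIT && !ret))
                module_refs--;                          /* module_put() */
        return ret;
}

int main(void)
{
        governor(GOV_POLICY_INIT, 0);
        governor(GOV_START, 0);
        governor(GOV_STOP, 0);
        governor(GOV_POLICY_EXIT, 0);
        assert(module_refs == 0);                       /* balanced over the lifecycle */
        printf("module refs: %d\n", module_refs);
        return 0;
}
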
...@@ -1761,7 +1803,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) ...@@ -1761,7 +1803,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
if (!cpu_policy) if (!cpu_policy)
return -EINVAL; return -EINVAL;
memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); memcpy(policy, cpu_policy, sizeof(*policy));
cpufreq_cpu_put(cpu_policy); cpufreq_cpu_put(cpu_policy);
return 0; return 0;
...@@ -1772,95 +1814,94 @@ EXPORT_SYMBOL(cpufreq_get_policy); ...@@ -1772,95 +1814,94 @@ EXPORT_SYMBOL(cpufreq_get_policy);
* data : current policy. * data : current policy.
* policy : policy to be set. * policy : policy to be set.
*/ */
static int __cpufreq_set_policy(struct cpufreq_policy *data, static int __cpufreq_set_policy(struct cpufreq_policy *policy,
struct cpufreq_policy *policy) struct cpufreq_policy *new_policy)
{ {
int ret = 0, failed = 1; int ret = 0, failed = 1;
pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
policy->min, policy->max); new_policy->min, new_policy->max);
memcpy(&policy->cpuinfo, &data->cpuinfo, memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
sizeof(struct cpufreq_cpuinfo));
if (policy->min > data->max || policy->max < data->min) { if (new_policy->min > policy->max || new_policy->max < policy->min) {
ret = -EINVAL; ret = -EINVAL;
goto error_out; goto error_out;
} }
/* verify the cpu speed can be set within this limit */ /* verify the cpu speed can be set within this limit */
ret = cpufreq_driver->verify(policy); ret = cpufreq_driver->verify(new_policy);
if (ret) if (ret)
goto error_out; goto error_out;
/* adjust if necessary - all reasons */ /* adjust if necessary - all reasons */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list, blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_ADJUST, policy); CPUFREQ_ADJUST, new_policy);
/* adjust if necessary - hardware incompatibility*/ /* adjust if necessary - hardware incompatibility*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list, blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_INCOMPATIBLE, policy); CPUFREQ_INCOMPATIBLE, new_policy);
/* /*
* verify the cpu speed can be set within this limit, which might be * verify the cpu speed can be set within this limit, which might be
* different to the first one * different to the first one
*/ */
ret = cpufreq_driver->verify(policy); ret = cpufreq_driver->verify(new_policy);
if (ret) if (ret)
goto error_out; goto error_out;
/* notification of the new policy */ /* notification of the new policy */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list, blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_NOTIFY, policy); CPUFREQ_NOTIFY, new_policy);
data->min = policy->min; policy->min = new_policy->min;
data->max = policy->max; policy->max = new_policy->max;
pr_debug("new min and max freqs are %u - %u kHz\n", pr_debug("new min and max freqs are %u - %u kHz\n",
data->min, data->max); policy->min, policy->max);
if (cpufreq_driver->setpolicy) { if (cpufreq_driver->setpolicy) {
data->policy = policy->policy; policy->policy = new_policy->policy;
pr_debug("setting range\n"); pr_debug("setting range\n");
ret = cpufreq_driver->setpolicy(policy); ret = cpufreq_driver->setpolicy(new_policy);
} else { } else {
if (policy->governor != data->governor) { if (new_policy->governor != policy->governor) {
/* save old, working values */ /* save old, working values */
struct cpufreq_governor *old_gov = data->governor; struct cpufreq_governor *old_gov = policy->governor;
pr_debug("governor switch\n"); pr_debug("governor switch\n");
/* end old governor */ /* end old governor */
if (data->governor) { if (policy->governor) {
__cpufreq_governor(data, CPUFREQ_GOV_STOP); __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
unlock_policy_rwsem_write(policy->cpu); unlock_policy_rwsem_write(new_policy->cpu);
__cpufreq_governor(data, __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT); CPUFREQ_GOV_POLICY_EXIT);
lock_policy_rwsem_write(policy->cpu); lock_policy_rwsem_write(new_policy->cpu);
} }
/* start new governor */ /* start new governor */
data->governor = policy->governor; policy->governor = new_policy->governor;
if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) { if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) { if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
failed = 0; failed = 0;
} else { } else {
unlock_policy_rwsem_write(policy->cpu); unlock_policy_rwsem_write(new_policy->cpu);
__cpufreq_governor(data, __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT); CPUFREQ_GOV_POLICY_EXIT);
lock_policy_rwsem_write(policy->cpu); lock_policy_rwsem_write(new_policy->cpu);
} }
} }
if (failed) { if (failed) {
/* new governor failed, so re-start old one */ /* new governor failed, so re-start old one */
pr_debug("starting governor %s failed\n", pr_debug("starting governor %s failed\n",
data->governor->name); policy->governor->name);
if (old_gov) { if (old_gov) {
data->governor = old_gov; policy->governor = old_gov;
__cpufreq_governor(data, __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_INIT); CPUFREQ_GOV_POLICY_INIT);
__cpufreq_governor(data, __cpufreq_governor(policy,
CPUFREQ_GOV_START); CPUFREQ_GOV_START);
} }
ret = -EINVAL; ret = -EINVAL;
...@@ -1869,7 +1910,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, ...@@ -1869,7 +1910,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
/* might be a policy change, too, so fall through */ /* might be a policy change, too, so fall through */
} }
pr_debug("governor: change or update limits\n"); pr_debug("governor: change or update limits\n");
__cpufreq_governor(data, CPUFREQ_GOV_LIMITS); ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
} }
error_out: error_out:
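
On a governor change, __cpufreq_set_policy() stops and exits the old governor, then attempts POLICY_INIT and START on the new one, and restarts the old governor if the new one cannot be brought up. A compact standalone model of that rollback sequence (governor names and the injected failure are invented for the example):

/* Illustrative model of the governor-switch rollback, not kernel code. */
#include <stdio.h>
#include <string.h>

static int gov_event(const char *gov, const char *ev)
{
        printf("%s: %s\n", gov, ev);
        /* pretend the "broken" governor refuses to start */
        return !strcmp(gov, "broken") && !strcmp(ev, "START") ? -1 : 0;
}

static const char *switch_governor(const char *old_gov, const char *new_gov)
{
        int failed = 1;

        if (old_gov) {
                gov_event(old_gov, "STOP");
                gov_event(old_gov, "POLICY_EXIT");
        }
        if (!gov_event(new_gov, "POLICY_INIT")) {
                if (!gov_event(new_gov, "START"))
                        failed = 0;
                else
                        gov_event(new_gov, "POLICY_EXIT");
        }
        if (!failed)
                return new_gov;

        if (old_gov) {                          /* re-start the old, working governor */
                gov_event(old_gov, "POLICY_INIT");
                gov_event(old_gov, "START");
        }
        return old_gov;
}

int main(void)
{
        printf("active governor: %s\n", switch_governor("ondemand", "broken"));
        return 0;
}
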
...@@ -1885,11 +1926,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, ...@@ -1885,11 +1926,11 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
*/ */
int cpufreq_update_policy(unsigned int cpu) int cpufreq_update_policy(unsigned int cpu)
{ {
struct cpufreq_policy *data = cpufreq_cpu_get(cpu); struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy policy; struct cpufreq_policy new_policy;
int ret; int ret;
if (!data) { if (!policy) {
ret = -ENODEV; ret = -ENODEV;
goto no_policy; goto no_policy;
} }
...@@ -1900,34 +1941,34 @@ int cpufreq_update_policy(unsigned int cpu) ...@@ -1900,34 +1941,34 @@ int cpufreq_update_policy(unsigned int cpu)
} }
pr_debug("updating policy for CPU %u\n", cpu); pr_debug("updating policy for CPU %u\n", cpu);
memcpy(&policy, data, sizeof(struct cpufreq_policy)); memcpy(&new_policy, policy, sizeof(*policy));
policy.min = data->user_policy.min; new_policy.min = policy->user_policy.min;
policy.max = data->user_policy.max; new_policy.max = policy->user_policy.max;
policy.policy = data->user_policy.policy; new_policy.policy = policy->user_policy.policy;
policy.governor = data->user_policy.governor; new_policy.governor = policy->user_policy.governor;
/* /*
* BIOS might change freq behind our back * BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change * -> ask driver for current freq and notify governors about a change
*/ */
if (cpufreq_driver->get) { if (cpufreq_driver->get) {
policy.cur = cpufreq_driver->get(cpu); new_policy.cur = cpufreq_driver->get(cpu);
if (!data->cur) { if (!policy->cur) {
pr_debug("Driver did not initialize current freq"); pr_debug("Driver did not initialize current freq");
data->cur = policy.cur; policy->cur = new_policy.cur;
} else { } else {
if (data->cur != policy.cur && cpufreq_driver->target) if (policy->cur != new_policy.cur && cpufreq_driver->target)
cpufreq_out_of_sync(cpu, data->cur, cpufreq_out_of_sync(cpu, policy->cur,
policy.cur); new_policy.cur);
} }
} }
ret = __cpufreq_set_policy(data, &policy); ret = __cpufreq_set_policy(policy, &new_policy);
unlock_policy_rwsem_write(cpu); unlock_policy_rwsem_write(cpu);
fail: fail:
cpufreq_cpu_put(data); cpufreq_cpu_put(policy);
no_policy: no_policy:
return ret; return ret;
} }
...@@ -1938,21 +1979,26 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb, ...@@ -1938,21 +1979,26 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
{ {
unsigned int cpu = (unsigned long)hcpu; unsigned int cpu = (unsigned long)hcpu;
struct device *dev; struct device *dev;
bool frozen = false;
dev = get_cpu_device(cpu); dev = get_cpu_device(cpu);
if (dev) { if (dev) {
switch (action) {
if (action & CPU_TASKS_FROZEN)
frozen = true;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE: case CPU_ONLINE:
case CPU_ONLINE_FROZEN: __cpufreq_add_dev(dev, NULL, frozen);
cpufreq_add_dev(dev, NULL); cpufreq_update_policy(cpu);
break; break;
case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN: __cpufreq_remove_dev(dev, NULL, frozen);
__cpufreq_remove_dev(dev, NULL);
break; break;
case CPU_DOWN_FAILED: case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN: __cpufreq_add_dev(dev, NULL, frozen);
cpufreq_add_dev(dev, NULL);
break; break;
} }
} }
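
The hotplug callback folds the _FROZEN variants into the normal cases by masking the action with CPU_TASKS_FROZEN and passing the result down as a bool, so suspend/resume takes the light-weight add/remove paths. A small sketch of that masking; the numeric constants below are stand-ins, not the kernel's definitions:

/* Illustrative sketch of action masking in a hotplug notifier. */
#include <stdbool.h>
#include <stdio.h>

#define CPU_ONLINE        0x02
#define CPU_DOWN_PREPARE  0x05
#define CPU_TASKS_FROZEN  0x10

static void cpu_callback(unsigned long action, unsigned int cpu)
{
        bool frozen = action & CPU_TASKS_FROZEN;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                printf("cpu%u: add (frozen=%d)\n", cpu, frozen);
                break;
        case CPU_DOWN_PREPARE:
                printf("cpu%u: remove (frozen=%d)\n", cpu, frozen);
                break;
        }
}

int main(void)
{
        cpu_callback(CPU_ONLINE, 1);                            /* normal hotplug */
        cpu_callback(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN, 1);   /* suspend path */
        return 0;
}
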
...@@ -2059,9 +2105,13 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) ...@@ -2059,9 +2105,13 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
subsys_interface_unregister(&cpufreq_interface); subsys_interface_unregister(&cpufreq_interface);
unregister_hotcpu_notifier(&cpufreq_cpu_notifier); unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
down_write(&cpufreq_rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags); write_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL; cpufreq_driver = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags); write_unlock_irqrestore(&cpufreq_driver_lock, flags);
up_write(&cpufreq_rwsem);
return 0; return 0;
} }
...@@ -2074,10 +2124,8 @@ static int __init cpufreq_core_init(void) ...@@ -2074,10 +2124,8 @@ static int __init cpufreq_core_init(void)
if (cpufreq_disabled()) if (cpufreq_disabled())
return -ENODEV; return -ENODEV;
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu)
per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
cpufreq_global_kobject = kobject_create(); cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject); BUG_ON(!cpufreq_global_kobject);
......
...@@ -11,19 +11,7 @@ ...@@ -11,19 +11,7 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include "cpufreq_governor.h" #include "cpufreq_governor.h"
/* Conservative governor macros */ /* Conservative governor macros */
...@@ -329,7 +317,7 @@ static int cs_init(struct dbs_data *dbs_data) ...@@ -329,7 +317,7 @@ static int cs_init(struct dbs_data *dbs_data)
{ {
struct cs_dbs_tuners *tuners; struct cs_dbs_tuners *tuners;
tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL); tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
if (!tuners) { if (!tuners) {
pr_err("%s: kzalloc failed\n", __func__); pr_err("%s: kzalloc failed\n", __func__);
return -ENOMEM; return -ENOMEM;
......
...@@ -16,15 +16,9 @@ ...@@ -16,15 +16,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cputime.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "cpufreq_governor.h" #include "cpufreq_governor.h"
...@@ -53,7 +47,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) ...@@ -53,7 +47,7 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
policy = cdbs->cur_policy; policy = cdbs->cur_policy;
/* Get Absolute Load (in terms of freq for ondemand gov) */ /* Get Absolute Load */
for_each_cpu(j, policy->cpus) { for_each_cpu(j, policy->cpus) {
struct cpu_dbs_common_info *j_cdbs; struct cpu_dbs_common_info *j_cdbs;
u64 cur_wall_time, cur_idle_time; u64 cur_wall_time, cur_idle_time;
...@@ -104,14 +98,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) ...@@ -104,14 +98,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
load = 100 * (wall_time - idle_time) / wall_time; load = 100 * (wall_time - idle_time) / wall_time;
if (dbs_data->cdata->governor == GOV_ONDEMAND) {
int freq_avg = __cpufreq_driver_getavg(policy, j);
if (freq_avg <= 0)
freq_avg = policy->cur;
load *= freq_avg;
}
if (load > max_load) if (load > max_load)
max_load = load; max_load = load;
} }
......
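
With __cpufreq_driver_getavg() gone, dbs_check_cpu() computes load as a plain busy-time percentage and no longer scales it by an average frequency for ondemand. A tiny standalone illustration of that calculation:

/* load = 100 * (wall - idle) / wall, as in the hunk above. */
#include <stdio.h>

static unsigned int cpu_load(unsigned int wall_time, unsigned int idle_time)
{
        if (!wall_time || wall_time < idle_time)
                return 0;
        return 100 * (wall_time - idle_time) / wall_time;
}

int main(void)
{
        /* e.g. 200 time units elapsed, 50 of them idle -> 75% load */
        printf("load = %u%%\n", cpu_load(200, 50));
        return 0;
}
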
...@@ -18,10 +18,9 @@ ...@@ -18,10 +18,9 @@
#define _CPUFREQ_GOVERNOR_H #define _CPUFREQ_GOVERNOR_H
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/kobject.h> #include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
/* /*
* The polling frequency depends on the capability of the processor. Default * The polling frequency depends on the capability of the processor. Default
...@@ -169,7 +168,6 @@ struct od_dbs_tuners { ...@@ -169,7 +168,6 @@ struct od_dbs_tuners {
unsigned int sampling_rate; unsigned int sampling_rate;
unsigned int sampling_down_factor; unsigned int sampling_down_factor;
unsigned int up_threshold; unsigned int up_threshold;
unsigned int adj_up_threshold;
unsigned int powersave_bias; unsigned int powersave_bias;
unsigned int io_is_busy; unsigned int io_is_busy;
}; };
...@@ -223,7 +221,7 @@ struct od_ops { ...@@ -223,7 +221,7 @@ struct od_ops {
void (*powersave_bias_init_cpu)(int cpu); void (*powersave_bias_init_cpu)(int cpu);
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation); unsigned int freq_next, unsigned int relation);
void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq); void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
}; };
struct cs_ops { struct cs_ops {
......
...@@ -12,28 +12,16 @@ ...@@ -12,28 +12,16 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h> #include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu-defs.h> #include <linux/percpu-defs.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/tick.h> #include <linux/tick.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include "cpufreq_governor.h" #include "cpufreq_governor.h"
/* On-demand governor macros */ /* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80) #define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1) #define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000) #define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
#define MICRO_FREQUENCY_UP_THRESHOLD (95) #define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000) #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (11) #define MIN_FREQUENCY_UP_THRESHOLD (11)
...@@ -144,31 +132,27 @@ static void ondemand_powersave_bias_init(void) ...@@ -144,31 +132,27 @@ static void ondemand_powersave_bias_init(void)
} }
} }
static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{ {
struct dbs_data *dbs_data = p->governor_data; struct dbs_data *dbs_data = policy->governor_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners; struct od_dbs_tuners *od_tuners = dbs_data->tuners;
if (od_tuners->powersave_bias) if (od_tuners->powersave_bias)
freq = od_ops.powersave_bias_target(p, freq, freq = od_ops.powersave_bias_target(policy, freq,
CPUFREQ_RELATION_H); CPUFREQ_RELATION_H);
else if (p->cur == p->max) else if (policy->cur == policy->max)
return; return;
__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ? __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
CPUFREQ_RELATION_L : CPUFREQ_RELATION_H); CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
} }
/* /*
 * Every sampling_rate, we check if current idle time is less than 20% * Every sampling_rate, we check if current idle time is less than 20%
 * (default), then we try to increase frequency. Every sampling_rate, we look * (default), then we try to increase frequency. Else, we adjust the frequency
 * for the lowest frequency which can sustain the load while keeping idle time * proportional to load.
 * over 30%. If such a frequency exists, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency. Frequency reduction
* happens at minimum steps of 5% (default) of current frequency
*/ */
static void od_check_cpu(int cpu, unsigned int load_freq) static void od_check_cpu(int cpu, unsigned int load)
{ {
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
...@@ -178,29 +162,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq) ...@@ -178,29 +162,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
dbs_info->freq_lo = 0; dbs_info->freq_lo = 0;
/* Check for frequency increase */ /* Check for frequency increase */
if (load_freq > od_tuners->up_threshold * policy->cur) { if (load > od_tuners->up_threshold) {
/* If switching to max speed, apply sampling_down_factor */ /* If switching to max speed, apply sampling_down_factor */
if (policy->cur < policy->max) if (policy->cur < policy->max)
dbs_info->rate_mult = dbs_info->rate_mult =
od_tuners->sampling_down_factor; od_tuners->sampling_down_factor;
dbs_freq_increase(policy, policy->max); dbs_freq_increase(policy, policy->max);
return; return;
} } else {
/* Calculate the next frequency proportional to load */
/* Check for frequency decrease */
/* if we cannot reduce the frequency anymore, break out early */
if (policy->cur == policy->min)
return;
/*
* The optimal frequency is the frequency that is the lowest that can
* support the current CPU usage without triggering the up policy. To be
* safe, we focus 10 points under the threshold.
*/
if (load_freq < od_tuners->adj_up_threshold
* policy->cur) {
unsigned int freq_next; unsigned int freq_next;
freq_next = load_freq / od_tuners->adj_up_threshold; freq_next = load * policy->cpuinfo.max_freq / 100;
/* No longer fully busy, reset rate_mult */ /* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1; dbs_info->rate_mult = 1;
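
Below the up_threshold, ondemand now requests a frequency proportional to the measured load, freq_next = load * cpuinfo.max_freq / 100, instead of deriving it from the old frequency-weighted load and adj_up_threshold. A standalone illustration; the clamp to a policy minimum is an assumption here, mirroring what od_check_cpu() does outside this hunk:

/* Proportional next-frequency selection, illustrative only. */
#include <stdio.h>

static unsigned int freq_next(unsigned int load, unsigned int max_freq,
                              unsigned int min_freq)
{
        unsigned int f = load * max_freq / 100;

        return f < min_freq ? min_freq : f;     /* assumed clamp to policy->min */
}

int main(void)
{
        /* e.g. 40% load on a 2,400,000 kHz CPU -> request 960,000 kHz */
        printf("freq_next = %u kHz\n", freq_next(40, 2400000, 200000));
        return 0;
}
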
...@@ -374,9 +346,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf, ...@@ -374,9 +346,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
input < MIN_FREQUENCY_UP_THRESHOLD) { input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL; return -EINVAL;
} }
/* Calculate the new adj_up_threshold */
od_tuners->adj_up_threshold += input;
od_tuners->adj_up_threshold -= od_tuners->up_threshold;
od_tuners->up_threshold = input; od_tuners->up_threshold = input;
return count; return count;
...@@ -513,7 +482,7 @@ static int od_init(struct dbs_data *dbs_data) ...@@ -513,7 +482,7 @@ static int od_init(struct dbs_data *dbs_data)
u64 idle_time; u64 idle_time;
int cpu; int cpu;
tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL); tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
if (!tuners) { if (!tuners) {
pr_err("%s: kzalloc failed\n", __func__); pr_err("%s: kzalloc failed\n", __func__);
return -ENOMEM; return -ENOMEM;
...@@ -525,8 +494,6 @@ static int od_init(struct dbs_data *dbs_data) ...@@ -525,8 +494,6 @@ static int od_init(struct dbs_data *dbs_data)
if (idle_time != -1ULL) { if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */ /* Idle micro accounting is supported. Use finer thresholds */
tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/* /*
* In nohz/micro accounting case we set the minimum frequency * In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred * not depending on HZ, but fixed (very low). The deferred
...@@ -535,8 +502,6 @@ static int od_init(struct dbs_data *dbs_data) ...@@ -535,8 +502,6 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE; dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else { } else {
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
DEF_FREQUENCY_DOWN_DIFFERENTIAL;
/* For correct statistics, we need 10 ticks for each measure */ /* For correct statistics, we need 10 ticks for each measure */
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
......
...@@ -12,10 +12,9 @@ ...@@ -12,10 +12,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h>
static int cpufreq_governor_performance(struct cpufreq_policy *policy, static int cpufreq_governor_performance(struct cpufreq_policy *policy,
unsigned int event) unsigned int event)
......
...@@ -12,10 +12,9 @@ ...@@ -12,10 +12,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h>
static int cpufreq_governor_powersave(struct cpufreq_policy *policy, static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
unsigned int event) unsigned int event)
......
...@@ -9,17 +9,10 @@ ...@@ -9,17 +9,10 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h> #include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/jiffies.h> #include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <asm/cputime.h> #include <asm/cputime.h>
static spinlock_t cpufreq_stats_lock; static spinlock_t cpufreq_stats_lock;
...@@ -200,22 +193,22 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, ...@@ -200,22 +193,22 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
{ {
unsigned int i, j, count = 0, ret = 0; unsigned int i, j, count = 0, ret = 0;
struct cpufreq_stats *stat; struct cpufreq_stats *stat;
struct cpufreq_policy *data; struct cpufreq_policy *current_policy;
unsigned int alloc_size; unsigned int alloc_size;
unsigned int cpu = policy->cpu; unsigned int cpu = policy->cpu;
if (per_cpu(cpufreq_stats_table, cpu)) if (per_cpu(cpufreq_stats_table, cpu))
return -EBUSY; return -EBUSY;
stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL); stat = kzalloc(sizeof(*stat), GFP_KERNEL);
if ((stat) == NULL) if ((stat) == NULL)
return -ENOMEM; return -ENOMEM;
data = cpufreq_cpu_get(cpu); current_policy = cpufreq_cpu_get(cpu);
if (data == NULL) { if (current_policy == NULL) {
ret = -EINVAL; ret = -EINVAL;
goto error_get_fail; goto error_get_fail;
} }
ret = sysfs_create_group(&data->kobj, &stats_attr_group); ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
if (ret) if (ret)
goto error_out; goto error_out;
...@@ -258,10 +251,10 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy, ...@@ -258,10 +251,10 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->last_time = get_jiffies_64(); stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur); stat->last_index = freq_table_get_index(stat, policy->cur);
spin_unlock(&cpufreq_stats_lock); spin_unlock(&cpufreq_stats_lock);
cpufreq_cpu_put(data); cpufreq_cpu_put(current_policy);
return 0; return 0;
error_out: error_out:
cpufreq_cpu_put(data); cpufreq_cpu_put(current_policy);
error_get_fail: error_get_fail:
kfree(stat); kfree(stat);
per_cpu(cpufreq_stats_table, cpu) = NULL; per_cpu(cpufreq_stats_table, cpu) = NULL;
...@@ -348,16 +341,10 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb, ...@@ -348,16 +341,10 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
unsigned int cpu = (unsigned long)hcpu; unsigned int cpu = (unsigned long)hcpu;
switch (action) { switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
cpufreq_update_policy(cpu);
break;
case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
cpufreq_stats_free_sysfs(cpu); cpufreq_stats_free_sysfs(cpu);
break; break;
case CPU_DEAD: case CPU_DEAD:
case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu); cpufreq_stats_free_table(cpu);
break; break;
} }
...@@ -390,8 +377,6 @@ static int __init cpufreq_stats_init(void) ...@@ -390,8 +377,6 @@ static int __init cpufreq_stats_init(void)
return ret; return ret;
register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
for_each_online_cpu(cpu)
cpufreq_update_policy(cpu);
ret = cpufreq_register_notifier(&notifier_trans_block, ret = cpufreq_register_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER); CPUFREQ_TRANSITION_NOTIFIER);
......
...@@ -111,7 +111,6 @@ static struct cpufreq_driver cris_freq_driver = { ...@@ -111,7 +111,6 @@ static struct cpufreq_driver cris_freq_driver = {
.init = cris_freq_cpu_init, .init = cris_freq_cpu_init,
.exit = cris_freq_cpu_exit, .exit = cris_freq_cpu_exit,
.name = "cris_freq", .name = "cris_freq",
.owner = THIS_MODULE,
.attr = cris_freq_attr, .attr = cris_freq_attr,
}; };
......
...@@ -108,7 +108,6 @@ static struct cpufreq_driver cris_freq_driver = { ...@@ -108,7 +108,6 @@ static struct cpufreq_driver cris_freq_driver = {
.init = cris_freq_cpu_init, .init = cris_freq_cpu_init,
.exit = cris_freq_cpu_exit, .exit = cris_freq_cpu_exit,
.name = "cris_freq", .name = "cris_freq",
.owner = THIS_MODULE,
.attr = cris_freq_attr, .attr = cris_freq_attr,
}; };
......
...@@ -54,7 +54,7 @@ static struct acpi_processor_performance *eps_acpi_cpu_perf; ...@@ -54,7 +54,7 @@ static struct acpi_processor_performance *eps_acpi_cpu_perf;
/* Minimum necessary to get acpi_processor_get_bios_limit() working */ /* Minimum necessary to get acpi_processor_get_bios_limit() working */
static int eps_acpi_init(void) static int eps_acpi_init(void)
{ {
eps_acpi_cpu_perf = kzalloc(sizeof(struct acpi_processor_performance), eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf),
GFP_KERNEL); GFP_KERNEL);
if (!eps_acpi_cpu_perf) if (!eps_acpi_cpu_perf)
return -ENOMEM; return -ENOMEM;
...@@ -366,7 +366,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy) ...@@ -366,7 +366,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
states = 2; states = 2;
/* Allocate private data and frequency table for current cpu */ /* Allocate private data and frequency table for current cpu */
centaur = kzalloc(sizeof(struct eps_cpu_data) centaur = kzalloc(sizeof(*centaur)
+ (states + 1) * sizeof(struct cpufreq_frequency_table), + (states + 1) * sizeof(struct cpufreq_frequency_table),
GFP_KERNEL); GFP_KERNEL);
if (!centaur) if (!centaur)
...@@ -436,7 +436,6 @@ static struct cpufreq_driver eps_driver = { ...@@ -436,7 +436,6 @@ static struct cpufreq_driver eps_driver = {
.exit = eps_cpu_exit, .exit = eps_cpu_exit,
.get = eps_get, .get = eps_get,
.name = "e_powersaver", .name = "e_powersaver",
.owner = THIS_MODULE,
.attr = eps_attr, .attr = eps_attr,
}; };
......
...@@ -274,7 +274,6 @@ static struct cpufreq_driver elanfreq_driver = { ...@@ -274,7 +274,6 @@ static struct cpufreq_driver elanfreq_driver = {
.init = elanfreq_cpu_init, .init = elanfreq_cpu_init,
.exit = elanfreq_cpu_exit, .exit = elanfreq_cpu_exit,
.name = "elanfreq", .name = "elanfreq",
.owner = THIS_MODULE,
.attr = elanfreq_attr, .attr = elanfreq_attr,
}; };
......
...@@ -289,7 +289,7 @@ static int __init exynos_cpufreq_init(void) ...@@ -289,7 +289,7 @@ static int __init exynos_cpufreq_init(void)
{ {
int ret = -EINVAL; int ret = -EINVAL;
exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL); exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
if (!exynos_info) if (!exynos_info)
return -ENOMEM; return -ENOMEM;
...@@ -332,7 +332,6 @@ static int __init exynos_cpufreq_init(void) ...@@ -332,7 +332,6 @@ static int __init exynos_cpufreq_init(void)
regulator_put(arm_regulator); regulator_put(arm_regulator);
err_vdd_arm: err_vdd_arm:
kfree(exynos_info); kfree(exynos_info);
pr_debug("%s: failed initialization\n", __func__);
return -EINVAL; return -EINVAL;
} }
late_initcall(exynos_cpufreq_init); late_initcall(exynos_cpufreq_init);
...@@ -43,6 +43,27 @@ struct exynos_dvfs_info { ...@@ -43,6 +43,27 @@ struct exynos_dvfs_info {
bool (*need_apll_change)(unsigned int, unsigned int); bool (*need_apll_change)(unsigned int, unsigned int);
}; };
#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *); extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
#else
static inline int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
return -EOPNOTSUPP;
}
#endif
#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *); extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
#else
static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
{
return -EOPNOTSUPP;
}
#endif
#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *); extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
#else
static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
{
return -EOPNOTSUPP;
}
#endif
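
Each exynos*_cpufreq_init() declaration is now paired with a static inline stub returning -EOPNOTSUPP when the corresponding Kconfig option is disabled, so the caller can pick a SoC variant without #ifdef blocks. The generic shape of that pattern, with invented CONFIG_FOO_FEATURE/foo_init() names:

/* Header-style sketch of the "stub when compiled out" pattern. */
#include <errno.h>

#ifdef CONFIG_FOO_FEATURE
extern int foo_init(void);
#else
static inline int foo_init(void)
{
        return -EOPNOTSUPP;     /* feature not built in */
}
#endif
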
...@@ -238,6 +238,9 @@ static int exynos_target(struct cpufreq_policy *policy, ...@@ -238,6 +238,9 @@ static int exynos_target(struct cpufreq_policy *policy,
freqs.old = dvfs_info->cur_frequency; freqs.old = dvfs_info->cur_frequency;
freqs.new = freq_table[index].frequency; freqs.new = freq_table[index].frequency;
if (freqs.old == freqs.new)
goto out;
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Set the target frequency in all C0_3_PSTATE register */ /* Set the target frequency in all C0_3_PSTATE register */
......
...@@ -11,10 +11,8 @@ ...@@ -11,10 +11,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/module.h>
/********************************************************************* /*********************************************************************
* FREQUENCY TABLE HELPERS * * FREQUENCY TABLE HELPERS *
......
...@@ -183,7 +183,7 @@ static void gx_write_byte(int reg, int value) ...@@ -183,7 +183,7 @@ static void gx_write_byte(int reg, int value)
* gx_detect_chipset: * gx_detect_chipset:
* *
**/ **/
static __init struct pci_dev *gx_detect_chipset(void) static struct pci_dev * __init gx_detect_chipset(void)
{ {
struct pci_dev *gx_pci = NULL; struct pci_dev *gx_pci = NULL;
...@@ -446,7 +446,6 @@ static struct cpufreq_driver gx_suspmod_driver = { ...@@ -446,7 +446,6 @@ static struct cpufreq_driver gx_suspmod_driver = {
.target = cpufreq_gx_target, .target = cpufreq_gx_target,
.init = cpufreq_gx_cpu_init, .init = cpufreq_gx_cpu_init,
.name = "gx-suspmod", .name = "gx-suspmod",
.owner = THIS_MODULE,
}; };
static int __init cpufreq_gx_init(void) static int __init cpufreq_gx_init(void)
...@@ -466,7 +465,7 @@ static int __init cpufreq_gx_init(void) ...@@ -466,7 +465,7 @@ static int __init cpufreq_gx_init(void)
pr_debug("geode suspend modulation available.\n"); pr_debug("geode suspend modulation available.\n");
params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); params = kzalloc(sizeof(*params), GFP_KERNEL);
if (params == NULL) if (params == NULL)
return -ENOMEM; return -ENOMEM;
......
...@@ -69,23 +69,17 @@ static int hb_cpufreq_driver_init(void) ...@@ -69,23 +69,17 @@ static int hb_cpufreq_driver_init(void)
if (!of_machine_is_compatible("calxeda,highbank")) if (!of_machine_is_compatible("calxeda,highbank"))
return -ENODEV; return -ENODEV;
for_each_child_of_node(of_find_node_by_path("/cpus"), np)
if (of_get_property(np, "operating-points", NULL))
break;
if (!np) {
pr_err("failed to find highbank cpufreq node\n");
return -ENOENT;
}
cpu_dev = get_cpu_device(0); cpu_dev = get_cpu_device(0);
if (!cpu_dev) { if (!cpu_dev) {
pr_err("failed to get highbank cpufreq device\n"); pr_err("failed to get highbank cpufreq device\n");
ret = -ENODEV; return -ENODEV;
goto out_put_node;
} }
cpu_dev->of_node = np; np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_err("failed to find highbank cpufreq node\n");
return -ENOENT;
}
cpu_clk = clk_get(cpu_dev, NULL); cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) { if (IS_ERR(cpu_clk)) {
......
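
Instead of searching /cpus for a node carrying "operating-points", the highbank driver now takes the of_node the driver core already attached to CPU 0's device; other drivers in this series use the new of_cpu_device_node_get() helper for the same lookup. A rough kernel-context sketch (not standalone code) of that pattern, with a hypothetical helper name:

/* Kernel-context sketch: get_cpu_device() is from <linux/cpu.h>,
 * of_node_get() from <linux/of.h>; cpufreq_cpu0_node() is a made-up name. */
static struct device_node *cpufreq_cpu0_node(void)
{
        struct device *cpu_dev = get_cpu_device(0);     /* logical CPU 0 */

        if (!cpu_dev || !cpu_dev->of_node)
                return NULL;
        return of_node_get(cpu_dev->of_node);           /* caller must of_node_put() */
}
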
...@@ -274,7 +274,7 @@ acpi_cpufreq_cpu_init ( ...@@ -274,7 +274,7 @@ acpi_cpufreq_cpu_init (
pr_debug("acpi_cpufreq_cpu_init\n"); pr_debug("acpi_cpufreq_cpu_init\n");
data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) if (!data)
return (-ENOMEM); return (-ENOMEM);
...@@ -304,7 +304,7 @@ acpi_cpufreq_cpu_init ( ...@@ -304,7 +304,7 @@ acpi_cpufreq_cpu_init (
} }
/* alloc freq_table */ /* alloc freq_table */
data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * data->freq_table = kmalloc(sizeof(*data->freq_table) *
(data->acpi_data.state_count + 1), (data->acpi_data.state_count + 1),
GFP_KERNEL); GFP_KERNEL);
if (!data->freq_table) { if (!data->freq_table) {
...@@ -409,7 +409,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = { ...@@ -409,7 +409,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.init = acpi_cpufreq_cpu_init, .init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit, .exit = acpi_cpufreq_cpu_exit,
.name = "acpi-cpufreq", .name = "acpi-cpufreq",
.owner = THIS_MODULE,
.attr = acpi_cpufreq_attr, .attr = acpi_cpufreq_attr,
}; };
......
...@@ -221,14 +221,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) ...@@ -221,14 +221,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
cpu_dev = &pdev->dev; cpu_dev = &pdev->dev;
np = of_find_node_by_path("/cpus/cpu@0"); np = of_node_get(cpu_dev->of_node);
if (!np) { if (!np) {
dev_err(cpu_dev, "failed to find cpu0 node\n"); dev_err(cpu_dev, "failed to find cpu0 node\n");
return -ENOENT; return -ENOENT;
} }
cpu_dev->of_node = np;
arm_clk = devm_clk_get(cpu_dev, "arm"); arm_clk = devm_clk_get(cpu_dev, "arm");
pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys"); pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw"); pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
......
...@@ -665,7 +665,6 @@ static struct cpufreq_driver intel_pstate_driver = { ...@@ -665,7 +665,6 @@ static struct cpufreq_driver intel_pstate_driver = {
.init = intel_pstate_cpu_init, .init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit, .exit = intel_pstate_cpu_exit,
.name = "intel_pstate", .name = "intel_pstate",
.owner = THIS_MODULE,
}; };
static int __initdata no_load; static int __initdata no_load;
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/clk-provider.h> #include <linux/clk-provider.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/of.h> #include <linux/of_device.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/io.h> #include <linux/io.h>
#include <asm/proc-fns.h> #include <asm/proc-fns.h>
...@@ -158,7 +158,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = { ...@@ -158,7 +158,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
.init = kirkwood_cpufreq_cpu_init, .init = kirkwood_cpufreq_cpu_init,
.exit = kirkwood_cpufreq_cpu_exit, .exit = kirkwood_cpufreq_cpu_exit,
.name = "kirkwood-cpufreq", .name = "kirkwood-cpufreq",
.owner = THIS_MODULE,
.attr = kirkwood_cpufreq_attr, .attr = kirkwood_cpufreq_attr,
}; };
...@@ -175,9 +174,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) ...@@ -175,9 +174,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(priv.base)) if (IS_ERR(priv.base))
return PTR_ERR(priv.base); return PTR_ERR(priv.base);
np = of_find_node_by_path("/cpus/cpu@0"); np = of_cpu_device_node_get(0);
if (!np) if (!np) {
dev_err(&pdev->dev, "failed to get cpu device node\n");
return -ENODEV; return -ENODEV;
}
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk"); priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
if (IS_ERR(priv.cpu_clk)) { if (IS_ERR(priv.cpu_clk)) {
......
...@@ -948,7 +948,6 @@ static struct cpufreq_driver longhaul_driver = { ...@@ -948,7 +948,6 @@ static struct cpufreq_driver longhaul_driver = {
.init = longhaul_cpu_init, .init = longhaul_cpu_init,
.exit = longhaul_cpu_exit, .exit = longhaul_cpu_exit,
.name = "longhaul", .name = "longhaul",
.owner = THIS_MODULE,
.attr = longhaul_attr, .attr = longhaul_attr,
}; };
......
...@@ -286,7 +286,6 @@ static struct cpufreq_driver longrun_driver = { ...@@ -286,7 +286,6 @@ static struct cpufreq_driver longrun_driver = {
.get = longrun_get, .get = longrun_get,
.init = longrun_cpu_init, .init = longrun_cpu_init,
.name = "longrun", .name = "longrun",
.owner = THIS_MODULE,
}; };
static const struct x86_cpu_id longrun_ids[] = { static const struct x86_cpu_id longrun_ids[] = {
......
...@@ -158,7 +158,6 @@ static struct freq_attr *loongson2_table_attr[] = { ...@@ -158,7 +158,6 @@ static struct freq_attr *loongson2_table_attr[] = {
}; };
static struct cpufreq_driver loongson2_cpufreq_driver = { static struct cpufreq_driver loongson2_cpufreq_driver = {
.owner = THIS_MODULE,
.name = "loongson2", .name = "loongson2",
.init = loongson2_cpufreq_cpu_init, .init = loongson2_cpufreq_cpu_init,
.verify = loongson2_cpufreq_verify, .verify = loongson2_cpufreq_verify,
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/time.h> #include <linux/time.h>
#include <linux/of.h> #include <linux/of_device.h>
#define DBG(fmt...) pr_debug(fmt) #define DBG(fmt...) pr_debug(fmt)
...@@ -190,7 +190,6 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -190,7 +190,6 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
static struct cpufreq_driver maple_cpufreq_driver = { static struct cpufreq_driver maple_cpufreq_driver = {
.name = "maple", .name = "maple",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
.init = maple_cpufreq_cpu_init, .init = maple_cpufreq_cpu_init,
.verify = maple_cpufreq_verify, .verify = maple_cpufreq_verify,
...@@ -201,7 +200,6 @@ static struct cpufreq_driver maple_cpufreq_driver = { ...@@ -201,7 +200,6 @@ static struct cpufreq_driver maple_cpufreq_driver = {
static int __init maple_cpufreq_init(void) static int __init maple_cpufreq_init(void)
{ {
struct device_node *cpus;
struct device_node *cpunode; struct device_node *cpunode;
unsigned int psize; unsigned int psize;
unsigned long max_freq; unsigned long max_freq;
...@@ -217,24 +215,11 @@ static int __init maple_cpufreq_init(void) ...@@ -217,24 +215,11 @@ static int __init maple_cpufreq_init(void)
!of_machine_is_compatible("Momentum,Apache")) !of_machine_is_compatible("Momentum,Apache"))
return 0; return 0;
cpus = of_find_node_by_path("/cpus");
if (cpus == NULL) {
DBG("No /cpus node !\n");
return -ENODEV;
}
/* Get first CPU node */ /* Get first CPU node */
for (cpunode = NULL; cpunode = of_cpu_device_node_get(0);
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
const u32 *reg = of_get_property(cpunode, "reg", NULL);
if (reg == NULL || (*reg) != 0)
continue;
if (!strcmp(cpunode->type, "cpu"))
break;
}
if (cpunode == NULL) { if (cpunode == NULL) {
printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n"); printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
goto bail_cpus; goto bail_noprops;
} }
/* Check 970FX for now */ /* Check 970FX for now */
...@@ -290,14 +275,11 @@ static int __init maple_cpufreq_init(void) ...@@ -290,14 +275,11 @@ static int __init maple_cpufreq_init(void)
rc = cpufreq_register_driver(&maple_cpufreq_driver); rc = cpufreq_register_driver(&maple_cpufreq_driver);
of_node_put(cpunode); of_node_put(cpunode);
of_node_put(cpus);
return rc; return rc;
bail_noprops: bail_noprops:
of_node_put(cpunode); of_node_put(cpunode);
bail_cpus:
of_node_put(cpus);
return rc; return rc;
} }
......
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include "mperf.h"
static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
/* Called via smp_call_function_single(), on the target CPU */
static void read_measured_perf_ctrs(void *_cur)
{
struct aperfmperf *am = _cur;
get_aperfmperf(am);
}
/*
* Return the measured active (C0) frequency on this CPU since last call
* to this function.
* Input: cpu number
* Return: Average CPU frequency in terms of max frequency (zero on error)
*
* We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
* over a period of time, while CPU is in C0 state.
* IA32_MPERF counts at the rate of max advertised frequency
* IA32_APERF counts at the rate of actual CPU frequency
* Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
* no meaning should be associated with absolute values of these MSRs.
*/
unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
unsigned int cpu)
{
struct aperfmperf perf;
unsigned long ratio;
unsigned int retval;
if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
return 0;
ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
per_cpu(acfreq_old_perf, cpu) = perf;
retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
return retval;
}
EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
MODULE_LICENSE("GPL");
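
The removed mperf helper derived the average C0 frequency from the ratio of the APERF and MPERF deltas, scaled by the maximum advertised frequency. A standalone worked example of that arithmetic (the counter values are invented, and the kernel used APERFMPERF_SHIFT fixed-point rather than floating point):

/* measured_freq ~= max_freq * delta(APERF) / delta(MPERF) */
#include <stdint.h>
#include <stdio.h>

static unsigned int measured_khz(uint64_t d_aperf, uint64_t d_mperf,
                                 unsigned int max_freq_khz)
{
        if (!d_mperf)
                return 0;
        return (unsigned int)(max_freq_khz * ((double)d_aperf / d_mperf));
}

int main(void)
{
        /* APERF advanced 80% as fast as MPERF on a 3,000,000 kHz part */
        printf("%u kHz\n", measured_khz(8000000, 10000000, 3000000));
        return 0;
}
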
/*
* (c) 2010 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*/
unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
unsigned int cpu);
...@@ -279,7 +279,6 @@ static struct cpufreq_driver p4clockmod_driver = { ...@@ -279,7 +279,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.exit = cpufreq_p4_cpu_exit, .exit = cpufreq_p4_cpu_exit,
.get = cpufreq_p4_get, .get = cpufreq_p4_get,
.name = "p4-clockmod", .name = "p4-clockmod",
.owner = THIS_MODULE,
.attr = p4clockmod_attr, .attr = p4clockmod_attr,
}; };
......
...@@ -297,7 +297,6 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy, ...@@ -297,7 +297,6 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
static struct cpufreq_driver pas_cpufreq_driver = { static struct cpufreq_driver pas_cpufreq_driver = {
.name = "pas-cpufreq", .name = "pas-cpufreq",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
.init = pas_cpufreq_cpu_init, .init = pas_cpufreq_cpu_init,
.exit = pas_cpufreq_cpu_exit, .exit = pas_cpufreq_cpu_exit,
......
...@@ -587,7 +587,6 @@ static struct cpufreq_driver pcc_cpufreq_driver = { ...@@ -587,7 +587,6 @@ static struct cpufreq_driver pcc_cpufreq_driver = {
.init = pcc_cpufreq_cpu_init, .init = pcc_cpufreq_cpu_init,
.exit = pcc_cpufreq_cpu_exit, .exit = pcc_cpufreq_cpu_exit,
.name = "pcc-cpufreq", .name = "pcc-cpufreq",
.owner = THIS_MODULE,
}; };
static int __init pcc_cpufreq_init(void) static int __init pcc_cpufreq_init(void)
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/hardirq.h> #include <linux/hardirq.h>
#include <linux/of_device.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/irq.h> #include <asm/irq.h>
...@@ -477,7 +478,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = { ...@@ -477,7 +478,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.flags = CPUFREQ_PM_NO_WARN, .flags = CPUFREQ_PM_NO_WARN,
.attr = pmac_cpu_freqs_attr, .attr = pmac_cpu_freqs_attr,
.name = "powermac", .name = "powermac",
.owner = THIS_MODULE,
}; };
...@@ -649,8 +649,8 @@ static int __init pmac_cpufreq_setup(void) ...@@ -649,8 +649,8 @@ static int __init pmac_cpufreq_setup(void)
if (strstr(cmd_line, "nocpufreq")) if (strstr(cmd_line, "nocpufreq"))
return 0; return 0;
/* Assume only one CPU */ /* Get first CPU node */
cpunode = of_find_node_by_type(NULL, "cpu"); cpunode = of_cpu_device_node_get(0);
if (!cpunode) if (!cpunode)
goto out; goto out;
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/completion.h> #include <linux/completion.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/of_device.h>
#include <asm/prom.h> #include <asm/prom.h>
#include <asm/machdep.h> #include <asm/machdep.h>
#include <asm/irq.h> #include <asm/irq.h>
...@@ -371,7 +372,6 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) ...@@ -371,7 +372,6 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
static struct cpufreq_driver g5_cpufreq_driver = { static struct cpufreq_driver g5_cpufreq_driver = {
.name = "powermac", .name = "powermac",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
.init = g5_cpufreq_cpu_init, .init = g5_cpufreq_cpu_init,
.verify = g5_cpufreq_verify, .verify = g5_cpufreq_verify,
...@@ -383,9 +383,8 @@ static struct cpufreq_driver g5_cpufreq_driver = { ...@@ -383,9 +383,8 @@ static struct cpufreq_driver g5_cpufreq_driver = {
#ifdef CONFIG_PMAC_SMU #ifdef CONFIG_PMAC_SMU
static int __init g5_neo2_cpufreq_init(struct device_node *cpus) static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
{ {
struct device_node *cpunode;
unsigned int psize, ssize; unsigned int psize, ssize;
unsigned long max_freq; unsigned long max_freq;
char *freq_method, *volt_method; char *freq_method, *volt_method;
...@@ -405,20 +404,6 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) ...@@ -405,20 +404,6 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
else else
return -ENODEV; return -ENODEV;
/* Get first CPU node */
for (cpunode = NULL;
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
const u32 *reg = of_get_property(cpunode, "reg", NULL);
if (reg == NULL || (*reg) != 0)
continue;
if (!strcmp(cpunode->type, "cpu"))
break;
}
if (cpunode == NULL) {
printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
return -ENODEV;
}
/* Check 970FX for now */ /* Check 970FX for now */
valp = of_get_property(cpunode, "cpu-version", NULL); valp = of_get_property(cpunode, "cpu-version", NULL);
if (!valp) { if (!valp) {
...@@ -447,9 +432,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) ...@@ -447,9 +432,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
if (!shdr) if (!shdr)
goto bail_noprops; goto bail_noprops;
g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
ssize = (shdr->len * sizeof(u32)) - ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr);
sizeof(struct smu_sdbp_header); g5_fvt_count = ssize / sizeof(*g5_fvt_table);
g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
g5_fvt_cur = 0; g5_fvt_cur = 0;
/* Sanity checking */ /* Sanity checking */
...@@ -537,9 +521,9 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus) ...@@ -537,9 +521,9 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
#endif /* CONFIG_PMAC_SMU */ #endif /* CONFIG_PMAC_SMU */
static int __init g5_pm72_cpufreq_init(struct device_node *cpus) static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
{ {
struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL; struct device_node *cpuid = NULL, *hwclock = NULL;
const u8 *eeprom = NULL; const u8 *eeprom = NULL;
const u32 *valp; const u32 *valp;
u64 max_freq, min_freq, ih, il; u64 max_freq, min_freq, ih, il;
...@@ -548,17 +532,6 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus) ...@@ -548,17 +532,6 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and" DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
" RackMac3,1...\n"); " RackMac3,1...\n");
/* Get first CPU node */
for (cpunode = NULL;
(cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
if (!strcmp(cpunode->type, "cpu"))
break;
}
if (cpunode == NULL) {
printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
return -ENODEV;
}
/* Lookup the cpuid eeprom node */ /* Lookup the cpuid eeprom node */
cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
if (cpuid != NULL) if (cpuid != NULL)
...@@ -718,25 +691,25 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus) ...@@ -718,25 +691,25 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
static int __init g5_cpufreq_init(void) static int __init g5_cpufreq_init(void)
{ {
struct device_node *cpus; struct device_node *cpunode;
int rc = 0; int rc = 0;
cpus = of_find_node_by_path("/cpus"); /* Get first CPU node */
if (cpus == NULL) { cpunode = of_cpu_device_node_get(0);
DBG("No /cpus node !\n"); if (cpunode == NULL) {
pr_err("cpufreq: Can't find any CPU node\n");
return -ENODEV; return -ENODEV;
} }
if (of_machine_is_compatible("PowerMac7,2") || if (of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3") || of_machine_is_compatible("PowerMac7,3") ||
of_machine_is_compatible("RackMac3,1")) of_machine_is_compatible("RackMac3,1"))
rc = g5_pm72_cpufreq_init(cpus); rc = g5_pm72_cpufreq_init(cpunode);
#ifdef CONFIG_PMAC_SMU #ifdef CONFIG_PMAC_SMU
else else
rc = g5_neo2_cpufreq_init(cpus); rc = g5_neo2_cpufreq_init(cpunode);
#endif /* CONFIG_PMAC_SMU */ #endif /* CONFIG_PMAC_SMU */
of_node_put(cpus);
return rc; return rc;
} }
......
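The pmac64 hunk above shows the pattern this series applies everywhere: instead of walking the /cpus children by node type, a driver asks the already-registered CPU device for its of_node via of_cpu_device_node_get(). A minimal sketch of that pattern follows; the function name and the "foo,max-speed" property are hypothetical, used only to show the shape of the call.
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/of.h>
	#include <linux/of_device.h>
	/* Sketch only: a hypothetical init routine that needs CPU 0's DT node. */
	static int __init foo_cpufreq_probe_cpu0(void)
	{
		struct device_node *cpunode;
		const u32 *val;
		/* The cpu device's of_node is already populated, so the logical
		 * index is enough; no walking of /cpus children is required. */
		cpunode = of_cpu_device_node_get(0);
		if (!cpunode)
			return -ENODEV;
		/* "foo,max-speed" is a made-up property, purely illustrative. */
		val = of_get_property(cpunode, "foo,max-speed", NULL);
		/* of_cpu_device_node_get() takes a reference on the node (see the
		 * of_node_get() in its definition further below), so balance it
		 * once the node is no longer needed. */
		of_node_put(cpunode);
		return val ? 0 : -ENODEV;
	}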
...@@ -207,7 +207,6 @@ static struct cpufreq_driver powernow_k6_driver = { ...@@ -207,7 +207,6 @@ static struct cpufreq_driver powernow_k6_driver = {
.exit = powernow_k6_cpu_exit, .exit = powernow_k6_cpu_exit,
.get = powernow_k6_get, .get = powernow_k6_get,
.name = "powernow-k6", .name = "powernow-k6",
.owner = THIS_MODULE,
.attr = powernow_k6_attr, .attr = powernow_k6_attr,
}; };
......
...@@ -177,7 +177,7 @@ static int get_ranges(unsigned char *pst) ...@@ -177,7 +177,7 @@ static int get_ranges(unsigned char *pst)
unsigned int speed; unsigned int speed;
u8 fid, vid; u8 fid, vid;
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * powernow_table = kzalloc((sizeof(*powernow_table) *
(number_scales + 1)), GFP_KERNEL); (number_scales + 1)), GFP_KERNEL);
if (!powernow_table) if (!powernow_table)
return -ENOMEM; return -ENOMEM;
...@@ -309,8 +309,7 @@ static int powernow_acpi_init(void) ...@@ -309,8 +309,7 @@ static int powernow_acpi_init(void)
goto err0; goto err0;
} }
acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance), acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL);
GFP_KERNEL);
if (!acpi_processor_perf) { if (!acpi_processor_perf) {
retval = -ENOMEM; retval = -ENOMEM;
goto err0; goto err0;
...@@ -346,7 +345,7 @@ static int powernow_acpi_init(void) ...@@ -346,7 +345,7 @@ static int powernow_acpi_init(void)
goto err2; goto err2;
} }
powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) * powernow_table = kzalloc((sizeof(*powernow_table) *
(number_scales + 1)), GFP_KERNEL); (number_scales + 1)), GFP_KERNEL);
if (!powernow_table) { if (!powernow_table) {
retval = -ENOMEM; retval = -ENOMEM;
...@@ -497,7 +496,7 @@ static int powernow_decode_bios(int maxfid, int startvid) ...@@ -497,7 +496,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
"relevant to this CPU).\n", "relevant to this CPU).\n",
psb->numpst); psb->numpst);
p += sizeof(struct psb_s); p += sizeof(*psb);
pst = (struct pst_s *) p; pst = (struct pst_s *) p;
...@@ -510,12 +509,12 @@ static int powernow_decode_bios(int maxfid, int startvid) ...@@ -510,12 +509,12 @@ static int powernow_decode_bios(int maxfid, int startvid)
(maxfid == pst->maxfid) && (maxfid == pst->maxfid) &&
(startvid == pst->startvid)) { (startvid == pst->startvid)) {
print_pst_entry(pst, j); print_pst_entry(pst, j);
p = (char *)pst + sizeof(struct pst_s); p = (char *)pst + sizeof(*pst);
ret = get_ranges(p); ret = get_ranges(p);
return ret; return ret;
} else { } else {
unsigned int k; unsigned int k;
p = (char *)pst + sizeof(struct pst_s); p = (char *)pst + sizeof(*pst);
for (k = 0; k < number_scales; k++) for (k = 0; k < number_scales; k++)
p += 2; p += 2;
} }
...@@ -717,7 +716,6 @@ static struct cpufreq_driver powernow_driver = { ...@@ -717,7 +716,6 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init, .init = powernow_cpu_init,
.exit = powernow_cpu_exit, .exit = powernow_cpu_exit,
.name = "powernow-k7", .name = "powernow-k7",
.owner = THIS_MODULE,
.attr = powernow_table_attr, .attr = powernow_table_attr,
}; };
......
...@@ -623,7 +623,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, ...@@ -623,7 +623,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
if (check_pst_table(data, pst, maxvid)) if (check_pst_table(data, pst, maxvid))
return -EINVAL; return -EINVAL;
powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) powernow_table = kmalloc((sizeof(*powernow_table)
* (data->numps + 1)), GFP_KERNEL); * (data->numps + 1)), GFP_KERNEL);
if (!powernow_table) { if (!powernow_table) {
printk(KERN_ERR PFX "powernow_table memory alloc failure\n"); printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
...@@ -793,7 +793,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) ...@@ -793,7 +793,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
} }
/* fill in data->powernow_table */ /* fill in data->powernow_table */
powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) powernow_table = kmalloc((sizeof(*powernow_table)
* (data->acpi_data.state_count + 1)), GFP_KERNEL); * (data->acpi_data.state_count + 1)), GFP_KERNEL);
if (!powernow_table) { if (!powernow_table) {
pr_debug("powernow_table memory alloc failure\n"); pr_debug("powernow_table memory alloc failure\n");
...@@ -1106,7 +1106,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol) ...@@ -1106,7 +1106,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
if (rc) if (rc)
return -ENODEV; return -ENODEV;
data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL); data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) { if (!data) {
printk(KERN_ERR PFX "unable to alloc powernow_k8_data"); printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
return -ENOMEM; return -ENOMEM;
...@@ -1240,7 +1240,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = { ...@@ -1240,7 +1240,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.exit = powernowk8_cpu_exit, .exit = powernowk8_cpu_exit,
.get = powernowk8_get, .get = powernowk8_get,
.name = "powernow-k8", .name = "powernow-k8",
.owner = THIS_MODULE,
.attr = powernow_k8_attr, .attr = powernow_k8_attr,
}; };
......
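The powernow-k7/k8 hunks above also convert allocations from sizeof(struct type) to sizeof(*pointer). A short sketch of the idiom, with a placeholder element type and count, assuming nothing beyond kzalloc():
	#include <linux/slab.h>
	/* Placeholder element type; only the allocation pattern matters. */
	struct foo_freq_entry {
		unsigned int frequency;
	};
	static struct foo_freq_entry *foo_alloc_table(unsigned int nr_entries)
	{
		struct foo_freq_entry *table;
		/* sizeof(*table) is derived from the pointee, so the expression
		 * stays correct even if the element type is later renamed. */
		table = kzalloc((nr_entries + 1) * sizeof(*table), GFP_KERNEL);
		return table;	/* NULL on allocation failure */
	}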
...@@ -300,7 +300,6 @@ static struct freq_attr *corenet_cpufreq_attr[] = { ...@@ -300,7 +300,6 @@ static struct freq_attr *corenet_cpufreq_attr[] = {
static struct cpufreq_driver ppc_corenet_cpufreq_driver = { static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.name = "ppc_cpufreq", .name = "ppc_cpufreq",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
.init = corenet_cpufreq_cpu_init, .init = corenet_cpufreq_cpu_init,
.exit = __exit_p(corenet_cpufreq_cpu_exit), .exit = __exit_p(corenet_cpufreq_cpu_exit),
......
...@@ -181,7 +181,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = { ...@@ -181,7 +181,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
.init = cbe_cpufreq_cpu_init, .init = cbe_cpufreq_cpu_init,
.exit = cbe_cpufreq_cpu_exit, .exit = cbe_cpufreq_cpu_exit,
.name = "cbe-cpufreq", .name = "cbe-cpufreq",
.owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS, .flags = CPUFREQ_CONST_LOOPS,
}; };
......
...@@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) ...@@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
return ret; return ret;
} }
static __init void pxa_cpufreq_init_voltages(void) static void __init pxa_cpufreq_init_voltages(void)
{ {
vcc_core = regulator_get(NULL, "vcc_core"); vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) { if (IS_ERR(vcc_core)) {
...@@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq) ...@@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
return 0; return 0;
} }
static __init void pxa_cpufreq_init_voltages(void) { } static void __init pxa_cpufreq_init_voltages(void) { }
#endif #endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table, static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
......
...@@ -213,10 +213,12 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) ...@@ -213,10 +213,12 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
policy->cur = policy->min = policy->max; policy->cur = policy->min = policy->max;
if (cpu_is_pxa300() || cpu_is_pxa310()) if (cpu_is_pxa300() || cpu_is_pxa310())
ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs)); ret = setup_freqs_table(policy, pxa300_freqs,
ARRAY_SIZE(pxa300_freqs));
if (cpu_is_pxa320()) if (cpu_is_pxa320())
ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs)); ret = setup_freqs_table(policy, pxa320_freqs,
ARRAY_SIZE(pxa320_freqs));
if (ret) { if (ret) {
pr_err("failed to setup frequency table\n"); pr_err("failed to setup frequency table\n");
......
...@@ -524,7 +524,6 @@ static struct freq_attr *s3c2416_cpufreq_attr[] = { ...@@ -524,7 +524,6 @@ static struct freq_attr *s3c2416_cpufreq_attr[] = {
}; };
static struct cpufreq_driver s3c2416_cpufreq_driver = { static struct cpufreq_driver s3c2416_cpufreq_driver = {
.owner = THIS_MODULE,
.flags = 0, .flags = 0,
.verify = s3c2416_cpufreq_verify_speed, .verify = s3c2416_cpufreq_verify_speed,
.target = s3c2416_cpufreq_set_target, .target = s3c2416_cpufreq_set_target,
......
...@@ -392,7 +392,7 @@ static int s3c_cpufreq_init(struct cpufreq_policy *policy) ...@@ -392,7 +392,7 @@ static int s3c_cpufreq_init(struct cpufreq_policy *policy)
return 0; return 0;
} }
static __init int s3c_cpufreq_initclks(void) static int __init s3c_cpufreq_initclks(void)
{ {
_clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll"); _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
_clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal"); _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
...@@ -522,7 +522,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board) ...@@ -522,7 +522,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
/* Copy the board information so that each board can make this /* Copy the board information so that each board can make this
* initdata. */ * initdata. */
ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL); ours = kzalloc(sizeof(*ours), GFP_KERNEL);
if (ours == NULL) { if (ours == NULL) {
printk(KERN_ERR "%s: no memory\n", __func__); printk(KERN_ERR "%s: no memory\n", __func__);
return -ENOMEM; return -ENOMEM;
...@@ -615,7 +615,7 @@ static int s3c_cpufreq_build_freq(void) ...@@ -615,7 +615,7 @@ static int s3c_cpufreq_build_freq(void)
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0); size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
size++; size++;
ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL); ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL);
if (!ftab) { if (!ftab) {
printk(KERN_ERR "%s: no memory for tables\n", __func__); printk(KERN_ERR "%s: no memory for tables\n", __func__);
return -ENOMEM; return -ENOMEM;
...@@ -691,7 +691,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls, ...@@ -691,7 +691,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
struct cpufreq_frequency_table *vals; struct cpufreq_frequency_table *vals;
unsigned int size; unsigned int size;
size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1); size = sizeof(*vals) * (plls_no + 1);
vals = kmalloc(size, GFP_KERNEL); vals = kmalloc(size, GFP_KERNEL);
if (vals) { if (vals) {
......
...@@ -263,7 +263,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy) ...@@ -263,7 +263,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
} }
static struct cpufreq_driver s3c64xx_cpufreq_driver = { static struct cpufreq_driver s3c64xx_cpufreq_driver = {
.owner = THIS_MODULE,
.flags = 0, .flags = 0,
.verify = s3c64xx_cpufreq_verify_speed, .verify = s3c64xx_cpufreq_verify_speed,
.target = s3c64xx_cpufreq_set_target, .target = s3c64xx_cpufreq_set_target,
......
...@@ -147,7 +147,6 @@ static struct cpufreq_driver sc520_freq_driver = { ...@@ -147,7 +147,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.init = sc520_freq_cpu_init, .init = sc520_freq_cpu_init,
.exit = sc520_freq_cpu_exit, .exit = sc520_freq_cpu_exit,
.name = "sc520_freq", .name = "sc520_freq",
.owner = THIS_MODULE,
.attr = sc520_freq_attr, .attr = sc520_freq_attr,
}; };
......
...@@ -160,7 +160,6 @@ static struct freq_attr *sh_freq_attr[] = { ...@@ -160,7 +160,6 @@ static struct freq_attr *sh_freq_attr[] = {
}; };
static struct cpufreq_driver sh_cpufreq_driver = { static struct cpufreq_driver sh_cpufreq_driver = {
.owner = THIS_MODULE,
.name = "sh", .name = "sh",
.get = sh_cpufreq_get, .get = sh_cpufreq_get,
.target = sh_cpufreq_target, .target = sh_cpufreq_target,
......
...@@ -351,12 +351,11 @@ static int __init us2e_freq_init(void) ...@@ -351,12 +351,11 @@ static int __init us2e_freq_init(void)
struct cpufreq_driver *driver; struct cpufreq_driver *driver;
ret = -ENOMEM; ret = -ENOMEM;
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver) if (!driver)
goto err_out; goto err_out;
us2e_freq_table = kzalloc( us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
(NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
GFP_KERNEL); GFP_KERNEL);
if (!us2e_freq_table) if (!us2e_freq_table)
goto err_out; goto err_out;
...@@ -366,7 +365,6 @@ static int __init us2e_freq_init(void) ...@@ -366,7 +365,6 @@ static int __init us2e_freq_init(void)
driver->target = us2e_freq_target; driver->target = us2e_freq_target;
driver->get = us2e_freq_get; driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit; driver->exit = us2e_freq_cpu_exit;
driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-IIe"); strcpy(driver->name, "UltraSPARC-IIe");
cpufreq_us2e_driver = driver; cpufreq_us2e_driver = driver;
......
...@@ -212,12 +212,11 @@ static int __init us3_freq_init(void) ...@@ -212,12 +212,11 @@ static int __init us3_freq_init(void)
struct cpufreq_driver *driver; struct cpufreq_driver *driver;
ret = -ENOMEM; ret = -ENOMEM;
driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver) if (!driver)
goto err_out; goto err_out;
us3_freq_table = kzalloc( us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
(NR_CPUS * sizeof(struct us3_freq_percpu_info)),
GFP_KERNEL); GFP_KERNEL);
if (!us3_freq_table) if (!us3_freq_table)
goto err_out; goto err_out;
...@@ -227,7 +226,6 @@ static int __init us3_freq_init(void) ...@@ -227,7 +226,6 @@ static int __init us3_freq_init(void)
driver->target = us3_freq_target; driver->target = us3_freq_target;
driver->get = us3_freq_get; driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit; driver->exit = us3_freq_cpu_exit;
driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-III"); strcpy(driver->name, "UltraSPARC-III");
cpufreq_us3_driver = driver; cpufreq_us3_driver = driver;
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of.h> #include <linux/of_device.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/types.h> #include <linux/types.h>
...@@ -223,7 +223,7 @@ static int spear_cpufreq_driver_init(void) ...@@ -223,7 +223,7 @@ static int spear_cpufreq_driver_init(void)
const __be32 *val; const __be32 *val;
int cnt, i, ret; int cnt, i, ret;
np = of_find_node_by_path("/cpus/cpu@0"); np = of_cpu_device_node_get(0);
if (!np) { if (!np) {
pr_err("No cpu node found"); pr_err("No cpu node found");
return -ENODEV; return -ENODEV;
......
...@@ -575,7 +575,6 @@ static struct cpufreq_driver centrino_driver = { ...@@ -575,7 +575,6 @@ static struct cpufreq_driver centrino_driver = {
.target = centrino_target, .target = centrino_target,
.get = get_cur_freq, .get = get_cur_freq,
.attr = centrino_attr, .attr = centrino_attr,
.owner = THIS_MODULE,
}; };
/* /*
......
...@@ -378,7 +378,6 @@ static struct cpufreq_driver speedstep_driver = { ...@@ -378,7 +378,6 @@ static struct cpufreq_driver speedstep_driver = {
.init = speedstep_cpu_init, .init = speedstep_cpu_init,
.exit = speedstep_cpu_exit, .exit = speedstep_cpu_exit,
.get = speedstep_get, .get = speedstep_get,
.owner = THIS_MODULE,
.attr = speedstep_attr, .attr = speedstep_attr,
}; };
......
...@@ -375,7 +375,6 @@ static struct cpufreq_driver speedstep_driver = { ...@@ -375,7 +375,6 @@ static struct cpufreq_driver speedstep_driver = {
.exit = speedstep_cpu_exit, .exit = speedstep_cpu_exit,
.get = speedstep_get, .get = speedstep_get,
.resume = speedstep_resume, .resume = speedstep_resume,
.owner = THIS_MODULE,
.attr = speedstep_attr, .attr = speedstep_attr,
}; };
......
...@@ -24,7 +24,7 @@ static struct cpufreq_driver ucv2_driver; ...@@ -24,7 +24,7 @@ static struct cpufreq_driver ucv2_driver;
/* make sure that only the "userspace" governor is run /* make sure that only the "userspace" governor is run
* -- anything else wouldn't make sense on this platform, anyway. * -- anything else wouldn't make sense on this platform, anyway.
*/ */
int ucv2_verify_speed(struct cpufreq_policy *policy) static int ucv2_verify_speed(struct cpufreq_policy *policy)
{ {
if (policy->cpu) if (policy->cpu)
return -EINVAL; return -EINVAL;
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
* 2 of the License, or (at your option) any later version. * 2 of the License, or (at your option) any later version.
*/ */
#include <linux/ctype.h> #include <linux/ctype.h>
#include <linux/cpu.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -230,6 +231,100 @@ const void *of_get_property(const struct device_node *np, const char *name, ...@@ -230,6 +231,100 @@ const void *of_get_property(const struct device_node *np, const char *name,
} }
EXPORT_SYMBOL(of_get_property); EXPORT_SYMBOL(of_get_property);
/*
* arch_match_cpu_phys_id - Match the given logical CPU and physical id
*
* @cpu: logical cpu index of a core/thread
* @phys_id: physical identifier of a core/thread
*
* CPU logical to physical index mapping is architecture specific.
* However this __weak function provides a default match of physical
 * id to logical cpu index. phys_id provided here is usually the value read
 * from the device tree, which must match the hardware's internal registers.
*
* Returns true if the physical identifier and the logical cpu index
* correspond to the same core/thread, false otherwise.
*/
bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return (u32)phys_id == cpu;
}
/**
* Checks if the given "prop_name" property holds the physical id of the
* core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
* NULL, local thread number within the core is returned in it.
*/
static bool __of_find_n_match_cpu_property(struct device_node *cpun,
const char *prop_name, int cpu, unsigned int *thread)
{
const __be32 *cell;
int ac, prop_len, tid;
u64 hwid;
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, prop_name, &prop_len);
if (!cell)
return false;
prop_len /= sizeof(*cell);
for (tid = 0; tid < prop_len; tid++) {
hwid = of_read_number(cell, ac);
if (arch_match_cpu_phys_id(cpu, hwid)) {
if (thread)
*thread = tid;
return true;
}
cell += ac;
}
return false;
}
/**
* of_get_cpu_node - Get device node associated with the given logical CPU
*
 * @cpu: CPU number (logical index) for which device node is required
* @thread: if not NULL, local thread number within the physical core is
* returned
*
* The main purpose of this function is to retrieve the device node for the
 * given logical CPU index. It should be used to initialize the of_node in the
 * cpu device. Once of_node in the cpu device is populated, all further
 * references can use that instead.
*
* CPU logical to physical index mapping is architecture specific and is built
* before booting secondary cores. This function uses arch_match_cpu_phys_id
* which can be overridden by architecture specific implementation.
*
* Returns a node pointer for the logical cpu if found, else NULL.
*/
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
struct device_node *cpun, *cpus;
cpus = of_find_node_by_path("/cpus");
if (!cpus) {
pr_warn("Missing cpus node, bailing out\n");
return NULL;
}
for_each_child_of_node(cpus, cpun) {
if (of_node_cmp(cpun->type, "cpu"))
continue;
/* Check for non-standard "ibm,ppc-interrupt-server#s" property
* for thread ids on PowerPC. If it doesn't exist fallback to
* standard "reg" property.
*/
if (IS_ENABLED(CONFIG_PPC) &&
__of_find_n_match_cpu_property(cpun,
"ibm,ppc-interrupt-server#s", cpu, thread))
return cpun;
if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
return cpun;
}
return NULL;
}
EXPORT_SYMBOL(of_get_cpu_node);
/** Checks if the given "compat" string matches one of the strings in /** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property * the device's "compatible" property
*/ */
......
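The of_get_cpu_node() added above matches a logical CPU against each /cpus child's "reg" cells (or the PowerPC "ibm,ppc-interrupt-server#s" property), so callers no longer need to know how hardware ids are encoded. A hedged sketch of a caller that also retrieves the thread index; the function name and messages are illustrative only:
	#include <linux/of.h>
	#include <linux/printk.h>
	static void foo_report_cpu_node(int cpu)
	{
		struct device_node *np;
		unsigned int thread = 0;
		/* Returns the matching /cpus child with a reference held,
		 * or NULL if the device tree has no node for this CPU. */
		np = of_get_cpu_node(cpu, &thread);
		if (!np) {
			pr_warn("no device node for CPU %d\n", cpu);
			return;
		}
		pr_info("CPU %d is thread %u of %s\n", cpu, thread, np->full_name);
		of_node_put(np);
	}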
...@@ -28,6 +28,7 @@ struct cpu { ...@@ -28,6 +28,7 @@ struct cpu {
extern int register_cpu(struct cpu *cpu, int num); extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu); extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu); extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern int cpu_add_dev_attr(struct device_attribute *attr); extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr); extern void cpu_remove_dev_attr(struct device_attribute *attr);
......
...@@ -11,71 +11,36 @@ ...@@ -11,71 +11,36 @@
#ifndef _LINUX_CPUFREQ_H #ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H #define _LINUX_CPUFREQ_H
#include <asm/cputime.h> #include <linux/cpumask.h>
#include <linux/mutex.h> #include <linux/completion.h>
#include <linux/notifier.h>
#include <linux/threads.h>
#include <linux/kobject.h> #include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/sysfs.h> #include <linux/sysfs.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <asm/div64.h>
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
/********************************************************************* /*********************************************************************
* CPUFREQ NOTIFIER INTERFACE * * CPUFREQ INTERFACE *
*********************************************************************/ *********************************************************************/
/*
#define CPUFREQ_TRANSITION_NOTIFIER (0) * Frequency values here are CPU kHz
#define CPUFREQ_POLICY_NOTIFIER (1) *
#ifdef CONFIG_CPU_FREQ
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
extern void disable_cpufreq(void);
#else /* CONFIG_CPU_FREQ */
static inline int cpufreq_register_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
unsigned int list)
{
return 0;
}
static inline void disable_cpufreq(void) { }
#endif /* CONFIG_CPU_FREQ */
/* if (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
* two generic policies are available:
*/
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
/* Frequency values here are CPU kHz so that hardware which doesn't run
* with some frequencies can complain without having to guess what per
* cent / per mille means.
* Maximum transition latency is in nanoseconds - if it's unknown, * Maximum transition latency is in nanoseconds - if it's unknown,
* CPUFREQ_ETERNAL shall be used. * CPUFREQ_ETERNAL shall be used.
*/ */
#define CPUFREQ_ETERNAL (-1)
#define CPUFREQ_NAME_LEN 16
/* Print length for names. Extra 1 space for accommodating '\n' in prints */
#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
struct cpufreq_governor; struct cpufreq_governor;
/* /sys/devices/system/cpu/cpufreq: entry point for global variables */ struct cpufreq_freqs {
extern struct kobject *cpufreq_global_kobject; unsigned int cpu; /* cpu nr */
int cpufreq_get_global_kobject(void); unsigned int old;
void cpufreq_put_global_kobject(void); unsigned int new;
int cpufreq_sysfs_create_file(const struct attribute *attr); u8 flags; /* flags of cpufreq_driver, see below. */
void cpufreq_sysfs_remove_file(const struct attribute *attr); };
#define CPUFREQ_ETERNAL (-1)
struct cpufreq_cpuinfo { struct cpufreq_cpuinfo {
unsigned int max_freq; unsigned int max_freq;
unsigned int min_freq; unsigned int min_freq;
...@@ -117,123 +82,103 @@ struct cpufreq_policy { ...@@ -117,123 +82,103 @@ struct cpufreq_policy {
struct cpufreq_real_policy user_policy; struct cpufreq_real_policy user_policy;
struct list_head policy_list;
struct kobject kobj; struct kobject kobj;
struct completion kobj_unregister; struct completion kobj_unregister;
int transition_ongoing; /* Tracks transition status */ int transition_ongoing; /* Tracks transition status */
}; };
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_UPDATE_POLICY_CPU (4)
/* Only for ACPI */ /* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *policy);
static inline bool policy_is_shared(struct cpufreq_policy *policy) static inline bool policy_is_shared(struct cpufreq_policy *policy)
{ {
return cpumask_weight(policy->cpus) > 1; return cpumask_weight(policy->cpus) > 1;
} }
/******************** cpufreq transition notifiers *******************/ /* /sys/devices/system/cpu/cpufreq: entry point for global variables */
extern struct kobject *cpufreq_global_kobject;
#define CPUFREQ_PRECHANGE (0) int cpufreq_get_global_kobject(void);
#define CPUFREQ_POSTCHANGE (1) void cpufreq_put_global_kobject(void);
#define CPUFREQ_RESUMECHANGE (8) int cpufreq_sysfs_create_file(const struct attribute *attr);
#define CPUFREQ_SUSPENDCHANGE (9) void cpufreq_sysfs_remove_file(const struct attribute *attr);
struct cpufreq_freqs { #ifdef CONFIG_CPU_FREQ
unsigned int cpu; /* cpu nr */ unsigned int cpufreq_get(unsigned int cpu);
unsigned int old; unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int new; unsigned int cpufreq_quick_get_max(unsigned int cpu);
u8 flags; /* flags of cpufreq_driver, see below. */ void disable_cpufreq(void);
};
/** u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
* cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
* safe) int cpufreq_update_policy(unsigned int cpu);
* @old: old value bool have_governor_per_policy(void);
* @div: divisor struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
* @mult: multiplier #else
* static inline unsigned int cpufreq_get(unsigned int cpu)
*
* new = old * mult / div
*/
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
u_int mult)
{ {
#if BITS_PER_LONG == 32 return 0;
}
u64 result = ((u64) old) * ((u64) mult); static inline unsigned int cpufreq_quick_get(unsigned int cpu)
do_div(result, div); {
return (unsigned long) result; return 0;
}
#elif BITS_PER_LONG == 64 static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
unsigned long result = old * ((u64) mult); return 0;
result /= div; }
return result; static inline void disable_cpufreq(void) { }
#endif #endif
};
/********************************************************************* /*********************************************************************
* CPUFREQ GOVERNORS * * CPUFREQ DRIVER INTERFACE *
*********************************************************************/ *********************************************************************/
#define CPUFREQ_GOV_START 1 #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
#define CPUFREQ_GOV_STOP 2 #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
#define CPUFREQ_GOV_LIMITS 3
#define CPUFREQ_GOV_POLICY_INIT 4
#define CPUFREQ_GOV_POLICY_EXIT 5
struct cpufreq_governor { struct freq_attr {
char name[CPUFREQ_NAME_LEN]; struct attribute attr;
int initialized; ssize_t (*show)(struct cpufreq_policy *, char *);
int (*governor) (struct cpufreq_policy *policy, ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
unsigned int event);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
unsigned int max_transition_latency; /* HW must be able to switch to
next freq faster than this value in nano secs or we
will fallback to performance governor */
struct list_head governor_list;
struct module *owner;
}; };
/* #define cpufreq_freq_attr_ro(_name) \
* Pass a target to the cpufreq driver. static struct freq_attr _name = \
*/ __ATTR(_name, 0444, show_##_name, NULL)
extern int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, #define cpufreq_freq_attr_ro_perm(_name, _perm) \
unsigned int cpu); static struct freq_attr _name = \
__ATTR(_name, _perm, show_##_name, NULL)
int cpufreq_register_governor(struct cpufreq_governor *governor); #define cpufreq_freq_attr_rw(_name) \
void cpufreq_unregister_governor(struct cpufreq_governor *governor); static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
/********************************************************************* struct global_attr {
* CPUFREQ DRIVER INTERFACE * struct attribute attr;
*********************************************************************/ ssize_t (*show)(struct kobject *kobj,
struct attribute *attr, char *buf);
ssize_t (*store)(struct kobject *a, struct attribute *b,
const char *c, size_t count);
};
#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ #define define_one_global_ro(_name) \
#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ static struct global_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define define_one_global_rw(_name) \
static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
struct freq_attr;
struct cpufreq_driver { struct cpufreq_driver {
struct module *owner;
char name[CPUFREQ_NAME_LEN]; char name[CPUFREQ_NAME_LEN];
u8 flags; u8 flags;
/* /*
...@@ -258,8 +203,6 @@ struct cpufreq_driver { ...@@ -258,8 +203,6 @@ struct cpufreq_driver {
unsigned int (*get) (unsigned int cpu); unsigned int (*get) (unsigned int cpu);
/* optional */ /* optional */
unsigned int (*getavg) (struct cpufreq_policy *policy,
unsigned int cpu);
int (*bios_limit) (int cpu, unsigned int *limit); int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy); int (*exit) (struct cpufreq_policy *policy);
...@@ -269,7 +212,6 @@ struct cpufreq_driver { ...@@ -269,7 +212,6 @@ struct cpufreq_driver {
}; };
/* flags */ /* flags */
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */ * all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
...@@ -281,8 +223,7 @@ struct cpufreq_driver { ...@@ -281,8 +223,7 @@ struct cpufreq_driver {
int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
void cpufreq_notify_transition(struct cpufreq_policy *policy, const char *cpufreq_get_current_driver(void);
struct cpufreq_freqs *freqs, unsigned int state);
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
unsigned int min, unsigned int max) unsigned int min, unsigned int max)
...@@ -300,86 +241,117 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, ...@@ -300,86 +241,117 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
return; return;
} }
struct freq_attr { /*********************************************************************
struct attribute attr; * CPUFREQ NOTIFIER INTERFACE *
ssize_t (*show)(struct cpufreq_policy *, char *); *********************************************************************/
ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
#define cpufreq_freq_attr_ro(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define cpufreq_freq_attr_ro_perm(_name, _perm) \
static struct freq_attr _name = \
__ATTR(_name, _perm, show_##_name, NULL)
#define cpufreq_freq_attr_rw(_name) \
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
struct global_attr { #define CPUFREQ_TRANSITION_NOTIFIER (0)
struct attribute attr; #define CPUFREQ_POLICY_NOTIFIER (1)
ssize_t (*show)(struct kobject *kobj,
struct attribute *attr, char *buf);
ssize_t (*store)(struct kobject *a, struct attribute *b,
const char *c, size_t count);
};
#define define_one_global_ro(_name) \ /* Transition notifiers */
static struct global_attr _name = \ #define CPUFREQ_PRECHANGE (0)
__ATTR(_name, 0444, show_##_name, NULL) #define CPUFREQ_POSTCHANGE (1)
#define CPUFREQ_RESUMECHANGE (8)
#define CPUFREQ_SUSPENDCHANGE (9)
#define define_one_global_rw(_name) \ /* Policy Notifiers */
static struct global_attr _name = \ #define CPUFREQ_ADJUST (0)
__ATTR(_name, 0644, show_##_name, store_##_name) #define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_UPDATE_POLICY_CPU (4)
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); #ifdef CONFIG_CPU_FREQ
void cpufreq_cpu_put(struct cpufreq_policy *data); int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
const char *cpufreq_get_current_driver(void); int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
/********************************************************************* void cpufreq_notify_transition(struct cpufreq_policy *policy,
* CPUFREQ 2.6. INTERFACE * struct cpufreq_freqs *freqs, unsigned int state);
*********************************************************************/
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
int cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
#ifdef CONFIG_CPU_FREQ #else /* CONFIG_CPU_FREQ */
/* static inline int cpufreq_register_notifier(struct notifier_block *nb,
* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it unsigned int list)
*/
unsigned int cpufreq_get(unsigned int cpu);
#else
static inline unsigned int cpufreq_get(unsigned int cpu)
{ {
return 0; return 0;
} }
#endif static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
unsigned int list)
/*
* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
*/
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
#else
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{ {
return 0; return 0;
} }
static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) #endif /* !CONFIG_CPU_FREQ */
/**
* cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
* safe)
* @old: old value
* @div: divisor
* @mult: multiplier
*
*
* new = old * mult / div
*/
static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
u_int mult)
{ {
return 0; #if BITS_PER_LONG == 32
} u64 result = ((u64) old) * ((u64) mult);
do_div(result, div);
return (unsigned long) result;
#elif BITS_PER_LONG == 64
unsigned long result = old * ((u64) mult);
result /= div;
return result;
#endif #endif
}
/********************************************************************* /*********************************************************************
* CPUFREQ DEFAULT GOVERNOR * * CPUFREQ GOVERNORS *
*********************************************************************/ *********************************************************************/
/*
* If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
* two generic policies are available:
*/
#define CPUFREQ_POLICY_POWERSAVE (1)
#define CPUFREQ_POLICY_PERFORMANCE (2)
/* Governor Events */
#define CPUFREQ_GOV_START 1
#define CPUFREQ_GOV_STOP 2
#define CPUFREQ_GOV_LIMITS 3
#define CPUFREQ_GOV_POLICY_INIT 4
#define CPUFREQ_GOV_POLICY_EXIT 5
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int initialized;
int (*governor) (struct cpufreq_policy *policy,
unsigned int event);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
char *buf);
int (*store_setspeed) (struct cpufreq_policy *policy,
unsigned int freq);
unsigned int max_transition_latency; /* HW must be able to switch to
next freq faster than this value in nano secs or we
will fallback to performance governor */
struct list_head governor_list;
struct module *owner;
};
/* Pass a target to the cpufreq driver */
int cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
int __cpufreq_driver_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation);
int cpufreq_register_governor(struct cpufreq_governor *governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor);
/* CPUFREQ DEFAULT GOVERNOR */
/* /*
 * Performance governor is the fallback governor if any other gov failed to auto * Performance governor is the fallback governor if any other gov failed to auto
 * load due to latency restrictions * load due to latency restrictions
...@@ -428,18 +400,16 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, ...@@ -428,18 +400,16 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int relation, unsigned int relation,
unsigned int *index); unsigned int *index);
/* the following 3 functions are for cpufreq core use only */ void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */ /* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu); unsigned int cpu);
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
void cpufreq_frequency_table_put_attr(unsigned int cpu); void cpufreq_frequency_table_put_attr(unsigned int cpu);
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
#endif /* _LINUX_CPUFREQ_H */ #endif /* _LINUX_CPUFREQ_H */
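Note that the cpufreq.h hunk above also drops the owner field from struct cpufreq_driver itself, which is why every driver in this series deletes its ".owner = THIS_MODULE" line. A hypothetical driver definition against the reorganized header, with stub callbacks only so the initializer is self-contained:
	#include <linux/cpufreq.h>
	#include <linux/errno.h>
	/* Stub callbacks; only the shape of the initializer below matters. */
	static int foo_cpufreq_init_policy(struct cpufreq_policy *policy)
	{
		return -ENODEV;
	}
	static int foo_cpufreq_verify(struct cpufreq_policy *policy)
	{
		return 0;
	}
	static int foo_cpufreq_target(struct cpufreq_policy *policy,
				      unsigned int target_freq,
				      unsigned int relation)
	{
		return 0;
	}
	static struct cpufreq_driver foo_cpufreq_driver = {
		.name	= "foo-cpufreq",	/* hypothetical */
		.flags	= CPUFREQ_CONST_LOOPS,
		.init	= foo_cpufreq_init_policy,
		.verify	= foo_cpufreq_verify,
		.target	= foo_cpufreq_target,
		/* no .owner: the field was dropped from struct cpufreq_driver */
	};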
...@@ -266,6 +266,7 @@ extern int of_device_is_available(const struct device_node *device); ...@@ -266,6 +266,7 @@ extern int of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node, extern const void *of_get_property(const struct device_node *node,
const char *name, const char *name,
int *lenp); int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
#define for_each_property_of_node(dn, pp) \ #define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next) for (pp = dn->properties; pp != NULL; pp = pp->next)
...@@ -459,6 +460,12 @@ static inline const void *of_get_property(const struct device_node *node, ...@@ -459,6 +460,12 @@ static inline const void *of_get_property(const struct device_node *node,
return NULL; return NULL;
} }
static inline struct device_node *of_get_cpu_node(int cpu,
unsigned int *thread)
{
return NULL;
}
static inline int of_property_read_u64(const struct device_node *np, static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value) const char *propname, u64 *out_value)
{ {
......
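The !CONFIG_OF stub added above returns NULL, so common code can call of_get_cpu_node() without an #ifdef and simply fall back when no node is available. A small sketch; the "foo,boost-khz" property and the default value are made up for illustration:
	#include <linux/of.h>
	#include <linux/types.h>
	static u32 foo_cpu_boost_khz(int cpu)
	{
		struct device_node *np = of_get_cpu_node(cpu, NULL);
		u32 khz = 1000000;	/* hypothetical default */
		if (np) {
			/* Optional, made-up property; keep the default on error. */
			of_property_read_u32(np, "foo,boost-khz", &khz);
			of_node_put(np);
		}
		/* On !CONFIG_OF kernels, np is always NULL and the default wins. */
		return khz;
	}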
#ifndef _LINUX_OF_DEVICE_H #ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H #define _LINUX_OF_DEVICE_H
#include <linux/cpu.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/of_platform.h> /* temporary until merge */ #include <linux/of_platform.h> /* temporary until merge */
...@@ -43,6 +44,15 @@ static inline void of_device_node_put(struct device *dev) ...@@ -43,6 +44,15 @@ static inline void of_device_node_put(struct device *dev)
of_node_put(dev->of_node); of_node_put(dev->of_node);
} }
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
struct device *cpu_dev;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev)
return NULL;
return of_node_get(cpu_dev->of_node);
}
#else /* CONFIG_OF */ #else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev, static inline int of_driver_match_device(struct device *dev,
...@@ -67,6 +77,11 @@ static inline const struct of_device_id *of_match_device( ...@@ -67,6 +77,11 @@ static inline const struct of_device_id *of_match_device(
{ {
return NULL; return NULL;
} }
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
#endif /* CONFIG_OF */ #endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */ #endif /* _LINUX_OF_DEVICE_H */