Commit 4419fbd4 authored by Rafael J. Wysocki's avatar Rafael J. Wysocki

Merge branch 'pm-cpufreq'

* pm-cpufreq: (55 commits)
  cpufreq / intel_pstate: Fix 32 bit build
  cpufreq: conservative: Fix typos in comments
  cpufreq: ondemand: Fix typos in comments
  cpufreq: exynos: simplify .init() for setting policy->cpus
  cpufreq: kirkwood: Add a cpufreq driver for Marvell Kirkwood SoCs
  cpufreq/x86: Add P-state driver for sandy bridge.
  cpufreq_stats: do not remove sysfs files if frequency table is not present
  cpufreq: Do not track governor name for scaling drivers with internal governors.
  cpufreq: Only call cpufreq_out_of_sync() for driver that implement cpufreq_driver.target()
  cpufreq: Retrieve current frequency from scaling drivers with internal governors
  cpufreq: Fix locking issues
  cpufreq: Create a macro for unlock_policy_rwsem{read,write}
  cpufreq: Remove unused HOTPLUG_CPU code
  cpufreq: governors: Fix WARN_ON() for multi-policy platforms
  cpufreq: ondemand: Replace down_differential tuner with adj_up_threshold
  cpufreq / stats: Get rid of CPUFREQ_STATDEVICE_ATTR
  cpufreq: Don't check cpu_online(policy->cpu)
  cpufreq: add imx6q-cpufreq driver
  cpufreq: Don't remove sysfs link for policy->cpu
  cpufreq: Remove unnecessary use of policy->shared_type
  ...
parents 95ecb407 191e5edf
......@@ -111,6 +111,12 @@ policy->governor must contain the "default policy" for
For setting some of these values, the frequency table helpers might be
helpful. See section 2 for more information on them.
SMP systems normally have the same clock source for a group of CPUs. For these,
.init() is called only once, for the first online CPU. The .init()
routine must then initialize policy->cpus with a mask of all possible CPUs
(online + offline) that share the clock. The core will copy this mask onto
policy->related_cpus and reset policy->cpus to carry only online CPUs.
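As an illustration, a minimal .init() for a hypothetical driver whose CPUs all
share one clock might look like the sketch below (foo_freq_table, foo_get_speed()
and the latency value are made-up placeholders, not taken from this patch set):

	static int foo_cpufreq_init(struct cpufreq_policy *policy)
	{
		/*
		 * Every possible CPU shares the clock on this imaginary SoC, so
		 * report them all; the core copies the mask to policy->related_cpus
		 * and then trims policy->cpus back to the online CPUs.
		 */
		cpumask_setall(policy->cpus);

		policy->cur = foo_get_speed(policy->cpu);		/* kHz */
		policy->cpuinfo.transition_latency = 300 * 1000;	/* ns */

		return cpufreq_frequency_table_cpuinfo(policy, foo_freq_table);
	}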
1.3 verify
------------
......
......@@ -190,11 +190,11 @@ scaling_max_freq show the current "policy limits" (in
first set scaling_max_freq, then
scaling_min_freq.
affected_cpus : List of CPUs that require software coordination
of frequency.
affected_cpus : List of Online CPUs that require software
coordination of frequency.
related_cpus : List of CPUs that need some sort of frequency
coordination, whether software or hardware.
related_cpus : List of Online + Offline CPUs that need software
coordination of frequency.
scaling_driver : Hardware driver for cpufreq.
......
Marvell Kirkwood Platforms Device Tree Bindings
-----------------------------------------------
Boards with a SoC of the Marvell Kirkwood
shall have the following property:
Required root node property:
compatible: must contain "marvell,kirkwood";
In order to support the kirkwood cpufreq driver, there must be a node
cpus/cpu@0 with three clocks, "cpu_clk", "ddrclk" and "powersave",
where the "powersave" clock is a gating clock used to switch the CPU
between the "cpu_clk" and the "ddrclk".
Example:
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
device_type = "cpu";
compatible = "marvell,sheeva-88SV131";
clocks = <&core_clk 1>, <&core_clk 3>, <&gate_clk 11>;
clock-names = "cpu_clk", "ddrclk", "powersave";
};
......@@ -37,6 +37,16 @@ cpu@900 {
next-level-cache = <&L2>;
clocks = <&a9pll>;
clock-names = "cpu";
operating-points = <
/* kHz ignored */
1300000 1000000
1200000 1000000
1100000 1000000
800000 1000000
400000 1000000
200000 1000000
>;
clock-latency = <100000>;
};
cpu@901 {
......
......@@ -31,7 +31,6 @@ static void __iomem *twd_base;
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
static bool common_setup_called;
static DEFINE_PER_CPU(bool, percpu_setup_called);
static struct clock_event_device __percpu **twd_evt;
......@@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
return IRQ_NONE;
}
static struct clk *twd_get_clock(void)
static void twd_get_clock(struct device_node *np)
{
struct clk *clk;
int err;
clk = clk_get_sys("smp_twd", NULL);
if (IS_ERR(clk)) {
pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
return clk;
if (np)
twd_clk = of_clk_get(np, 0);
else
twd_clk = clk_get_sys("smp_twd", NULL);
if (IS_ERR(twd_clk)) {
pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk));
return;
}
err = clk_prepare_enable(clk);
err = clk_prepare_enable(twd_clk);
if (err) {
pr_err("smp_twd: clock failed to prepare+enable: %d\n", err);
clk_put(clk);
return ERR_PTR(err);
clk_put(twd_clk);
return;
}
return clk;
twd_timer_rate = clk_get_rate(twd_clk);
}
/*
......@@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
}
per_cpu(percpu_setup_called, cpu) = true;
/*
* This stuff only need to be done once for the entire TWD cluster
* during the runtime of the system.
*/
if (!common_setup_called) {
twd_clk = twd_get_clock();
/*
* We use IS_ERR_OR_NULL() here, because if the clock stubs
* are active we will get a valid clk reference which is
* however NULL and will return the rate 0. In that case we
* need to calibrate the rate instead.
*/
if (!IS_ERR_OR_NULL(twd_clk))
twd_timer_rate = clk_get_rate(twd_clk);
else
twd_calibrate_rate();
common_setup_called = true;
}
twd_calibrate_rate();
/*
* The following is done once per CPU the first time .setup() is
......@@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {
.stop = twd_timer_stop,
};
static int __init twd_local_timer_common_register(void)
static int __init twd_local_timer_common_register(struct device_node *np)
{
int err;
......@@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void)
if (err)
goto out_irq;
twd_get_clock(np);
return 0;
out_irq:
......@@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
if (!twd_base)
return -ENOMEM;
return twd_local_timer_common_register();
return twd_local_timer_common_register(NULL);
}
#ifdef CONFIG_OF
......@@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void)
goto out;
}
err = twd_local_timer_common_register();
err = twd_local_timer_common_register(np);
out:
WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
......
config ARCH_HIGHBANK
bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
select ARCH_HAS_CPUFREQ
select ARCH_HAS_OPP
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_GIC
......@@ -11,5 +13,7 @@ config ARCH_HIGHBANK
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
select HAVE_SMP
select MAILBOX
select PL320_MBOX
select SPARSE_IRQ
select USE_OF
......@@ -243,8 +243,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->related_cpus, cpu_possible_mask);
cpumask_copy(policy->cpus, cpu_possible_mask);
if (policy->cpu == 0)
register_pm_notifier(&tegra_cpu_pm_notifier);
......
......@@ -134,6 +134,8 @@ source "drivers/hwspinlock/Kconfig"
source "drivers/clocksource/Kconfig"
source "drivers/mailbox/Kconfig"
source "drivers/iommu/Kconfig"
source "drivers/remoteproc/Kconfig"
......
......@@ -130,6 +130,7 @@ obj-y += platform/
#common clk code
obj-y += clk/
obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_NFC) += nfc/
obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
......
......@@ -162,7 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
return v;
}
EXPORT_SYMBOL(opp_get_voltage);
EXPORT_SYMBOL_GPL(opp_get_voltage);
/**
* opp_get_freq() - Gets the frequency corresponding to an available opp
......@@ -192,7 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
return f;
}
EXPORT_SYMBOL(opp_get_freq);
EXPORT_SYMBOL_GPL(opp_get_freq);
/**
* opp_get_opp_count() - Get number of opps available in the opp list
......@@ -225,7 +225,7 @@ int opp_get_opp_count(struct device *dev)
return count;
}
EXPORT_SYMBOL(opp_get_opp_count);
EXPORT_SYMBOL_GPL(opp_get_opp_count);
/**
* opp_find_freq_exact() - search for an exact frequency
......@@ -276,7 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
return opp;
}
EXPORT_SYMBOL(opp_find_freq_exact);
EXPORT_SYMBOL_GPL(opp_find_freq_exact);
/**
* opp_find_freq_ceil() - Search for a rounded ceil freq
......@@ -323,7 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
return opp;
}
EXPORT_SYMBOL(opp_find_freq_ceil);
EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
/**
* opp_find_freq_floor() - Search for a rounded floor freq
......@@ -374,7 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
return opp;
}
EXPORT_SYMBOL(opp_find_freq_floor);
EXPORT_SYMBOL_GPL(opp_find_freq_floor);
/**
* opp_add() - Add an OPP table from a table definition
......@@ -568,7 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL(opp_enable);
EXPORT_SYMBOL_GPL(opp_enable);
/**
* opp_disable() - Disable a specific OPP
......@@ -590,7 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
{
return opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL(opp_disable);
EXPORT_SYMBOL_GPL(opp_disable);
#ifdef CONFIG_CPU_FREQ
/**
......@@ -661,6 +661,7 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
/**
* opp_free_cpufreq_table() - free the cpufreq table
......@@ -678,6 +679,7 @@ void opp_free_cpufreq_table(struct device *dev,
kfree(*table);
*table = NULL;
}
EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
#endif /* CONFIG_CPU_FREQ */
/**
......@@ -738,4 +740,5 @@ int of_init_opp_table(struct device *dev)
return 0;
}
EXPORT_SYMBOL_GPL(of_init_opp_table);
#endif
......@@ -182,8 +182,10 @@ static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
reg |= HB_PLL_EXT_ENA;
reg &= ~HB_PLL_EXT_BYPASS;
} else {
writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
reg &= ~HB_PLL_DIVQ_MASK;
reg |= divq << HB_PLL_DIVQ_SHIFT;
writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
}
writel(reg, hbclk->reg);
......
......@@ -193,6 +193,7 @@ static const struct mvebu_soc_descr __initconst kirkwood_gating_descr[] = {
{ "runit", NULL, 7 },
{ "xor0", NULL, 8 },
{ "audio", NULL, 9 },
{ "powersave", "cpuclk", 11 },
{ "sata0", NULL, 14 },
{ "sata1", NULL, 15 },
{ "xor1", NULL, 16 },
......
......@@ -185,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
config GENERIC_CPUFREQ_CPU0
bool "Generic CPU0 cpufreq driver"
tristate "Generic CPU0 cpufreq driver"
depends on HAVE_CLK && REGULATOR && PM_OPP && OF
select CPU_FREQ_TABLE
help
......
......@@ -77,9 +77,39 @@ config ARM_EXYNOS5250_CPUFREQ
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
config ARM_KIRKWOOD_CPUFREQ
def_bool ARCH_KIRKWOOD && OF
help
This adds the CPUFreq driver for Marvell Kirkwood
SoCs.
config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6Q cpufreq support"
depends on SOC_IMX6Q
depends on REGULATOR_ANATOP
help
This adds cpufreq driver support for Freescale i.MX6Q SOC.
If in doubt, say N.
config ARM_SPEAR_CPUFREQ
bool "SPEAr CPUFreq support"
depends on PLAT_SPEAR
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK
select CPU_FREQ_TABLE
select GENERIC_CPUFREQ_CPU0
select PM_OPP
select REGULATOR
default m
help
This adds the CPUFreq driver for Calxeda Highbank SoC
based boards.
If in doubt, say N.
......@@ -2,6 +2,24 @@
# x86 CPU Frequency scaling drivers
#
config X86_INTEL_PSTATE
tristate "Intel P state control"
depends on X86
help
This driver provides a P-state driver for Intel Core processors.
The driver implements an internal governor and will become
the scaling driver and governor for Sandy Bridge processors.
When this driver is enabled, it will become the preferred
scaling driver for Sandy Bridge processors.
Note: This driver should be built with the same settings as
the other scaling drivers configured into the system
(module/built-in) in order for the driver to register itself
as the scaling driver on the system.
If in doubt, say N.
config X86_PCC_CPUFREQ
tristate "Processor Clocking Control interface driver"
depends on ACPI && ACPI_PROCESSOR
......
......@@ -19,11 +19,12 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
##################################################################################
# x86 drivers.
# Link order matters. K8 is preferred to ACPI because of firmware bugs in early
# K8 systems. ACPI is preferred to all other hardware-specific drivers.
# K8 systems. This is still the case, but acpi-cpufreq now errors out on them so that
# powernow-k8 can then load. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
......@@ -39,6 +40,7 @@ obj-$(CONFIG_X86_SPEEDSTEP_SMI) += speedstep-smi.o
obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO) += speedstep-centrino.o
obj-$(CONFIG_X86_P4_CLOCKMOD) += p4-clockmod.o
obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
obj-$(CONFIG_X86_INTEL_PSTATE) += intel_pstate.o
##################################################################################
# ARM SoC drivers
......@@ -50,8 +52,11 @@ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
##################################################################################
# PowerPC platform drivers
......
......@@ -734,7 +734,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_copy(policy->cpus, cpu_core_mask(cpu));
}
......@@ -762,6 +762,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
switch (perf->control_register.space_id) {
case ACPI_ADR_SPACE_SYSTEM_IO:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
boot_cpu_data.x86 == 0xf) {
pr_debug("AMD K8 systems must use native drivers.\n");
result = -ENODEV;
goto err_unreg;
}
pr_debug("SYSTEM IO addr space\n");
data->cpu_feature = SYSTEM_IO_CAPABLE;
break;
......
......@@ -12,12 +12,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
......@@ -146,7 +146,6 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
* share the same clock and voltage. Use the cpufreq affected_cpus
* interface to have all CPUs scaled together.
*/
policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
......@@ -177,34 +176,32 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
.attr = cpu0_cpufreq_attr,
};
static int cpu0_cpufreq_driver_init(void)
static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
int ret;
np = of_find_node_by_path("/cpus/cpu@0");
for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
if (of_get_property(np, "operating-points", NULL))
break;
}
if (!np) {
pr_err("failed to find cpu0 node\n");
return -ENOENT;
}
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
pr_err("failed to get cpu0 device\n");
ret = -ENODEV;
goto out_put_node;
}
cpu_dev = &pdev->dev;
cpu_dev->of_node = np;
cpu_clk = clk_get(cpu_dev, NULL);
cpu_clk = devm_clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
goto out_put_node;
}
cpu_reg = regulator_get(cpu_dev, "cpu0");
cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
pr_warn("failed to get cpu0 regulator\n");
cpu_reg = NULL;
......@@ -267,7 +264,24 @@ static int cpu0_cpufreq_driver_init(void)
of_node_put(np);
return ret;
}
late_initcall(cpu0_cpufreq_driver_init);
static int cpu0_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&cpu0_cpufreq_driver);
opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
static struct platform_driver cpu0_cpufreq_platdrv = {
.driver = {
.name = "cpufreq-cpu0",
.owner = THIS_MODULE,
},
.probe = cpu0_cpufreq_probe,
.remove = cpu0_cpufreq_remove,
};
module_platform_driver(cpu0_cpufreq_platdrv);
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
......
This diff is collapsed.
......@@ -25,7 +25,7 @@
#include "cpufreq_governor.h"
/* Conservative governor macors */
/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define DEF_SAMPLING_DOWN_FACTOR (1)
......@@ -113,17 +113,20 @@ static void cs_check_cpu(int cpu, unsigned int load)
static void cs_dbs_timer(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
struct cs_cpu_dbs_info_s, cdbs.work.work);
unsigned int cpu = dbs_info->cdbs.cpu;
unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
cpu);
int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
mutex_lock(&dbs_info->cdbs.timer_mutex);
mutex_lock(&core_dbs_info->cdbs.timer_mutex);
if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
dbs_check_cpu(&cs_dbs_data, cpu);
dbs_check_cpu(&cs_dbs_data, cpu);
schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
mutex_unlock(&dbs_info->cdbs.timer_mutex);
schedule_delayed_work_on(smp_processor_id(), dw, delay);
mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
......@@ -141,7 +144,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
/*
* we only care if our internally tracked freq moves outside the 'valid'
* ranges of freqency available to us otherwise we do not change it
* ranges of frequency available to us otherwise we do not change it
*/
if (dbs_info->requested_freq > policy->max
|| dbs_info->requested_freq < policy->min)
......
......@@ -161,25 +161,48 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);
static inline void dbs_timer_init(struct dbs_data *dbs_data,
struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
unsigned int sampling_rate)
{
int delay = delay_for_sampling_rate(sampling_rate);
struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
schedule_delayed_work_on(cpu, &cdbs->work, delay);
}
static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
{
struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
cancel_delayed_work_sync(&cdbs->work);
}
/* Return whether we need to evaluate the CPU load again or not */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate)
{
if (policy_is_shared(cdbs->cur_policy)) {
ktime_t time_now = ktime_get();
s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
/* Do nothing if we recently have sampled */
if (delta_us < (s64)(sampling_rate / 2))
return false;
else
cdbs->time_stamp = time_now;
}
return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event)
{
struct od_cpu_dbs_info_s *od_dbs_info = NULL;
struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
struct cs_ops *cs_ops = NULL;
struct od_ops *od_ops = NULL;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
struct cpu_dbs_common_info *cpu_cdbs;
......@@ -192,109 +215,111 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
sampling_rate = &cs_tuners->sampling_rate;
ignore_nice = cs_tuners->ignore_nice;
cs_ops = dbs_data->gov_ops;
} else {
od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu);
sampling_rate = &od_tuners->sampling_rate;
ignore_nice = od_tuners->ignore_nice;
od_ops = dbs_data->gov_ops;
}
switch (event) {
case CPUFREQ_GOV_START:
if ((!cpu_online(cpu)) || (!policy->cur))
if (!policy->cur)
return -EINVAL;
mutex_lock(&dbs_data->mutex);
dbs_data->enable++;
cpu_cdbs->cpu = cpu;
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_common_info *j_cdbs;
j_cdbs = dbs_data->get_cpu_cdbs(j);
struct cpu_dbs_common_info *j_cdbs =
dbs_data->get_cpu_cdbs(j);
j_cdbs->cpu = j;
j_cdbs->cur_policy = policy;
j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
&j_cdbs->prev_cpu_wall);
if (ignore_nice)
j_cdbs->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
/*
* Start the timerschedule work, when this governor is used for
* first time
*/
if (dbs_data->enable != 1)
goto second_time;
rc = sysfs_create_group(cpufreq_global_kobject,
dbs_data->attr_group);
if (rc) {
mutex_unlock(&dbs_data->mutex);
return rc;
mutex_init(&j_cdbs->timer_mutex);
INIT_DEFERRABLE_WORK(&j_cdbs->work,
dbs_data->gov_dbs_timer);
}
/* policy latency is in nS. Convert it to uS first */
latency = policy->cpuinfo.transition_latency / 1000;
if (latency == 0)
latency = 1;
if (!policy->governor->initialized) {
rc = sysfs_create_group(cpufreq_global_kobject,
dbs_data->attr_group);
if (rc) {
mutex_unlock(&dbs_data->mutex);
return rc;
}
}
/*
* conservative does not implement micro accounting like the ondemand
* governor, thus we are bound to jiffies/HZ
*/
if (dbs_data->governor == GOV_CONSERVATIVE) {
struct cs_ops *ops = dbs_data->gov_ops;
cs_dbs_info->down_skip = 0;
cs_dbs_info->enable = 1;
cs_dbs_info->requested_freq = policy->cur;
cpufreq_register_notifier(ops->notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (!policy->governor->initialized) {
cpufreq_register_notifier(cs_ops->notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
jiffies_to_usecs(10);
dbs_data->min_sampling_rate =
MIN_SAMPLING_RATE_RATIO *
jiffies_to_usecs(10);
}
} else {
struct od_ops *ops = dbs_data->gov_ops;
od_dbs_info->rate_mult = 1;
od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
od_ops->powersave_bias_init_cpu(cpu);
od_tuners->io_is_busy = ops->io_busy();
if (!policy->governor->initialized)
od_tuners->io_is_busy = od_ops->io_busy();
}
if (policy->governor->initialized)
goto unlock;
/* policy latency is in nS. Convert it to uS first */
latency = policy->cpuinfo.transition_latency / 1000;
if (latency == 0)
latency = 1;
/* Bring kernel and HW constraints together */
dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
MIN_LATENCY_MULTIPLIER * latency);
*sampling_rate = max(dbs_data->min_sampling_rate, latency *
LATENCY_MULTIPLIER);
second_time:
if (dbs_data->governor == GOV_CONSERVATIVE) {
cs_dbs_info->down_skip = 0;
cs_dbs_info->enable = 1;
cs_dbs_info->requested_freq = policy->cur;
} else {
struct od_ops *ops = dbs_data->gov_ops;
od_dbs_info->rate_mult = 1;
od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
ops->powersave_bias_init_cpu(cpu);
}
unlock:
mutex_unlock(&dbs_data->mutex);
mutex_init(&cpu_cdbs->timer_mutex);
dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate);
/* Initiate timer time stamp */
cpu_cdbs->time_stamp = ktime_get();
for_each_cpu(j, policy->cpus)
dbs_timer_init(dbs_data, j, *sampling_rate);
break;
case CPUFREQ_GOV_STOP:
if (dbs_data->governor == GOV_CONSERVATIVE)
cs_dbs_info->enable = 0;
dbs_timer_exit(cpu_cdbs);
for_each_cpu(j, policy->cpus)
dbs_timer_exit(dbs_data, j);
mutex_lock(&dbs_data->mutex);
mutex_destroy(&cpu_cdbs->timer_mutex);
dbs_data->enable--;
if (!dbs_data->enable) {
struct cs_ops *ops = dbs_data->gov_ops;
if (policy->governor->initialized == 1) {
sysfs_remove_group(cpufreq_global_kobject,
dbs_data->attr_group);
if (dbs_data->governor == GOV_CONSERVATIVE)
cpufreq_unregister_notifier(ops->notifier_block,
cpufreq_unregister_notifier(cs_ops->notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
mutex_unlock(&dbs_data->mutex);
......
......@@ -82,6 +82,7 @@ struct cpu_dbs_common_info {
* the governor or limits.
*/
struct mutex timer_mutex;
ktime_t time_stamp;
};
struct od_cpu_dbs_info_s {
......@@ -108,7 +109,7 @@ struct od_dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int down_differential;
unsigned int adj_up_threshold;
unsigned int powersave_bias;
unsigned int io_is_busy;
};
......@@ -129,7 +130,6 @@ struct dbs_data {
#define GOV_CONSERVATIVE 1
int governor;
unsigned int min_sampling_rate;
unsigned int enable; /* number of CPUs using this policy */
struct attribute_group *attr_group;
void *tuners;
......@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall);
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu);
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event);
#endif /* _CPUFREQ_GOVERNER_H */
......@@ -26,7 +26,7 @@
#include "cpufreq_governor.h"
/* On-demand governor macors */
/* On-demand governor macros */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
......@@ -47,7 +47,8 @@ static struct cpufreq_governor cpufreq_gov_ondemand;
static struct od_dbs_tuners od_tuners = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
};
......@@ -65,7 +66,7 @@ static void ondemand_powersave_bias_init_cpu(int cpu)
* efficient idling at a higher frequency/voltage is.
* Pavel Machek says this is not so for various generations of AMD and old
* Intel systems.
* Mike Chan (androidlcom) calis this is also not true for ARM.
* Mike Chan (android.com) claims this is also not true for ARM.
* Because of this, whitelist specific known (series) of CPUs by default, and
* leave all others up to the user.
*/
......@@ -73,7 +74,7 @@ static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
/*
* For Intel, Core 2 (model 15) andl later have an efficient idle.
* For Intel, Core 2 (model 15) and later have an efficient idle.
*/
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 6 &&
......@@ -158,8 +159,8 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
/*
* Every sampling_rate, we check, if current idle time is less than 20%
* (default), then we try to increase frequency Every sampling_rate, we look for
* a the lowest frequency which can sustain the load while keeping idle time
* (default), then we try to increase frequency. Every sampling_rate, we look
* for the lowest frequency which can sustain the load while keeping idle time
* over 30%. If such a frequency exist, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency. Frequency reduction
......@@ -192,11 +193,9 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
* support the current CPU usage without triggering the up policy. To be
* safe, we focus 10 points under the threshold.
*/
if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) *
policy->cur) {
if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
unsigned int freq_next;
freq_next = load_freq / (od_tuners.up_threshold -
od_tuners.down_differential);
freq_next = load_freq / od_tuners.adj_up_threshold;
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
......@@ -218,33 +217,42 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
static void od_dbs_timer(struct work_struct *work)
{
struct delayed_work *dw = to_delayed_work(work);
struct od_cpu_dbs_info_s *dbs_info =
container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
unsigned int cpu = dbs_info->cdbs.cpu;
int delay, sample_type = dbs_info->sample_type;
unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
cpu);
int delay, sample_type = core_dbs_info->sample_type;
bool eval_load;
mutex_lock(&dbs_info->cdbs.timer_mutex);
mutex_lock(&core_dbs_info->cdbs.timer_mutex);
eval_load = need_load_eval(&core_dbs_info->cdbs,
od_tuners.sampling_rate);
/* Common NORMAL_SAMPLE setup */
dbs_info->sample_type = OD_NORMAL_SAMPLE;
core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
if (sample_type == OD_SUB_SAMPLE) {
delay = dbs_info->freq_lo_jiffies;
__cpufreq_driver_target(dbs_info->cdbs.cur_policy,
dbs_info->freq_lo, CPUFREQ_RELATION_H);
delay = core_dbs_info->freq_lo_jiffies;
if (eval_load)
__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
core_dbs_info->freq_lo,
CPUFREQ_RELATION_H);
} else {
dbs_check_cpu(&od_dbs_data, cpu);
if (dbs_info->freq_lo) {
if (eval_load)
dbs_check_cpu(&od_dbs_data, cpu);
if (core_dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
dbs_info->sample_type = OD_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
core_dbs_info->sample_type = OD_SUB_SAMPLE;
delay = core_dbs_info->freq_hi_jiffies;
} else {
delay = delay_for_sampling_rate(od_tuners.sampling_rate
* dbs_info->rate_mult);
* core_dbs_info->rate_mult);
}
}
schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
mutex_unlock(&dbs_info->cdbs.timer_mutex);
schedule_delayed_work_on(smp_processor_id(), dw, delay);
mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
/************************** sysfs interface ************************/
......@@ -259,7 +267,7 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
* update_sampling_rate - update sampling rate effective immediately if needed.
* @new_rate: new sampling rate
*
* If new rate is smaller than the old, simply updaing
* If new rate is smaller than the old, simply updating
* dbs_tuners_int.sampling_rate might not be appropriate. For example, if the
* original sampling_rate was 1 second and the requested new sampling rate is 10
* ms because the user needs immediate reaction from ondemand governor, but not
......@@ -287,7 +295,7 @@ static void update_sampling_rate(unsigned int new_rate)
cpufreq_cpu_put(policy);
continue;
}
dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
cpufreq_cpu_put(policy);
mutex_lock(&dbs_info->cdbs.timer_mutex);
......@@ -306,8 +314,7 @@ static void update_sampling_rate(unsigned int new_rate)
cancel_delayed_work_sync(&dbs_info->cdbs.work);
mutex_lock(&dbs_info->cdbs.timer_mutex);
schedule_delayed_work_on(dbs_info->cdbs.cpu,
&dbs_info->cdbs.work,
schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
usecs_to_jiffies(new_rate));
}
......@@ -351,6 +358,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
/* Calculate the new adj_up_threshold */
od_tuners.adj_up_threshold += input;
od_tuners.adj_up_threshold -= od_tuners.up_threshold;
od_tuners.up_threshold = input;
return count;
}
......@@ -507,7 +518,8 @@ static int __init cpufreq_gov_dbs_init(void)
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
......
......@@ -24,12 +24,6 @@
static spinlock_t cpufreq_stats_lock;
#define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \
static struct freq_attr _attr_##_name = {\
.attr = {.name = __stringify(_name), .mode = _mode, }, \
.show = _show,\
};
struct cpufreq_stats {
unsigned int cpu;
unsigned int total_trans;
......@@ -136,17 +130,17 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
return PAGE_SIZE;
return len;
}
CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table);
cpufreq_freq_attr_ro(trans_table);
#endif
CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans);
CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state);
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);
static struct attribute *default_attrs[] = {
&_attr_total_trans.attr,
&_attr_time_in_state.attr,
&total_trans.attr,
&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
&_attr_trans_table.attr,
&trans_table.attr,
#endif
NULL
};
......@@ -170,11 +164,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
static void cpufreq_stats_free_table(unsigned int cpu)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
if (stat) {
pr_debug("%s: Free stat table\n", __func__);
kfree(stat->time_in_state);
kfree(stat);
per_cpu(cpufreq_stats_table, cpu) = NULL;
}
per_cpu(cpufreq_stats_table, cpu) = NULL;
}
/* must be called early in the CPU removal sequence (before
......@@ -183,8 +179,14 @@ static void cpufreq_stats_free_table(unsigned int cpu)
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
if (policy && policy->cpu == cpu)
if (!cpufreq_frequency_get_table(cpu))
return;
if (policy && !policy_is_shared(policy)) {
pr_debug("%s: Free sysfs stat\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
}
if (policy)
cpufreq_cpu_put(policy);
}
......@@ -262,6 +264,19 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
return ret;
}
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
policy->last_cpu);
pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
policy->cpu, policy->last_cpu);
per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
policy->last_cpu);
per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
stat->cpu = policy->cpu;
}
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
unsigned long val, void *data)
{
......@@ -269,6 +284,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
struct cpufreq_frequency_table *table;
unsigned int cpu = policy->cpu;
if (val == CPUFREQ_UPDATE_POLICY_CPU) {
cpufreq_stats_update_policy_cpu(policy);
return 0;
}
if (val != CPUFREQ_NOTIFY)
return 0;
table = cpufreq_frequency_get_table(cpu);
......
......@@ -118,8 +118,6 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
switch (event) {
case CPUFREQ_GOV_START:
if (!cpu_online(cpu))
return -EINVAL;
BUG_ON(!policy->cur);
mutex_lock(&userspace_mutex);
......
......@@ -128,9 +128,7 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */
/* policy sharing between dual CPUs */
cpumask_copy(policy->cpus, cpu_present_mask);
policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
cpumask_setall(policy->cpus);
return 0;
}
......
......@@ -227,19 +227,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
/*
* EXYNOS4 multi-core processors has 2 cores
* that the frequency cannot be set independently.
* Each cpu is bound to the same speed.
* So the affected cpu is all of the cpus.
*/
if (num_online_cpus() == 1) {
cpumask_copy(policy->related_cpus, cpu_possible_mask);
cpumask_copy(policy->cpus, cpu_online_mask);
} else {
policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
cpumask_setall(policy->cpus);
}
cpumask_setall(policy->cpus);
return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
}
......
......@@ -63,9 +63,6 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
policy->min, policy->max, policy->cpu);
if (!cpu_online(policy->cpu))
return -EINVAL;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
......@@ -121,9 +118,6 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
break;
}
if (!cpu_online(policy->cpu))
return -EINVAL;
for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
unsigned int freq = table[i].frequency;
if (freq == CPUFREQ_ENTRY_INVALID)
......@@ -227,6 +221,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu)
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr);
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy)
{
pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n",
policy->cpu, policy->last_cpu);
per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table,
policy->last_cpu);
per_cpu(cpufreq_show_table, policy->last_cpu) = NULL;
}
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu)
{
return per_cpu(cpufreq_show_table, cpu);
......
/*
* Copyright (C) 2012 Calxeda, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This driver provides the clk notifier callbacks that are used when
* the cpufreq-cpu0 driver changes the frequency, to alert the highbank
* EnergyCore Management Engine (ECME) about the need to change
* voltage. The ECME interfaces with the actual voltage regulators.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/mailbox.h>
#include <linux/platform_device.h>
#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
#define HB_CPUFREQ_IPC_LEN 7
#define HB_CPUFREQ_VOLT_RETRIES 15
static int hb_voltage_change(unsigned int freq)
{
int i;
u32 msg[HB_CPUFREQ_IPC_LEN];
msg[0] = HB_CPUFREQ_CHANGE_NOTE;
msg[1] = freq / 1000000;
for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
msg[i] = 0;
return pl320_ipc_transmit(msg);
}
static int hb_cpufreq_clk_notify(struct notifier_block *nb,
unsigned long action, void *hclk)
{
struct clk_notifier_data *clk_data = hclk;
int i = 0;
if (action == PRE_RATE_CHANGE) {
if (clk_data->new_rate > clk_data->old_rate)
while (hb_voltage_change(clk_data->new_rate))
if (i++ > HB_CPUFREQ_VOLT_RETRIES)
return NOTIFY_BAD;
} else if (action == POST_RATE_CHANGE) {
if (clk_data->new_rate < clk_data->old_rate)
while (hb_voltage_change(clk_data->new_rate))
if (i++ > HB_CPUFREQ_VOLT_RETRIES)
return NOTIFY_BAD;
}
return NOTIFY_DONE;
}
static struct notifier_block hb_cpufreq_clk_nb = {
.notifier_call = hb_cpufreq_clk_notify,
};
static int hb_cpufreq_driver_init(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-cpu0", };
struct device *cpu_dev;
struct clk *cpu_clk;
struct device_node *np;
int ret;
if (!of_machine_is_compatible("calxeda,highbank"))
return -ENODEV;
for_each_child_of_node(of_find_node_by_path("/cpus"), np)
if (of_get_property(np, "operating-points", NULL))
break;
if (!np) {
pr_err("failed to find highbank cpufreq node\n");
return -ENOENT;
}
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
pr_err("failed to get highbank cpufreq device\n");
ret = -ENODEV;
goto out_put_node;
}
cpu_dev->of_node = np;
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
ret = PTR_ERR(cpu_clk);
pr_err("failed to get cpu0 clock: %d\n", ret);
goto out_put_node;
}
ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb);
if (ret) {
pr_err("failed to register clk notifier: %d\n", ret);
goto out_put_node;
}
/* Instantiate cpufreq-cpu0 */
platform_device_register_full(&devinfo);
out_put_node:
of_node_put(np);
return ret;
}
module_init(hb_cpufreq_driver_init);
MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>");
MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
MODULE_LICENSE("GPL");
/*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#define PU_SOC_VOLTAGE_NORMAL 1250000
#define PU_SOC_VOLTAGE_HIGH 1275000
#define FREQ_1P2_GHZ 1200000000
static struct regulator *arm_reg;
static struct regulator *pu_reg;
static struct regulator *soc_reg;
static struct clk *arm_clk;
static struct clk *pll1_sys_clk;
static struct clk *pll1_sw_clk;
static struct clk *step_clk;
static struct clk *pll2_pfd2_396m_clk;
static struct device *cpu_dev;
static struct cpufreq_frequency_table *freq_table;
static unsigned int transition_latency;
static int imx6q_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, freq_table);
}
static unsigned int imx6q_get_speed(unsigned int cpu)
{
return clk_get_rate(arm_clk) / 1000;
}
static int imx6q_set_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int relation)
{
struct cpufreq_freqs freqs;
struct opp *opp;
unsigned long freq_hz, volt, volt_old;
unsigned int index, cpu;
int ret;
ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
relation, &index);
if (ret) {
dev_err(cpu_dev, "failed to match target frequency %d: %d\n",
target_freq, ret);
return ret;
}
freqs.new = freq_table[index].frequency;
freq_hz = freqs.new * 1000;
freqs.old = clk_get_rate(arm_clk) / 1000;
if (freqs.old == freqs.new)
return 0;
for_each_online_cpu(cpu) {
freqs.cpu = cpu;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
rcu_read_lock();
opp = opp_find_freq_ceil(cpu_dev, &freq_hz);
if (IS_ERR(opp)) {
rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz);
return PTR_ERR(opp);
}
volt = opp_get_voltage(opp);
rcu_read_unlock();
volt_old = regulator_get_voltage(arm_reg);
dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n",
freqs.old / 1000, volt_old / 1000,
freqs.new / 1000, volt / 1000);
/* scaling up? scale voltage before frequency */
if (freqs.new > freqs.old) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret) {
dev_err(cpu_dev,
"failed to scale vddarm up: %d\n", ret);
return ret;
}
/*
* Need to increase vddpu and vddsoc for safety
* if we are about to run at 1.2 GHz.
*/
if (freqs.new == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_HIGH, 0);
regulator_set_voltage_tol(soc_reg,
PU_SOC_VOLTAGE_HIGH, 0);
}
}
/*
* The setpoints are selected per PLL/PDF frequencies, so we need to
* reprogram PLL for frequency scaling. The procedure of reprogramming
* PLL1 is as below.
*
* - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it
* - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
* - Disable pll2_pfd2_396m_clk
*/
clk_prepare_enable(pll2_pfd2_396m_clk);
clk_set_parent(step_clk, pll2_pfd2_396m_clk);
clk_set_parent(pll1_sw_clk, step_clk);
if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
clk_set_rate(pll1_sys_clk, freqs.new * 1000);
/*
* If we are leaving 396 MHz set-point, we need to enable
* pll1_sys_clk and disable pll2_pfd2_396m_clk to keep
* their use count correct.
*/
if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) {
clk_prepare_enable(pll1_sys_clk);
clk_disable_unprepare(pll2_pfd2_396m_clk);
}
clk_set_parent(pll1_sw_clk, pll1_sys_clk);
clk_disable_unprepare(pll2_pfd2_396m_clk);
} else {
/*
* Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient
* to provide the frequency.
*/
clk_disable_unprepare(pll1_sys_clk);
}
/* Ensure the arm clock divider is what we expect */
ret = clk_set_rate(arm_clk, freqs.new * 1000);
if (ret) {
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
regulator_set_voltage_tol(arm_reg, volt_old, 0);
return ret;
}
/* scaling down? scale voltage after frequency */
if (freqs.new < freqs.old) {
ret = regulator_set_voltage_tol(arm_reg, volt, 0);
if (ret)
dev_warn(cpu_dev,
"failed to scale vddarm down: %d\n", ret);
if (freqs.old == FREQ_1P2_GHZ / 1000) {
regulator_set_voltage_tol(pu_reg,
PU_SOC_VOLTAGE_NORMAL, 0);
regulator_set_voltage_tol(soc_reg,
PU_SOC_VOLTAGE_NORMAL, 0);
}
}
for_each_online_cpu(cpu) {
freqs.cpu = cpu;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
return 0;
}
static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
{
int ret;
ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (ret) {
dev_err(cpu_dev, "invalid frequency table: %d\n", ret);
return ret;
}
policy->cpuinfo.transition_latency = transition_latency;
policy->cur = clk_get_rate(arm_clk) / 1000;
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
return 0;
}
static int imx6q_cpufreq_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr *imx6q_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver imx6q_cpufreq_driver = {
.verify = imx6q_verify_speed,
.target = imx6q_set_target,
.get = imx6q_get_speed,
.init = imx6q_cpufreq_init,
.exit = imx6q_cpufreq_exit,
.name = "imx6q-cpufreq",
.attr = imx6q_cpufreq_attr,
};
static int imx6q_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
struct opp *opp;
unsigned long min_volt, max_volt;
int num, ret;
cpu_dev = &pdev->dev;
np = of_find_node_by_path("/cpus/cpu@0");
if (!np) {
dev_err(cpu_dev, "failed to find cpu0 node\n");
return -ENOENT;
}
cpu_dev->of_node = np;
arm_clk = devm_clk_get(cpu_dev, "arm");
pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
step_clk = devm_clk_get(cpu_dev, "step");
pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m");
if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) ||
IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) {
dev_err(cpu_dev, "failed to get clocks\n");
ret = -ENOENT;
goto put_node;
}
arm_reg = devm_regulator_get(cpu_dev, "arm");
pu_reg = devm_regulator_get(cpu_dev, "pu");
soc_reg = devm_regulator_get(cpu_dev, "soc");
if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
dev_err(cpu_dev, "failed to get regulators\n");
ret = -ENOENT;
goto put_node;
}
/* We expect an OPP table supplied by platform */
num = opp_get_opp_count(cpu_dev);
if (num < 0) {
ret = num;
dev_err(cpu_dev, "no OPP table is found: %d\n", ret);
goto put_node;
}
ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto put_node;
}
if (of_property_read_u32(np, "clock-latency", &transition_latency))
transition_latency = CPUFREQ_ETERNAL;
/*
* OPP is maintained in order of increasing frequency, and
* freq_table initialised from OPP is therefore sorted in the
* same order.
*/
rcu_read_lock();
opp = opp_find_freq_exact(cpu_dev,
freq_table[0].frequency * 1000, true);
min_volt = opp_get_voltage(opp);
opp = opp_find_freq_exact(cpu_dev,
freq_table[--num].frequency * 1000, true);
max_volt = opp_get_voltage(opp);
rcu_read_unlock();
ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt);
if (ret > 0)
transition_latency += ret * 1000;
/* Count vddpu and vddsoc latency in for 1.2 GHz support */
if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) {
ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL,
PU_SOC_VOLTAGE_HIGH);
if (ret > 0)
transition_latency += ret * 1000;
ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL,
PU_SOC_VOLTAGE_HIGH);
if (ret > 0)
transition_latency += ret * 1000;
}
ret = cpufreq_register_driver(&imx6q_cpufreq_driver);
if (ret) {
dev_err(cpu_dev, "failed register driver: %d\n", ret);
goto free_freq_table;
}
of_node_put(np);
return 0;
free_freq_table:
opp_free_cpufreq_table(cpu_dev, &freq_table);
put_node:
of_node_put(np);
return ret;
}
static int imx6q_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&imx6q_cpufreq_driver);
opp_free_cpufreq_table(cpu_dev, &freq_table);
return 0;
}
static struct platform_driver imx6q_cpufreq_platdrv = {
.driver = {
.name = "imx6q-cpufreq",
.owner = THIS_MODULE,
},
.probe = imx6q_cpufreq_probe,
.remove = imx6q_cpufreq_remove,
};
module_platform_driver(imx6q_cpufreq_platdrv);
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver");
MODULE_LICENSE("GPL");
This diff is collapsed.
/*
* kirkwood-cpufreq.c: cpufreq driver for the Marvell Kirkwood
*
* Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/proc-fns.h>
#define CPU_SW_INT_BLK BIT(28)
static struct priv
{
struct clk *cpu_clk;
struct clk *ddr_clk;
struct clk *powersave_clk;
struct device *dev;
void __iomem *base;
} priv;
#define STATE_CPU_FREQ 0x01
#define STATE_DDR_FREQ 0x02
/*
* Kirkwood can swap the clock to the CPU between two clocks:
*
* - cpu clk
* - ddr clk
*
* The frequencies are set at runtime before registering this
* table.
*/
static struct cpufreq_frequency_table kirkwood_freq_table[] = {
{STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */
{STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */
{0, CPUFREQ_TABLE_END},
};
static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
{
if (__clk_is_enabled(priv.powersave_clk))
return kirkwood_freq_table[1].frequency;
return kirkwood_freq_table[0].frequency;
}
static void kirkwood_cpufreq_set_cpu_state(unsigned int index)
{
struct cpufreq_freqs freqs;
unsigned int state = kirkwood_freq_table[index].index;
unsigned long reg;
freqs.old = kirkwood_cpufreq_get_cpu_frequency(0);
freqs.new = kirkwood_freq_table[index].frequency;
freqs.cpu = 0; /* Kirkwood is UP */
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
dev_dbg(priv.dev, "Attempting to set frequency to %i KHz\n",
kirkwood_freq_table[index].frequency);
dev_dbg(priv.dev, "old frequency was %i KHz\n",
kirkwood_cpufreq_get_cpu_frequency(0));
if (freqs.old != freqs.new) {
local_irq_disable();
/* Disable interrupts to the CPU */
reg = readl_relaxed(priv.base);
reg |= CPU_SW_INT_BLK;
writel_relaxed(reg, priv.base);
switch (state) {
case STATE_CPU_FREQ:
clk_disable(priv.powersave_clk);
break;
case STATE_DDR_FREQ:
clk_enable(priv.powersave_clk);
break;
}
/* Wait-for-Interrupt, while the hardware changes frequency */
cpu_do_idle();
/* Enable interrupts to the CPU */
reg = readl_relaxed(priv.base);
reg &= ~CPU_SW_INT_BLK;
writel_relaxed(reg, priv.base);
local_irq_enable();
}
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
};
static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, kirkwood_freq_table);
}
static int kirkwood_cpufreq_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int index = 0;
if (cpufreq_frequency_table_target(policy, kirkwood_freq_table,
target_freq, relation, &index))
return -EINVAL;
kirkwood_cpufreq_set_cpu_state(index);
return 0;
}
/* Module init and exit code */
static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int result;
/* cpuinfo and default policy values */
policy->cpuinfo.transition_latency = 5000; /* 5uS */
policy->cur = kirkwood_cpufreq_get_cpu_frequency(0);
result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table);
if (result)
return result;
cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu);
return 0;
}
static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr *kirkwood_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver kirkwood_cpufreq_driver = {
.get = kirkwood_cpufreq_get_cpu_frequency,
.verify = kirkwood_cpufreq_verify,
.target = kirkwood_cpufreq_target,
.init = kirkwood_cpufreq_cpu_init,
.exit = kirkwood_cpufreq_cpu_exit,
.name = "kirkwood-cpufreq",
.owner = THIS_MODULE,
.attr = kirkwood_cpufreq_attr,
};
static int kirkwood_cpufreq_probe(struct platform_device *pdev)
{
struct device_node *np;
struct resource *res;
int err;
priv.dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Cannot get memory resource\n");
return -ENODEV;
}
priv.base = devm_request_and_ioremap(&pdev->dev, res);
if (!priv.base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
return -EADDRNOTAVAIL;
}
np = of_find_node_by_path("/cpus/cpu@0");
if (!np)
return -ENODEV;
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
if (IS_ERR(priv.cpu_clk)) {
dev_err(priv.dev, "Unable to get cpuclk");
return PTR_ERR(priv.cpu_clk);
}
clk_prepare_enable(priv.cpu_clk);
kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
priv.ddr_clk = of_clk_get_by_name(np, "ddrclk");
if (IS_ERR(priv.ddr_clk)) {
dev_err(priv.dev, "Unable to get ddrclk");
err = PTR_ERR(priv.ddr_clk);
goto out_cpu;
}
clk_prepare_enable(priv.ddr_clk);
kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
priv.powersave_clk = of_clk_get_by_name(np, "powersave");
if (IS_ERR(priv.powersave_clk)) {
dev_err(priv.dev, "Unable to get powersave");
err = PTR_ERR(priv.powersave_clk);
goto out_ddr;
}
clk_prepare(priv.powersave_clk);
of_node_put(np);
np = NULL;
err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
if (!err)
return 0;
dev_err(priv.dev, "Failed to register cpufreq driver");
clk_disable_unprepare(priv.powersave_clk);
out_ddr:
clk_disable_unprepare(priv.ddr_clk);
out_cpu:
clk_disable_unprepare(priv.cpu_clk);
of_node_put(np);
return err;
}
static int kirkwood_cpufreq_remove(struct platform_device *pdev)
{
cpufreq_unregister_driver(&kirkwood_cpufreq_driver);
clk_disable_unprepare(priv.powersave_clk);
clk_disable_unprepare(priv.ddr_clk);
clk_disable_unprepare(priv.cpu_clk);
return 0;
}
static struct platform_driver kirkwood_cpufreq_platform_driver = {
.probe = kirkwood_cpufreq_probe,
.remove = kirkwood_cpufreq_remove,
.driver = {
.name = "kirkwood-cpufreq",
.owner = THIS_MODULE,
},
};
module_platform_driver(kirkwood_cpufreq_platform_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch");
MODULE_DESCRIPTION("cpufreq driver for Marvell's kirkwood CPU");
MODULE_ALIAS("platform:kirkwood-cpufreq");
......@@ -181,7 +181,7 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* secondary CPUs are tied to the primary one by the
* cpufreq core if in the secondary policy we tell it that
* it actually must be one policy together with all others. */
cpumask_copy(policy->cpus, cpu_online_mask);
cpumask_setall(policy->cpus);
cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu);
return cpufreq_frequency_table_cpuinfo(policy,
......
......@@ -214,10 +214,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
* interface to handle this scenario. Additional is_smp() check
* is to keep SMP_ON_UP build working.
*/
if (is_smp()) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
if (is_smp())
cpumask_setall(policy->cpus);
}
/* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000;
......
......@@ -1249,39 +1249,59 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.attr = powernow_k8_attr,
};
static void __request_acpi_cpufreq(void)
{
const char *cur_drv, *drv = "acpi-cpufreq";
cur_drv = cpufreq_get_current_driver();
if (!cur_drv)
goto request;
if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv))))
pr_warn(PFX "WTF driver: %s\n", cur_drv);
return;
request:
pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n");
request_module(drv);
}
/* driver entry point for init */
static int __cpuinit powernowk8_init(void)
{
unsigned int i, supported_cpus = 0;
int rv;
int ret;
if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
request_module("acpi-cpufreq");
__request_acpi_cpufreq();
return -ENODEV;
}
if (!x86_match_cpu(powernow_k8_ids))
return -ENODEV;
get_online_cpus();
for_each_online_cpu(i) {
int rc;
smp_call_function_single(i, check_supported_cpu, &rc, 1);
if (rc == 0)
smp_call_function_single(i, check_supported_cpu, &ret, 1);
if (!ret)
supported_cpus++;
}
if (supported_cpus != num_online_cpus())
if (supported_cpus != num_online_cpus()) {
put_online_cpus();
return -ENODEV;
}
put_online_cpus();
rv = cpufreq_register_driver(&cpufreq_amd64_driver);
ret = cpufreq_register_driver(&cpufreq_amd64_driver);
if (ret)
return ret;
if (!rv)
pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
num_online_nodes(), boot_cpu_data.x86_model_id,
supported_cpus);
pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
return rv;
return ret;
}
/* driver entry point for term */
......
......@@ -30,7 +30,7 @@ static struct {
u32 cnt;
} spear_cpufreq;
int spear_cpufreq_verify(struct cpufreq_policy *policy)
static int spear_cpufreq_verify(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl);
}
......@@ -157,7 +157,9 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = newfreq / 1000;
freqs.new /= mult;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
for_each_cpu(freqs.cpu, policy->cpus)
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
if (mult == 2)
ret = spear1340_set_cpu_rate(srcclk, newfreq);
......@@ -170,7 +172,8 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy,
freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000;
}
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
for_each_cpu(freqs.cpu, policy->cpus)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
return ret;
}
......@@ -188,8 +191,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency;
policy->cur = spear_cpufreq_get(0);
cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
cpumask_copy(policy->related_cpus, policy->cpus);
cpumask_setall(policy->cpus);
return 0;
}
......
menuconfig MAILBOX
bool "Mailbox Hardware Support"
help
Mailbox is a framework to control hardware communication between
on-chip processors through queued messages and interrupt driven
signals. Say Y if your platform supports hardware mailboxes.
if MAILBOX
config PL320_MBOX
bool "ARM PL320 Mailbox"
depends on ARM_AMBA
help
An implementation of the ARM PL320 Interprocessor Communication
Mailbox (IPCM), tailored for the Calxeda Highbank. It is used to
send short messages between Highbank's A9 cores and the EnergyCore
Management Engine, primarily for cpufreq. Say Y here if you want
to use the PL320 IPCM support.
endif
obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
/*
* Copyright 2012 Calxeda, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/types.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <linux/mailbox.h>
#define IPCMxSOURCE(m) ((m) * 0x40)
#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
#define IPCMxDCLEAR(m) (((m) * 0x40) + 0x008)
#define IPCMxDSTATUS(m) (((m) * 0x40) + 0x00C)
#define IPCMxMODE(m) (((m) * 0x40) + 0x010)
#define IPCMxMSET(m) (((m) * 0x40) + 0x014)
#define IPCMxMCLEAR(m) (((m) * 0x40) + 0x018)
#define IPCMxMSTATUS(m) (((m) * 0x40) + 0x01C)
#define IPCMxSEND(m) (((m) * 0x40) + 0x020)
#define IPCMxDR(m, dr) (((m) * 0x40) + ((dr) * 4) + 0x024)
#define IPCMMIS(irq) (((irq) * 8) + 0x800)
#define IPCMRIS(irq) (((irq) * 8) + 0x804)
#define MBOX_MASK(n) (1 << (n))
#define IPC_TX_MBOX 1
#define IPC_RX_MBOX 2
#define CHAN_MASK(n) (1 << (n))
#define A9_SOURCE 1
#define M3_SOURCE 0
static void __iomem *ipc_base;
static int ipc_irq;
static DEFINE_MUTEX(ipc_m1_lock);
static DECLARE_COMPLETION(ipc_completion);
static ATOMIC_NOTIFIER_HEAD(ipc_notifier);
static inline void set_destination(int source, int mbox)
{
__raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox));
__raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox));
}
static inline void clear_destination(int source, int mbox)
{
__raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox));
__raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox));
}
static void __ipc_send(int mbox, u32 *data)
{
int i;
for (i = 0; i < 7; i++)
__raw_writel(data[i], ipc_base + IPCMxDR(mbox, i));
__raw_writel(0x1, ipc_base + IPCMxSEND(mbox));
}
static u32 __ipc_rcv(int mbox, u32 *data)
{
int i;
for (i = 0; i < 7; i++)
data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i));
return data[1];
}
/* blocking implementation from the A9 side, not usable in interrupts! */
int pl320_ipc_transmit(u32 *data)
{
int ret;
mutex_lock(&ipc_m1_lock);
init_completion(&ipc_completion);
__ipc_send(IPC_TX_MBOX, data);
ret = wait_for_completion_timeout(&ipc_completion,
msecs_to_jiffies(1000));
if (ret == 0) {
ret = -ETIMEDOUT;
goto out;
}
ret = __ipc_rcv(IPC_TX_MBOX, data);
out:
mutex_unlock(&ipc_m1_lock);
return ret;
}
EXPORT_SYMBOL_GPL(pl320_ipc_transmit);
static irqreturn_t ipc_handler(int irq, void *dev)
{
u32 irq_stat;
u32 data[7];
irq_stat = __raw_readl(ipc_base + IPCMMIS(1));
if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) {
__raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
complete(&ipc_completion);
}
if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) {
__ipc_rcv(IPC_RX_MBOX, data);
atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1);
__raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX));
}
return IRQ_HANDLED;
}
int pl320_ipc_register_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&ipc_notifier, nb);
}
EXPORT_SYMBOL_GPL(pl320_ipc_register_notifier);
int pl320_ipc_unregister_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&ipc_notifier, nb);
}
EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
static int __init pl320_probe(struct amba_device *adev,
const struct amba_id *id)
{
int ret;
ipc_base = ioremap(adev->res.start, resource_size(&adev->res));
if (ipc_base == NULL)
return -ENOMEM;
__raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX));
ipc_irq = adev->irq[0];
ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL);
if (ret < 0)
goto err;
/* Init slow mailbox */
__raw_writel(CHAN_MASK(A9_SOURCE),
ipc_base + IPCMxSOURCE(IPC_TX_MBOX));
__raw_writel(CHAN_MASK(M3_SOURCE),
ipc_base + IPCMxDSET(IPC_TX_MBOX));
__raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
ipc_base + IPCMxMSET(IPC_TX_MBOX));
/* Init receive mailbox */
__raw_writel(CHAN_MASK(M3_SOURCE),
ipc_base + IPCMxSOURCE(IPC_RX_MBOX));
__raw_writel(CHAN_MASK(A9_SOURCE),
ipc_base + IPCMxDSET(IPC_RX_MBOX));
__raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE),
ipc_base + IPCMxMSET(IPC_RX_MBOX));
return 0;
err:
iounmap(ipc_base);
return ret;
}
static struct amba_id pl320_ids[] = {
{
.id = 0x00041320,
.mask = 0x000fffff,
},
{ 0, 0 },
};
static struct amba_driver pl320_driver = {
.drv = {
.name = "pl320",
},
.id_table = pl320_ids,
.probe = pl320_probe,
};
static int __init ipc_init(void)
{
return amba_driver_register(&pl320_driver);
}
module_init(ipc_init);
......@@ -89,11 +89,15 @@ struct cpufreq_real_policy {
};
struct cpufreq_policy {
cpumask_var_t cpus; /* CPUs requiring sw coordination */
cpumask_var_t related_cpus; /* CPUs with any coordination */
unsigned int shared_type; /* ANY or ALL affected CPUs
/* CPUs sharing clock, require sw coordination */
cpumask_var_t cpus; /* Online CPUs only */
cpumask_var_t related_cpus; /* Online + Offline CPUs */
unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
should set cpufreq */
unsigned int cpu; /* cpu nr of registered CPU */
unsigned int cpu; /* cpu nr of CPU managing this policy */
unsigned int last_cpu; /* cpu nr of previous CPU that managed
* this policy */
struct cpufreq_cpuinfo cpuinfo;/* see above */
unsigned int min; /* in kHz */
......@@ -112,16 +116,23 @@ struct cpufreq_policy {
struct completion kobj_unregister;
};
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_ADJUST (0)
#define CPUFREQ_INCOMPATIBLE (1)
#define CPUFREQ_NOTIFY (2)
#define CPUFREQ_START (3)
#define CPUFREQ_UPDATE_POLICY_CPU (4)
/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
}
/******************** cpufreq transition notifiers *******************/
#define CPUFREQ_PRECHANGE (0)
......@@ -173,6 +184,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mu
struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int initialized;
int (*governor) (struct cpufreq_policy *policy,
unsigned int event);
ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
......@@ -308,6 +320,9 @@ __ATTR(_name, 0444, show_##_name, NULL)
static struct global_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *data);
const char *cpufreq_get_current_driver(void);
/*********************************************************************
* CPUFREQ 2.6. INTERFACE *
......@@ -397,14 +412,13 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
/* the following 3 functions are for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
void cpufreq_cpu_put(struct cpufreq_policy *data);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
void cpufreq_frequency_table_put_attr(unsigned int cpu);
#endif /* _LINUX_CPUFREQ_H */
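The reworked policy masks and the new policy_is_shared() helper can be exercised from a scaling driver's .init() callback. The sketch below is illustrative only: the driver, the latency and the frequency values are invented and belong to none of the drivers touched in this series; it merely mirrors the cpumask_setall() pattern used by the maple, omap and spear drivers above, with policy->cpus carrying online CPUs and policy->related_cpus also covering offline ones, per the header comments.

#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Hypothetical driver where every CPU shares one clock (sketch only). */
static int example_cpufreq_init(struct cpufreq_policy *policy)
{
        /* All CPUs share the clock on this imaginary SoC. */
        cpumask_setall(policy->cpus);

        policy->cpuinfo.transition_latency = 300 * 1000;  /* ns, made up */
        policy->cur = 800000;                             /* kHz, made up */

        if (policy_is_shared(policy))
                pr_debug("cpu%u: policy shared with other CPUs\n",
                         policy->cpu);

        return 0;
}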
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
int pl320_ipc_transmit(u32 *data);
int pl320_ipc_register_notifier(struct notifier_block *nb);
int pl320_ipc_unregister_notifier(struct notifier_block *nb);
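A client of this interface (the Highbank cpufreq code is the intended user, per the Kconfig help above) registers a notifier for unsolicited messages and calls pl320_ipc_transmit() from sleepable context. The sketch below is purely illustrative: the command value and message layout are invented; only the seven-word buffer size follows from __ipc_send()/__ipc_rcv() above, and on success pl320_ipc_transmit() returns the second received word.

#include <linux/module.h>
#include <linux/mailbox.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Hypothetical command understood by the remote management engine. */
#define EXAMPLE_IPC_SET_FREQ    0x1

static int example_ipc_notify(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        /* Unsolicited RX messages: action is data[0], data points at data[1]. */
        pr_info("pl320 example: async message %lu\n", action);
        return NOTIFY_OK;
}

static struct notifier_block example_ipc_nb = {
        .notifier_call = example_ipc_notify,
};

static int example_set_freq(u32 freq_mhz)
{
        /* __ipc_send()/__ipc_rcv() move seven 32-bit words per message. */
        u32 msg[7] = { EXAMPLE_IPC_SET_FREQ, freq_mhz };

        /* Sleeps up to one second; must not be called from IRQ context. */
        return pl320_ipc_transmit(msg);
}

static int __init example_client_init(void)
{
        return pl320_ipc_register_notifier(&example_ipc_nb);
}
module_init(example_client_init);
MODULE_LICENSE("GPL");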