Commit c47b3bd0 authored by Rafael J. Wysocki

Merge branch 'pm-cpufreq'

* pm-cpufreq: (63 commits)
  intel_pstate: Clean up get_target_pstate_use_performance()
  intel_pstate: Use sample.core_avg_perf in get_avg_pstate()
  intel_pstate: Clarify average performance computation
  intel_pstate: Avoid unnecessary synchronize_sched() during initialization
  cpufreq: schedutil: Make default depend on CONFIG_SMP
  cpufreq: powernv: del_timer_sync when global and local pstate are equal
  cpufreq: powernv: Move smp_call_function_any() out of irq safe block
  intel_pstate: Clean up intel_pstate_get()
  cpufreq: schedutil: Make it depend on CONFIG_SMP
  cpufreq: governor: Fix handling of special cases in dbs_update()
  cpufreq: intel_pstate: Ignore _PPC processing under HWP
  cpufreq: arm_big_little: use generic OPP functions for {init, free}_opp_table
  cpufreq: tango: Use generic platdev driver
  cpufreq: Fix GOV_LIMITS handling for the userspace governor
  cpufreq: mvebu: Move cpufreq code into drivers/cpufreq/
  cpufreq: dt: Kill platform-data
  mvebu: Use dev_pm_opp_set_sharing_cpus() to mark OPP tables as shared
  cpufreq: dt: Identify cpu-sharing for platforms without operating-points-v2
  cpufreq: governor: Change confusing struct field and variable names
  cpufreq: intel_pstate: Enable PPC enforcement for servers
  ...
parents 29cff184 1aa7a6e2
......@@ -1661,6 +1661,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
hwp_only
Only load intel_pstate on systems which support
hardware P state control (HWP) if available.
support_acpi_ppc
Enforce ACPI _PPC performance limits. If the Fixed ACPI
Description Table specifies the preferred power management
profile as "Enterprise Server" or "Performance Server",
then this feature is turned on by default.
intremap= [X86-64, Intel-IOMMU]
on enable Interrupt Remapping (default)
......
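As a usage note, _PPC enforcement can also be requested explicitly by
booting with

    intel_pstate=support_acpi_ppc

on the kernel command line (assuming the usual intel_pstate= sub-option
syntax used by the neighbouring entries such as hwp_only); on systems
whose FADT preferred profile is one of the server profiles named above
it is already enabled by default.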
......@@ -1322,6 +1322,7 @@ F: drivers/rtc/rtc-armada38x.c
F: arch/arm/boot/dts/armada*
F: arch/arm/boot/dts/kirkwood*
F: arch/arm64/boot/dts/marvell/armada*
F: drivers/cpufreq/mvebu-cpufreq.c
ARM/Marvell Berlin SoC support
......
......@@ -18,11 +18,6 @@
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h>
static void __init berlin_init_late(void)
{
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
}
static const char * const berlin_dt_compat[] = {
"marvell,berlin",
NULL,
......@@ -30,7 +25,6 @@ static const char * const berlin_dt_compat[] = {
DT_MACHINE_START(BERLIN_DT, "Marvell Berlin")
.dt_compat = berlin_dt_compat,
.init_late = berlin_init_late,
/*
* with DT probing for L2CCs, berlin_init_machine can be removed.
* Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
......
......@@ -213,33 +213,6 @@ static void __init exynos_init_irq(void)
exynos_map_pmu();
}
static const struct of_device_id exynos_cpufreq_matches[] = {
{ .compatible = "samsung,exynos3250", .data = "cpufreq-dt" },
{ .compatible = "samsung,exynos4210", .data = "cpufreq-dt" },
{ .compatible = "samsung,exynos4212", .data = "cpufreq-dt" },
{ .compatible = "samsung,exynos4412", .data = "cpufreq-dt" },
{ .compatible = "samsung,exynos5250", .data = "cpufreq-dt" },
#ifndef CONFIG_BL_SWITCHER
{ .compatible = "samsung,exynos5420", .data = "cpufreq-dt" },
{ .compatible = "samsung,exynos5800", .data = "cpufreq-dt" },
#endif
{ /* sentinel */ }
};
static void __init exynos_cpufreq_init(void)
{
struct device_node *root = of_find_node_by_path("/");
const struct of_device_id *match;
match = of_match_node(exynos_cpufreq_matches, root);
if (!match) {
platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
return;
}
platform_device_register_simple(match->data, -1, NULL, 0);
}
static void __init exynos_dt_machine_init(void)
{
/*
......@@ -262,8 +235,6 @@ static void __init exynos_dt_machine_init(void)
of_machine_is_compatible("samsung,exynos5250"))
platform_device_register(&exynos_cpuidle);
exynos_cpufreq_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
......
......@@ -18,15 +18,6 @@
#include "common.h"
#include "mx27.h"
static void __init imx27_dt_init(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
platform_device_register_full(&devinfo);
}
static const char * const imx27_dt_board_compat[] __initconst = {
"fsl,imx27",
NULL
......@@ -36,6 +27,5 @@ DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)")
.map_io = mx27_map_io,
.init_early = imx27_init_early,
.init_irq = mx27_init_irq,
.init_machine = imx27_dt_init,
.dt_compat = imx27_dt_board_compat,
MACHINE_END
......@@ -50,13 +50,10 @@ static void __init imx51_ipu_mipi_setup(void)
static void __init imx51_dt_init(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
imx51_ipu_mipi_setup();
imx_src_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
platform_device_register_full(&devinfo);
}
static void __init imx51_init_late(void)
......
......@@ -40,8 +40,6 @@ static void __init imx53_dt_init(void)
static void __init imx53_init_late(void)
{
imx53_pm_init();
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
}
static const char * const imx53_dt_board_compat[] __initconst = {
......
......@@ -105,11 +105,6 @@ static void __init imx7d_init_irq(void)
irqchip_init();
}
static void __init imx7d_init_late(void)
{
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
}
static const char *const imx7d_dt_compat[] __initconst = {
"fsl,imx7d",
NULL,
......@@ -117,7 +112,6 @@ static const char *const imx7d_dt_compat[] __initconst = {
DT_MACHINE_START(IMX7D, "Freescale i.MX7 Dual (Device Tree)")
.init_irq = imx7d_init_irq,
.init_late = imx7d_init_late,
.init_machine = imx7d_init_machine,
.dt_compat = imx7d_dt_compat,
MACHINE_END
......@@ -20,7 +20,6 @@
#include <linux/clk.h>
#include <linux/cpu_pm.h>
#include <linux/cpufreq-dt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>
......@@ -29,7 +28,6 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/resource.h>
#include <linux/slab.h>
#include <linux/smp.h>
......@@ -608,86 +606,3 @@ int mvebu_pmsu_dfs_request(int cpu)
return 0;
}
struct cpufreq_dt_platform_data cpufreq_dt_pd = {
.independent_clocks = true,
};
static int __init armada_xp_pmsu_cpufreq_init(void)
{
struct device_node *np;
struct resource res;
int ret, cpu;
if (!of_machine_is_compatible("marvell,armadaxp"))
return 0;
/*
* In order to have proper cpufreq handling, we need to ensure
* that the Device Tree description of the CPU clock includes
* the definition of the PMU DFS registers. If not, we do not
* register the clock notifier and the cpufreq driver. This
* piece of code is only for compatibility with old Device
* Trees.
*/
np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
if (!np)
return 0;
ret = of_address_to_resource(np, 1, &res);
if (ret) {
pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
of_node_put(np);
return 0;
}
of_node_put(np);
/*
* For each CPU, this loop registers the operating points
* supported (which are the nominal CPU frequency and half of
* it), and registers the clock notifier that will take care
* of doing the PMSU part of a frequency transition.
*/
for_each_possible_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
int ret;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("Cannot get CPU %d\n", cpu);
continue;
}
clk = clk_get(cpu_dev, 0);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
return PTR_ERR(clk);
}
/*
* In case of a failure of dev_pm_opp_add(), we don't
* bother with cleaning up the registered OPP (there's
* no function to do so), and simply cancel the
* registration of the cpufreq device.
*/
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
if (ret) {
clk_put(clk);
return ret;
}
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
clk_put(clk);
return ret;
}
}
platform_device_register_data(NULL, "cpufreq-dt", -1,
&cpufreq_dt_pd, sizeof(cpufreq_dt_pd));
return 0;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
......@@ -277,13 +277,10 @@ static void __init omap4_init_voltages(void)
static inline void omap_init_cpufreq(void)
{
struct platform_device_info devinfo = { };
struct platform_device_info devinfo = { .name = "omap-cpufreq" };
if (!of_have_populated_dt())
devinfo.name = "omap-cpufreq";
else
devinfo.name = "cpufreq-dt";
platform_device_register_full(&devinfo);
platform_device_register_full(&devinfo);
}
static int __init omap2_common_pm_init(void)
......
......@@ -74,7 +74,6 @@ static void __init rockchip_dt_init(void)
{
rockchip_suspend_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
platform_device_register_simple("cpufreq-dt", 0, NULL, 0);
}
static const char * const rockchip_board_dt_compat[] = {
......
......@@ -38,7 +38,6 @@ smp-$(CONFIG_ARCH_EMEV2) += smp-emev2.o headsmp-scu.o platsmp-scu.o
# PM objects
obj-$(CONFIG_SUSPEND) += suspend.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_PM_RCAR) += pm-rcar.o
obj-$(CONFIG_PM_RMOBILE) += pm-rmobile.o
obj-$(CONFIG_ARCH_RCAR_GEN2) += pm-rcar-gen2.o
......
......@@ -25,16 +25,9 @@ static inline int shmobile_suspend_init(void) { return 0; }
static inline void shmobile_smp_apmu_suspend_init(void) { }
#endif
#ifdef CONFIG_CPU_FREQ
int shmobile_cpufreq_init(void);
#else
static inline int shmobile_cpufreq_init(void) { return 0; }
#endif
static inline void __init shmobile_init_late(void)
{
shmobile_suspend_init();
shmobile_cpufreq_init();
}
#endif /* __ARCH_MACH_COMMON_H */
/*
* CPUFreq support code for SH-Mobile ARM
*
* Copyright (C) 2014 Gaku Inami
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/platform_device.h>
#include "common.h"
int __init shmobile_cpufreq_init(void)
{
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
}
......@@ -17,11 +17,6 @@
#include <asm/mach/arch.h>
static void __init sunxi_dt_cpufreq_init(void)
{
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
}
static const char * const sunxi_board_dt_compat[] = {
"allwinner,sun4i-a10",
"allwinner,sun5i-a10s",
......@@ -32,7 +27,6 @@ static const char * const sunxi_board_dt_compat[] = {
DT_MACHINE_START(SUNXI_DT, "Allwinner sun4i/sun5i Families")
.dt_compat = sunxi_board_dt_compat,
.init_late = sunxi_dt_cpufreq_init,
MACHINE_END
static const char * const sun6i_board_dt_compat[] = {
......@@ -53,7 +47,6 @@ static void __init sun6i_timer_init(void)
DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family")
.init_time = sun6i_timer_init,
.dt_compat = sun6i_board_dt_compat,
.init_late = sunxi_dt_cpufreq_init,
MACHINE_END
static const char * const sun7i_board_dt_compat[] = {
......@@ -63,7 +56,6 @@ static const char * const sun7i_board_dt_compat[] = {
DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
.dt_compat = sun7i_board_dt_compat,
.init_late = sunxi_dt_cpufreq_init,
MACHINE_END
static const char * const sun8i_board_dt_compat[] = {
......@@ -77,7 +69,6 @@ static const char * const sun8i_board_dt_compat[] = {
DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
.init_time = sun6i_timer_init,
.dt_compat = sun8i_board_dt_compat,
.init_late = sunxi_dt_cpufreq_init,
MACHINE_END
static const char * const sun9i_board_dt_compat[] = {
......
......@@ -110,7 +110,6 @@ static void __init zynq_init_late(void)
*/
static void __init zynq_init_machine(void)
{
struct platform_device_info devinfo = { .name = "cpufreq-dt", };
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
struct device *parent = NULL;
......@@ -145,7 +144,6 @@ static void __init zynq_init_machine(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
platform_device_register(&zynq_cpuidle_device);
platform_device_register_full(&devinfo);
}
static void __init zynq_timer_init(void)
......
......@@ -18,7 +18,11 @@ config CPU_FREQ
if CPU_FREQ
config CPU_FREQ_GOV_ATTR_SET
bool
config CPU_FREQ_GOV_COMMON
select CPU_FREQ_GOV_ATTR_SET
select IRQ_WORK
bool
......@@ -103,6 +107,17 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
Be aware that not all cpufreq drivers support the conservative
governor. If unsure have a look at the help section of the
driver. Fallback governor will be the performance governor.
config CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
bool "schedutil"
depends on SMP
select CPU_FREQ_GOV_SCHEDUTIL
select CPU_FREQ_GOV_PERFORMANCE
help
Use the 'schedutil' CPUFreq governor by default. If unsure,
have a look at the help section of that governor. The fallback
governor will be 'performance'.
endchoice
config CPU_FREQ_GOV_PERFORMANCE
......@@ -184,6 +199,26 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
config CPU_FREQ_GOV_SCHEDUTIL
tristate "'schedutil' cpufreq policy governor"
depends on CPU_FREQ && SMP
select CPU_FREQ_GOV_ATTR_SET
select IRQ_WORK
help
This governor makes decisions based on the utilization data provided
by the scheduler. It sets the CPU frequency to be proportional to
the utilization/capacity ratio coming from the scheduler. If the
utilization is frequency-invariant, the new frequency is also
proportional to the maximum available frequency. If that is not the
case, it is proportional to the current frequency of the CPU. The
frequency tipping point is at utilization/capacity equal to 80% in
both cases.
To compile this driver as a module, choose M here: the module will
be called cpufreq_schedutil.
If in doubt, say N.
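To make the 80% tipping point concrete, here is a minimal sketch of the
relation described in the help text above (the function name, the
freq_invariant flag and the standalone form are illustrative
assumptions, not the governor's internal code):

#include <linux/cpufreq.h>

/* Pick the reference frequency and scale it by utilization/capacity. */
static unsigned int sketch_next_freq(struct cpufreq_policy *policy,
				     unsigned long util, unsigned long max,
				     bool freq_invariant)
{
	unsigned int ref = freq_invariant ? policy->cpuinfo.max_freq
					  : policy->cur;

	/*
	 * next_freq = 1.25 * ref * util / max, which equals ref exactly
	 * when util/max == 0.8 -- the 80% tipping point mentioned above.
	 */
	return (ref + (ref >> 2)) * util / max;
}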
comment "CPU frequency scaling drivers"
config CPUFREQ_DT
......@@ -191,6 +226,7 @@ config CPUFREQ_DT
depends on HAVE_CLK && OF
# if CPU_THERMAL is on and THERMAL=m, CPUFREQ_DT cannot be =y:
depends on !CPU_THERMAL || THERMAL
select CPUFREQ_DT_PLATDEV
select PM_OPP
help
This adds a generic DT based cpufreq driver for frequency management.
......@@ -199,6 +235,15 @@ config CPUFREQ_DT
If in doubt, say N.
config CPUFREQ_DT_PLATDEV
bool
help
This adds a generic DT based cpufreq platdev driver for frequency
management. This creates a 'cpufreq-dt' platform device on the
supported platforms.
If in doubt, say N.
if X86
source "drivers/cpufreq/Kconfig.x86"
endif
......
......@@ -50,15 +50,6 @@ config ARM_HIGHBANK_CPUFREQ
If in doubt, say N.
config ARM_HISI_ACPU_CPUFREQ
tristate "Hisilicon ACPU CPUfreq driver"
depends on ARCH_HISI && CPUFREQ_DT
select PM_OPP
help
This enables the hisilicon ACPU CPUfreq driver.
If in doubt, say N.
config ARM_IMX6Q_CPUFREQ
tristate "Freescale i.MX6 cpufreq support"
depends on ARCH_MXC
......
......@@ -5,6 +5,7 @@
config X86_INTEL_PSTATE
bool "Intel P state control"
depends on X86
select ACPI_PROCESSOR if ACPI
help
This driver provides a P state for Intel core processors.
The driver implements an internal governor and will become
......
......@@ -11,8 +11,10 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET) += cpufreq_governor_attr_set.o
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
##################################################################################
# x86 drivers.
......@@ -53,7 +55,6 @@ obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o
obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o
obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ) += hisi-acpu-cpufreq.o
obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o
obj-$(CONFIG_ARM_INTEGRATOR) += integrator-cpufreq.o
obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o
......@@ -78,6 +79,7 @@ obj-$(CONFIG_ARM_TEGRA20_CPUFREQ) += tegra20-cpufreq.o
obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o
obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o
obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o
obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o
##################################################################################
......
......@@ -298,7 +298,8 @@ static int merge_cluster_tables(void)
return 0;
}
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
......@@ -308,11 +309,12 @@ static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
clk_put(clk[cluster]);
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
if (arm_bL_ops->free_opp_table)
arm_bL_ops->free_opp_table(cpu_dev);
arm_bL_ops->free_opp_table(cpumask);
dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
static void put_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i;
......@@ -321,7 +323,7 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
return;
if (cluster < MAX_CLUSTERS)
return _put_cluster_clk_and_freq_table(cpu_dev);
return _put_cluster_clk_and_freq_table(cpu_dev, cpumask);
for_each_present_cpu(i) {
struct device *cdev = get_cpu_device(i);
......@@ -330,14 +332,15 @@ static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
return;
}
_put_cluster_clk_and_freq_table(cdev);
_put_cluster_clk_and_freq_table(cdev, cpumask);
}
/* free virtual table */
kfree(freq_table[cluster]);
}
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
int ret;
......@@ -345,7 +348,7 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
if (freq_table[cluster])
return 0;
ret = arm_bL_ops->init_opp_table(cpu_dev);
ret = arm_bL_ops->init_opp_table(cpumask);
if (ret) {
dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
__func__, cpu_dev->id, ret);
......@@ -374,14 +377,15 @@ static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
free_opp_table:
if (arm_bL_ops->free_opp_table)
arm_bL_ops->free_opp_table(cpu_dev);
arm_bL_ops->free_opp_table(cpumask);
out:
dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
cluster);
return ret;
}
static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
static int get_cluster_clk_and_freq_table(struct device *cpu_dev,
const struct cpumask *cpumask)
{
u32 cluster = cpu_to_cluster(cpu_dev->id);
int i, ret;
......@@ -390,7 +394,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
return 0;
if (cluster < MAX_CLUSTERS) {
ret = _get_cluster_clk_and_freq_table(cpu_dev);
ret = _get_cluster_clk_and_freq_table(cpu_dev, cpumask);
if (ret)
atomic_dec(&cluster_usage[cluster]);
return ret;
......@@ -407,7 +411,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
return -ENODEV;
}
ret = _get_cluster_clk_and_freq_table(cdev);
ret = _get_cluster_clk_and_freq_table(cdev, cpumask);
if (ret)
goto put_clusters;
}
......@@ -433,7 +437,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
return -ENODEV;
}
_put_cluster_clk_and_freq_table(cdev);
_put_cluster_clk_and_freq_table(cdev, cpumask);
}
atomic_dec(&cluster_usage[cluster]);
......@@ -455,18 +459,6 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
ret = get_cluster_clk_and_freq_table(cpu_dev);
if (ret)
return ret;
ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
if (ret) {
dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
policy->cpu, cur_cluster);
put_cluster_clk_and_freq_table(cpu_dev);
return ret;
}
if (cur_cluster < MAX_CLUSTERS) {
int cpu;
......@@ -479,6 +471,18 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
}
ret = get_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
if (ret)
return ret;
ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
if (ret) {
dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
policy->cpu, cur_cluster);
put_cluster_clk_and_freq_table(cpu_dev, policy->cpus);
return ret;
}
if (arm_bL_ops->get_transition_latency)
policy->cpuinfo.transition_latency =
arm_bL_ops->get_transition_latency(cpu_dev);
......@@ -509,7 +513,7 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy)
return -ENODEV;
}
put_cluster_clk_and_freq_table(cpu_dev);
put_cluster_clk_and_freq_table(cpu_dev, policy->related_cpus);
dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);
return 0;
......
......@@ -30,11 +30,11 @@ struct cpufreq_arm_bL_ops {
* This must set opp table for cpu_dev in a similar way as done by
* dev_pm_opp_of_add_table().
*/
int (*init_opp_table)(struct device *cpu_dev);
int (*init_opp_table)(const struct cpumask *cpumask);
/* Optional */
int (*get_transition_latency)(struct device *cpu_dev);
void (*free_opp_table)(struct device *cpu_dev);
void (*free_opp_table)(const struct cpumask *cpumask);
};
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
......
......@@ -43,23 +43,6 @@ static struct device_node *get_cpu_node_with_valid_op(int cpu)
return np;
}
static int dt_init_opp_table(struct device *cpu_dev)
{
struct device_node *np;
int ret;
np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_err("failed to find cpu%d node\n", cpu_dev->id);
return -ENOENT;
}
ret = dev_pm_opp_of_add_table(cpu_dev);
of_node_put(np);
return ret;
}
static int dt_get_transition_latency(struct device *cpu_dev)
{
struct device_node *np;
......@@ -81,8 +64,8 @@ static int dt_get_transition_latency(struct device *cpu_dev)
static struct cpufreq_arm_bL_ops dt_bL_ops = {
.name = "dt-bl",
.get_transition_latency = dt_get_transition_latency,
.init_opp_table = dt_init_opp_table,
.free_opp_table = dev_pm_opp_of_remove_table,
.init_opp_table = dev_pm_opp_of_cpumask_add_table,
.free_opp_table = dev_pm_opp_of_cpumask_remove_table,
};
static int generic_bL_probe(struct platform_device *pdev)
......
......@@ -173,4 +173,25 @@ static int __init cppc_cpufreq_init(void)
return -ENODEV;
}
static void __exit cppc_cpufreq_exit(void)
{
struct cpudata *cpu;
int i;
cpufreq_unregister_driver(&cppc_cpufreq_driver);
for_each_possible_cpu(i) {
cpu = all_cpu_data[i];
free_cpumask_var(cpu->shared_cpu_map);
kfree(cpu);
}
kfree(all_cpu_data);
}
module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");
late_initcall(cppc_cpufreq_init);
/*
* Copyright (C) 2016 Linaro.
* Viresh Kumar <viresh.kumar@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>
static const struct of_device_id machines[] __initconst = {
{ .compatible = "allwinner,sun4i-a10", },
{ .compatible = "allwinner,sun5i-a10s", },
{ .compatible = "allwinner,sun5i-a13", },
{ .compatible = "allwinner,sun5i-r8", },
{ .compatible = "allwinner,sun6i-a31", },
{ .compatible = "allwinner,sun6i-a31s", },
{ .compatible = "allwinner,sun7i-a20", },
{ .compatible = "allwinner,sun8i-a23", },
{ .compatible = "allwinner,sun8i-a33", },
{ .compatible = "allwinner,sun8i-a83t", },
{ .compatible = "allwinner,sun8i-h3", },
{ .compatible = "hisilicon,hi6220", },
{ .compatible = "fsl,imx27", },
{ .compatible = "fsl,imx51", },
{ .compatible = "fsl,imx53", },
{ .compatible = "fsl,imx7d", },
{ .compatible = "marvell,berlin", },
{ .compatible = "samsung,exynos3250", },
{ .compatible = "samsung,exynos4210", },
{ .compatible = "samsung,exynos4212", },
{ .compatible = "samsung,exynos4412", },
{ .compatible = "samsung,exynos5250", },
#ifndef CONFIG_BL_SWITCHER
{ .compatible = "samsung,exynos5420", },
{ .compatible = "samsung,exynos5800", },
#endif
{ .compatible = "renesas,emev2", },
{ .compatible = "renesas,r7s72100", },
{ .compatible = "renesas,r8a73a4", },
{ .compatible = "renesas,r8a7740", },
{ .compatible = "renesas,r8a7778", },
{ .compatible = "renesas,r8a7779", },
{ .compatible = "renesas,r8a7790", },
{ .compatible = "renesas,r8a7791", },
{ .compatible = "renesas,r8a7793", },
{ .compatible = "renesas,r8a7794", },
{ .compatible = "renesas,sh73a0", },
{ .compatible = "rockchip,rk2928", },
{ .compatible = "rockchip,rk3036", },
{ .compatible = "rockchip,rk3066a", },
{ .compatible = "rockchip,rk3066b", },
{ .compatible = "rockchip,rk3188", },
{ .compatible = "rockchip,rk3228", },
{ .compatible = "rockchip,rk3288", },
{ .compatible = "rockchip,rk3366", },
{ .compatible = "rockchip,rk3368", },
{ .compatible = "rockchip,rk3399", },
{ .compatible = "sigma,tango4" },
{ .compatible = "ti,omap2", },
{ .compatible = "ti,omap3", },
{ .compatible = "ti,omap4", },
{ .compatible = "ti,omap5", },
{ .compatible = "xlnx,zynq-7000", },
};
static int __init cpufreq_dt_platdev_init(void)
{
struct device_node *np = of_find_node_by_path("/");
if (!np)
return -ENODEV;
if (!of_match_node(machines, np))
return -ENODEV;
of_node_put(of_root);
return PTR_ERR_OR_ZERO(platform_device_register_simple("cpufreq-dt", -1,
NULL, 0));
}
device_initcall(cpufreq_dt_platdev_init);
......@@ -15,7 +15,6 @@
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpufreq-dt.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
......@@ -147,7 +146,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
unsigned int transition_latency;
bool opp_v1 = false;
bool fallback = false;
const char *name;
int ret;
......@@ -167,14 +166,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* Get OPP-sharing information from "operating-points-v2" bindings */
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
if (ret != -ENOENT)
goto out_put_clk;
/*
* operating-points-v2 not supported, fallback to old method of
* finding shared-OPPs for backward compatibility.
* finding shared-OPPs for backward compatibility if the
* platform hasn't set sharing CPUs.
*/
if (ret == -ENOENT)
opp_v1 = true;
else
goto out_put_clk;
if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
fallback = true;
}
/*
......@@ -214,11 +215,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
if (opp_v1) {
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
if (!pd || !pd->independent_clocks)
cpumask_setall(policy->cpus);
if (fallback) {
cpumask_setall(policy->cpus);
/*
* OPP tables are initialized only for policy->cpu, do it for
......
......@@ -7,6 +7,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
......@@ -56,8 +58,6 @@ MODULE_PARM_DESC(fid, "CPU multiplier to use (11.5 = 115)");
MODULE_PARM_DESC(min_fsb,
"Minimum FSB to use, if not defined: current FSB - 50");
#define PFX "cpufreq-nforce2: "
/**
* nforce2_calc_fsb - calculate FSB
* @pll: PLL value
......@@ -174,13 +174,13 @@ static int nforce2_set_fsb(unsigned int fsb)
int pll = 0;
if ((fsb > max_fsb) || (fsb < NFORCE2_MIN_FSB)) {
printk(KERN_ERR PFX "FSB %d is out of range!\n", fsb);
pr_err("FSB %d is out of range!\n", fsb);
return -EINVAL;
}
tfsb = nforce2_fsb_read(0);
if (!tfsb) {
printk(KERN_ERR PFX "Error while reading the FSB\n");
pr_err("Error while reading the FSB\n");
return -EINVAL;
}
......@@ -276,8 +276,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
/* local_irq_save(flags); */
if (nforce2_set_fsb(target_fsb) < 0)
printk(KERN_ERR PFX "Changing FSB to %d failed\n",
target_fsb);
pr_err("Changing FSB to %d failed\n", target_fsb);
else
pr_debug("Changed FSB successfully to %d\n",
target_fsb);
......@@ -325,8 +324,7 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
/* FIX: Get FID from CPU */
if (!fid) {
if (!cpu_khz) {
printk(KERN_WARNING PFX
"cpu_khz not set, can't calculate multiplier!\n");
pr_warn("cpu_khz not set, can't calculate multiplier!\n");
return -ENODEV;
}
......@@ -341,8 +339,8 @@ static int nforce2_cpu_init(struct cpufreq_policy *policy)
}
}
printk(KERN_INFO PFX "FSB currently at %i MHz, FID %d.%d\n", fsb,
fid / 10, fid % 10);
pr_info("FSB currently at %i MHz, FID %d.%d\n",
fsb, fid / 10, fid % 10);
/* Set maximum FSB to FSB at boot time */
max_fsb = nforce2_fsb_read(1);
......@@ -401,11 +399,9 @@ static int nforce2_detect_chipset(void)
if (nforce2_dev == NULL)
return -ENODEV;
printk(KERN_INFO PFX "Detected nForce2 chipset revision %X\n",
nforce2_dev->revision);
printk(KERN_INFO PFX
"FSB changing is maybe unstable and can lead to "
"crashes and data loss.\n");
pr_info("Detected nForce2 chipset revision %X\n",
nforce2_dev->revision);
pr_info("FSB changing is maybe unstable and can lead to crashes and data loss\n");
return 0;
}
......@@ -423,7 +419,7 @@ static int __init nforce2_init(void)
/* detect chipset */
if (nforce2_detect_chipset()) {
printk(KERN_INFO PFX "No nForce2 chipset.\n");
pr_info("No nForce2 chipset\n");
return -ENODEV;
}
......
......@@ -78,6 +78,11 @@ static int cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static inline int cpufreq_exit_governor(struct cpufreq_policy *policy)
{
return cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
}
/**
* Two notifier lists: the "policy" list is involved in the
* validation process for a new CPU frequency policy; the
......@@ -429,6 +434,73 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
/*
* Fast frequency switching status count. Positive means "enabled", negative
* means "disabled" and 0 means "not decided yet".
*/
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);
static void cpufreq_list_transition_notifiers(void)
{
struct notifier_block *nb;
pr_info("Registered transition notifiers:\n");
mutex_lock(&cpufreq_transition_notifier_list.mutex);
for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
pr_info("%pF\n", nb->notifier_call);
mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
* cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
* @policy: cpufreq policy to enable fast frequency switching for.
*
* Try to enable fast frequency switching for @policy.
*
* The attempt will fail if there is at least one transition notifier registered
* at this point, as fast frequency switching is quite fundamentally at odds
* with transition notifiers. Thus if successful, it will make registration of
* transition notifiers fail going forward.
*/
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
lockdep_assert_held(&policy->rwsem);
if (!policy->fast_switch_possible)
return;
mutex_lock(&cpufreq_fast_switch_lock);
if (cpufreq_fast_switch_count >= 0) {
cpufreq_fast_switch_count++;
policy->fast_switch_enabled = true;
} else {
pr_warn("CPU%u: Fast frequency switching not enabled\n",
policy->cpu);
cpufreq_list_transition_notifiers();
}
mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
/**
* cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
* @policy: cpufreq policy to disable fast frequency switching for.
*/
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
mutex_lock(&cpufreq_fast_switch_lock);
if (policy->fast_switch_enabled) {
policy->fast_switch_enabled = false;
if (!WARN_ON(cpufreq_fast_switch_count <= 0))
cpufreq_fast_switch_count--;
}
mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/*********************************************************************
* SYSFS INTERFACE *
......@@ -1248,26 +1320,24 @@ static int cpufreq_online(unsigned int cpu)
*/
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
struct cpufreq_policy *policy;
unsigned cpu = dev->id;
int ret;
dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
if (cpu_online(cpu)) {
ret = cpufreq_online(cpu);
} else {
/*
* A hotplug notifier will follow and we will handle it as CPU
* online then. For now, just create the sysfs link, unless
* there is no policy or the link is already present.
*/
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
if (cpu_online(cpu))
return cpufreq_online(cpu);
ret = policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
? add_cpu_dev_symlink(policy, cpu) : 0;
}
/*
* A hotplug notifier will follow and we will handle it as CPU online
* then. For now, just create the sysfs link, unless there is no policy
* or the link is already present.
*/
policy = per_cpu(cpufreq_cpu_data, cpu);
if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
return 0;
return ret;
return add_cpu_dev_symlink(policy, cpu);
}
static void cpufreq_offline(unsigned int cpu)
......@@ -1319,7 +1389,7 @@ static void cpufreq_offline(unsigned int cpu)
/* If cpu is last user of policy, free policy */
if (has_target()) {
ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
ret = cpufreq_exit_governor(policy);
if (ret)
pr_err("%s: Failed to exit governor\n", __func__);
}
......@@ -1447,8 +1517,12 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
ret_freq = cpufreq_driver->get(policy->cpu);
/* Updating inactive policies is invalid, so avoid doing that. */
if (unlikely(policy_is_inactive(policy)))
/*
* Updating inactive policies is invalid, so avoid doing that. Also
* if fast frequency switching is used with the given policy, the check
* against policy->cur is pointless, so skip it in that case too.
*/
if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
......@@ -1679,8 +1753,18 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
mutex_lock(&cpufreq_fast_switch_lock);
if (cpufreq_fast_switch_count > 0) {
mutex_unlock(&cpufreq_fast_switch_lock);
return -EBUSY;
}
ret = srcu_notifier_chain_register(
&cpufreq_transition_notifier_list, nb);
if (!ret)
cpufreq_fast_switch_count--;
mutex_unlock(&cpufreq_fast_switch_lock);
break;
case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_register(
......@@ -1713,8 +1797,14 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
switch (list) {
case CPUFREQ_TRANSITION_NOTIFIER:
mutex_lock(&cpufreq_fast_switch_lock);
ret = srcu_notifier_chain_unregister(
&cpufreq_transition_notifier_list, nb);
if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
cpufreq_fast_switch_count++;
mutex_unlock(&cpufreq_fast_switch_lock);
break;
case CPUFREQ_POLICY_NOTIFIER:
ret = blocking_notifier_chain_unregister(
......@@ -1733,6 +1823,37 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
* GOVERNORS *
*********************************************************************/
/**
* cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
* @policy: cpufreq policy to switch the frequency for.
* @target_freq: New frequency to set (may be approximate).
*
* Carry out a fast frequency switch without sleeping.
*
* The driver's ->fast_switch() callback invoked by this function must be
* suitable for being called from within RCU-sched read-side critical sections
* and it is expected to select the minimum available frequency greater than or
* equal to @target_freq (CPUFREQ_RELATION_L).
*
* This function must not be called if policy->fast_switch_enabled is unset.
*
* Governors calling this function must guarantee that it will never be invoked
* twice in parallel for the same policy and that it will never be called in
* parallel with either ->target() or ->target_index() for the same policy.
*
* If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
* callback to indicate an error condition, the hardware configuration must be
* preserved.
*/
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
target_freq = clamp_val(target_freq, policy->min, policy->max);
return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
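For illustration, a hypothetical caller that follows the rules spelled
out in the kerneldoc above (the function name and the surrounding
governor logic are assumptions made for this sketch, not code from this
series):

#include <linux/cpufreq.h>

/* Runs from scheduler context, so it must not sleep. */
static void example_gov_fast_update(struct cpufreq_policy *policy,
				    unsigned int next_freq)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;	/* take the regular ->target() path instead */

	freq = cpufreq_driver_fast_switch(policy, next_freq);
	if (freq == CPUFREQ_ENTRY_INVALID)
		return;	/* switch failed; hardware state is preserved */

	policy->cur = freq;
}

The governor must also ensure this is never run twice in parallel for
the same policy, for example by driving it from a single per-policy
scheduler callback or work item.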
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int index)
......@@ -2108,7 +2229,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
ret = cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
ret = cpufreq_exit_governor(policy);
if (ret) {
pr_err("%s: Failed to Exit Governor: %s (%d)\n",
__func__, old_gov->name, ret);
......@@ -2125,7 +2246,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
pr_debug("cpufreq: governor change\n");
return 0;
}
cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
cpufreq_exit_governor(policy);
}
/* new governor failed, so re-start old one */
......@@ -2193,16 +2314,13 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpufreq_online(cpu);
break;
case CPU_DOWN_PREPARE:
cpufreq_offline(cpu);
break;
case CPU_DOWN_FAILED:
cpufreq_online(cpu);
break;
}
return NOTIFY_OK;
}
......
......@@ -129,9 +129,10 @@ static struct notifier_block cs_cpufreq_notifier_block = {
/************************** sysfs interface ************************/
static struct dbs_governor cs_dbs_gov;
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
const char *buf, size_t count)
static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
......@@ -143,9 +144,10 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
......@@ -158,9 +160,10 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
......@@ -175,9 +178,10 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
......@@ -199,9 +203,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
return count;
}
static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input;
int ret;
......
......@@ -24,20 +24,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
/*
* The polling frequency depends on the capability of the processor. Default
* polling frequency is 1000 times the transition latency of the processor. The
* governor will work on any processor with transition latency <= 10ms, using
* appropriate sampling rate.
*
* For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
* this governor will not work. All times here are in us (micro seconds).
*/
#define MIN_SAMPLING_RATE_RATIO (2)
#define LATENCY_MULTIPLIER (1000)
#define MIN_LATENCY_MULTIPLIER (20)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
/* Ondemand Sampling types */
enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
......@@ -52,7 +38,7 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/* Governor demand based switching data (per-policy or global). */
struct dbs_data {
int usage_count;
struct gov_attr_set attr_set;
void *tuners;
unsigned int min_sampling_rate;
unsigned int ignore_nice_load;
......@@ -60,37 +46,27 @@ struct dbs_data {
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int io_is_busy;
struct kobject kobj;
struct list_head policy_dbs_list;
/*
* Protect concurrent updates to governor tunables from sysfs,
* policy_dbs_list and usage_count.
*/
struct mutex mutex;
};
/* Governor's specific attributes */
struct dbs_data;
struct governor_attr {
struct attribute attr;
ssize_t (*show)(struct dbs_data *dbs_data, char *buf);
ssize_t (*store)(struct dbs_data *dbs_data, const char *buf,
size_t count);
};
static inline struct dbs_data *to_dbs_data(struct gov_attr_set *attr_set)
{
return container_of(attr_set, struct dbs_data, attr_set);
}
#define gov_show_one(_gov, file_name) \
static ssize_t show_##file_name \
(struct dbs_data *dbs_data, char *buf) \
(struct gov_attr_set *attr_set, char *buf) \
{ \
struct dbs_data *dbs_data = to_dbs_data(attr_set); \
struct _gov##_dbs_tuners *tuners = dbs_data->tuners; \
return sprintf(buf, "%u\n", tuners->file_name); \
}
#define gov_show_one_common(file_name) \
static ssize_t show_##file_name \
(struct dbs_data *dbs_data, char *buf) \
(struct gov_attr_set *attr_set, char *buf) \
{ \
struct dbs_data *dbs_data = to_dbs_data(attr_set); \
return sprintf(buf, "%u\n", dbs_data->file_name); \
}
......@@ -135,7 +111,7 @@ static inline void gov_update_sample_delay(struct policy_dbs_info *policy_dbs,
/* Per cpu structures */
struct cpu_dbs_info {
u64 prev_cpu_idle;
u64 prev_cpu_wall;
u64 prev_update_time;
u64 prev_cpu_nice;
/*
* Used to keep track of load in the previous interval. However, when
......@@ -184,7 +160,7 @@ void od_register_powersave_bias_handler(unsigned int (*f)
(struct cpufreq_policy *, unsigned int, unsigned int),
unsigned int powersave_bias);
void od_unregister_powersave_bias_handler(void);
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
size_t count);
void gov_update_cpu_data(struct dbs_data *dbs_data);
#endif /* _CPUFREQ_GOVERNOR_H */
/*
* Abstract code for CPUFreq governor tunable sysfs attributes.
*
* Copyright (C) 2016, Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "cpufreq_governor.h"
static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj)
{
return container_of(kobj, struct gov_attr_set, kobj);
}
static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
return container_of(attr, struct governor_attr, attr);
}
static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
struct governor_attr *gattr = to_gov_attr(attr);
return gattr->show(to_gov_attr_set(kobj), buf);
}
static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct gov_attr_set *attr_set = to_gov_attr_set(kobj);
struct governor_attr *gattr = to_gov_attr(attr);
int ret;
mutex_lock(&attr_set->update_lock);
ret = attr_set->usage_count ? gattr->store(attr_set, buf, count) : -EBUSY;
mutex_unlock(&attr_set->update_lock);
return ret;
}
const struct sysfs_ops governor_sysfs_ops = {
.show = governor_show,
.store = governor_store,
};
EXPORT_SYMBOL_GPL(governor_sysfs_ops);
void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node)
{
INIT_LIST_HEAD(&attr_set->policy_list);
mutex_init(&attr_set->update_lock);
attr_set->usage_count = 1;
list_add(list_node, &attr_set->policy_list);
}
EXPORT_SYMBOL_GPL(gov_attr_set_init);
void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node)
{
mutex_lock(&attr_set->update_lock);
attr_set->usage_count++;
list_add(list_node, &attr_set->policy_list);
mutex_unlock(&attr_set->update_lock);
}
EXPORT_SYMBOL_GPL(gov_attr_set_get);
unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node)
{
unsigned int count;
mutex_lock(&attr_set->update_lock);
list_del(list_node);
count = --attr_set->usage_count;
mutex_unlock(&attr_set->update_lock);
if (count)
return count;
kobject_put(&attr_set->kobj);
mutex_destroy(&attr_set->update_lock);
return 0;
}
EXPORT_SYMBOL_GPL(gov_attr_set_put);
......@@ -207,9 +207,10 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
/************************** sysfs interface ************************/
static struct dbs_governor od_dbs_gov;
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf,
size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
......@@ -224,9 +225,10 @@ static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
return count;
}
static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_up_threshold(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);
......@@ -240,9 +242,10 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
const char *buf, size_t count)
static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct policy_dbs_info *policy_dbs;
unsigned int input;
int ret;
......@@ -254,7 +257,7 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
dbs_data->sampling_down_factor = input;
/* Reset down sampling multiplier in case it was active */
list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
/*
* Doing this without locking might lead to using different
* rate_mult values in od_update() and od_dbs_timer().
......@@ -267,9 +270,10 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
const char *buf, size_t count)
static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
unsigned int input;
int ret;
......@@ -291,9 +295,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
return count;
}
static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
size_t count)
static ssize_t store_powersave_bias(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct dbs_data *dbs_data = to_dbs_data(attr_set);
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
struct policy_dbs_info *policy_dbs;
unsigned int input;
......@@ -308,7 +313,7 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
od_tuners->powersave_bias = input;
list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list)
list_for_each_entry(policy_dbs, &attr_set->policy_list, list)
ondemand_powersave_bias_init(policy_dbs->policy);
return count;
......
......@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
static DEFINE_MUTEX(userspace_mutex);
......@@ -31,6 +32,7 @@ static DEFINE_MUTEX(userspace_mutex);
static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
{
int ret = -EINVAL;
unsigned int *setspeed = policy->governor_data;
pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
......@@ -38,6 +40,8 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
if (!per_cpu(cpu_is_managed, policy->cpu))
goto err;
*setspeed = freq;
ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
err:
mutex_unlock(&userspace_mutex);
......@@ -49,19 +53,45 @@ static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", policy->cur);
}
static int cpufreq_userspace_policy_init(struct cpufreq_policy *policy)
{
unsigned int *setspeed;
setspeed = kzalloc(sizeof(*setspeed), GFP_KERNEL);
if (!setspeed)
return -ENOMEM;
policy->governor_data = setspeed;
return 0;
}
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
{
unsigned int *setspeed = policy->governor_data;
unsigned int cpu = policy->cpu;
int rc = 0;
if (event == CPUFREQ_GOV_POLICY_INIT)
return cpufreq_userspace_policy_init(policy);
if (!setspeed)
return -EINVAL;
switch (event) {
case CPUFREQ_GOV_POLICY_EXIT:
mutex_lock(&userspace_mutex);
policy->governor_data = NULL;
kfree(setspeed);
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_START:
BUG_ON(!policy->cur);
pr_debug("started managing cpu %u\n", cpu);
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 1;
*setspeed = policy->cur;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_STOP:
......@@ -69,20 +99,23 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
mutex_lock(&userspace_mutex);
per_cpu(cpu_is_managed, cpu) = 0;
*setspeed = 0;
mutex_unlock(&userspace_mutex);
break;
case CPUFREQ_GOV_LIMITS:
mutex_lock(&userspace_mutex);
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
cpu, policy->min, policy->max,
policy->cur);
pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n",
cpu, policy->min, policy->max, policy->cur, *setspeed);
if (policy->max < policy->cur)
if (policy->max < *setspeed)
__cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
else if (policy->min > policy->cur)
else if (policy->min > *setspeed)
__cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
else
__cpufreq_driver_target(policy, *setspeed,
CPUFREQ_RELATION_L);
mutex_unlock(&userspace_mutex);
break;
}
......
......@@ -6,6 +6,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -20,7 +22,7 @@
#include <asm/msr.h>
#include <asm/tsc.h>
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
#include <linux/acpi.h>
#include <acpi/processor.h>
#endif
......@@ -33,7 +35,7 @@
struct eps_cpu_data {
u32 fsb;
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
u32 bios_limit;
#endif
struct cpufreq_frequency_table freq_table[];
......@@ -46,7 +48,7 @@ static int freq_failsafe_off;
static int voltage_failsafe_off;
static int set_max_voltage;
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
static int ignore_acpi_limit;
static struct acpi_processor_performance *eps_acpi_cpu_perf;
......@@ -141,11 +143,9 @@ static int eps_set_state(struct eps_cpu_data *centaur,
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
printk(KERN_INFO "eps: Current voltage = %dmV\n",
current_voltage * 16 + 700);
pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
printk(KERN_INFO "eps: Current multiplier = %d\n",
current_multiplier);
pr_info("Current multiplier = %d\n", current_multiplier);
}
#endif
return 0;
......@@ -166,7 +166,7 @@ static int eps_target(struct cpufreq_policy *policy, unsigned int index)
dest_state = centaur->freq_table[index].driver_data & 0xffff;
ret = eps_set_state(centaur, policy, dest_state);
if (ret)
printk(KERN_ERR "eps: Timeout!\n");
pr_err("Timeout!\n");
return ret;
}
......@@ -186,7 +186,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
int k, step, voltage;
int ret;
int states;
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
unsigned int limit;
#endif
......@@ -194,36 +194,36 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
/* Check brand */
printk(KERN_INFO "eps: Detected VIA ");
pr_info("Detected VIA ");
switch (c->x86_model) {
case 10:
rdmsr(0x1153, lo, hi);
brand = (((lo >> 2) ^ lo) >> 18) & 3;
printk(KERN_CONT "Model A ");
pr_cont("Model A ");
break;
case 13:
rdmsr(0x1154, lo, hi);
brand = (((lo >> 4) ^ (lo >> 2))) & 0x000000ff;
printk(KERN_CONT "Model D ");
pr_cont("Model D ");
break;
}
switch (brand) {
case EPS_BRAND_C7M:
printk(KERN_CONT "C7-M\n");
pr_cont("C7-M\n");
break;
case EPS_BRAND_C7:
printk(KERN_CONT "C7\n");
pr_cont("C7\n");
break;
case EPS_BRAND_EDEN:
printk(KERN_CONT "Eden\n");
pr_cont("Eden\n");
break;
case EPS_BRAND_C7D:
printk(KERN_CONT "C7-D\n");
pr_cont("C7-D\n");
break;
case EPS_BRAND_C3:
printk(KERN_CONT "C3\n");
pr_cont("C3\n");
return -ENODEV;
break;
}
......@@ -235,7 +235,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Can be locked at 0 */
rdmsrl(MSR_IA32_MISC_ENABLE, val);
if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
pr_info("Can't enable Enhanced PowerSaver\n");
return -ENODEV;
}
}
......@@ -243,22 +243,19 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Print voltage and multiplier */
rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
current_voltage = lo & 0xff;
printk(KERN_INFO "eps: Current voltage = %dmV\n",
current_voltage * 16 + 700);
pr_info("Current voltage = %dmV\n", current_voltage * 16 + 700);
current_multiplier = (lo >> 8) & 0xff;
printk(KERN_INFO "eps: Current multiplier = %d\n", current_multiplier);
pr_info("Current multiplier = %d\n", current_multiplier);
/* Print limits */
max_voltage = hi & 0xff;
printk(KERN_INFO "eps: Highest voltage = %dmV\n",
max_voltage * 16 + 700);
pr_info("Highest voltage = %dmV\n", max_voltage * 16 + 700);
max_multiplier = (hi >> 8) & 0xff;
printk(KERN_INFO "eps: Highest multiplier = %d\n", max_multiplier);
pr_info("Highest multiplier = %d\n", max_multiplier);
min_voltage = (hi >> 16) & 0xff;
printk(KERN_INFO "eps: Lowest voltage = %dmV\n",
min_voltage * 16 + 700);
pr_info("Lowest voltage = %dmV\n", min_voltage * 16 + 700);
min_multiplier = (hi >> 24) & 0xff;
printk(KERN_INFO "eps: Lowest multiplier = %d\n", min_multiplier);
pr_info("Lowest multiplier = %d\n", min_multiplier);
/* Sanity checks */
if (current_multiplier == 0 || max_multiplier == 0
......@@ -276,34 +273,30 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Check for systems using underclocked CPU */
if (!freq_failsafe_off && max_multiplier != current_multiplier) {
printk(KERN_INFO "eps: Your processor is running at different "
"frequency then its maximum. Aborting.\n");
printk(KERN_INFO "eps: You can use freq_failsafe_off option "
"to disable this check.\n");
pr_info("Your processor is running at different frequency then its maximum. Aborting.\n");
pr_info("You can use freq_failsafe_off option to disable this check.\n");
return -EINVAL;
}
if (!voltage_failsafe_off && max_voltage != current_voltage) {
printk(KERN_INFO "eps: Your processor is running at different "
"voltage then its maximum. Aborting.\n");
printk(KERN_INFO "eps: You can use voltage_failsafe_off "
"option to disable this check.\n");
pr_info("Your processor is running at different voltage then its maximum. Aborting.\n");
pr_info("You can use voltage_failsafe_off option to disable this check.\n");
return -EINVAL;
}
/* Calc FSB speed */
fsb = cpu_khz / current_multiplier;
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
/* Check for ACPI processor speed limit */
if (!ignore_acpi_limit && !eps_acpi_init()) {
if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) {
printk(KERN_INFO "eps: ACPI limit %u.%uGHz\n",
pr_info("ACPI limit %u.%uGHz\n",
limit/1000000,
(limit%1000000)/10000);
eps_acpi_exit(policy);
/* Check if max_multiplier is in BIOS limits */
if (limit && max_multiplier * fsb > limit) {
printk(KERN_INFO "eps: Aborting.\n");
pr_info("Aborting\n");
return -EINVAL;
}
}
......@@ -319,8 +312,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
v = (set_max_voltage - 700) / 16;
/* Check if voltage is within limits */
if (v >= min_voltage && v <= max_voltage) {
printk(KERN_INFO "eps: Setting %dmV as maximum.\n",
v * 16 + 700);
pr_info("Setting %dmV as maximum\n", v * 16 + 700);
max_voltage = v;
}
}
......@@ -341,7 +333,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
/* Copy basic values */
centaur->fsb = fsb;
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
centaur->bios_limit = limit;
#endif
......@@ -426,7 +418,7 @@ module_param(freq_failsafe_off, int, 0644);
MODULE_PARM_DESC(freq_failsafe_off, "Disable current vs max frequency check");
module_param(voltage_failsafe_off, int, 0644);
MODULE_PARM_DESC(voltage_failsafe_off, "Disable current vs max voltage check");
#if defined CONFIG_ACPI_PROCESSOR || defined CONFIG_ACPI_PROCESSOR_MODULE
#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
module_param(ignore_acpi_limit, int, 0644);
MODULE_PARM_DESC(ignore_acpi_limit, "Don't check ACPI's processor speed limit");
#endif
......
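Editor's note on the #if change above: IS_ENABLED(CONFIG_FOO), from <linux/kconfig.h>, evaluates to 1 when the option is built in (=y) or modular (=m) and to 0 otherwise, so it replaces the open-coded pair of checks. A minimal illustration, not part of this commit (CONFIG_ACPI_PROCESSOR is used only as the example symbol):

	#include <linux/kconfig.h>

	/* old form: must name both the built-in and the module symbol */
	#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
	/* ... ACPI-dependent code ... */
	#endif

	/* new form: one macro covers =y and =m */
	#if IS_ENABLED(CONFIG_ACPI_PROCESSOR)
	/* ... ACPI-dependent code ... */
	#endif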
......@@ -16,6 +16,8 @@
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -185,7 +187,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
static int __init elanfreq_setup(char *str)
{
max_freq = simple_strtoul(str, &str, 0);
printk(KERN_WARNING "You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
pr_warn("You're using the deprecated elanfreq command line option. Use elanfreq.max_freq instead, please!\n");
return 1;
}
__setup("elanfreq=", elanfreq_setup);
......
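Editor's note: the printk() to pr_*() conversions above rely on each file defining pr_fmt() before its first include, so every pr_info()/pr_warn()/pr_err() call is prefixed automatically. A minimal, self-contained sketch of the pattern (module and function names are illustrative, not from this commit):

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>

	static int __init prfmt_demo_init(void)
	{
		/* prints "prfmt_demo: loaded" when built as prfmt_demo.ko */
		pr_info("loaded\n");
		return 0;
	}

	static void __exit prfmt_demo_exit(void)
	{
		pr_info("unloaded\n");
	}

	module_init(prfmt_demo_init);
	module_exit(prfmt_demo_exit);
	MODULE_LICENSE("GPL");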
/*
* Hisilicon Platforms Using ACPU CPUFreq Support
*
* Copyright (c) 2015 Hisilicon Limited.
* Copyright (c) 2015 Linaro Limited.
*
* Leo Yan <leo.yan@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
static int __init hisi_acpu_cpufreq_driver_init(void)
{
struct platform_device *pdev;
if (!of_machine_is_compatible("hisilicon,hi6220"))
return -ENODEV;
pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return PTR_ERR_OR_ZERO(pdev);
}
module_init(hisi_acpu_cpufreq_driver_init);
MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>");
MODULE_DESCRIPTION("Hisilicon acpu cpufreq driver");
MODULE_LICENSE("GPL v2");
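Editor's note: PTR_ERR_OR_ZERO() used above is shorthand for the usual IS_ERR()/PTR_ERR() check on a pointer-or-error return. An equivalent open-coded sketch (function name is illustrative, not part of this commit):

	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int __init example_register(void)
	{
		struct platform_device *pdev;

		pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
		if (IS_ERR(pdev))
			return PTR_ERR(pdev);	/* propagate the encoded error */
		return 0;			/* success: pdev is a valid pointer */
	}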
......@@ -8,6 +8,8 @@
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
......@@ -118,8 +120,7 @@ processor_get_freq (
if (ret) {
set_cpus_allowed_ptr(current, &saved_mask);
printk(KERN_WARNING "get performance failed with error %d\n",
ret);
pr_warn("get performance failed with error %d\n", ret);
ret = 0;
goto migrate_end;
}
......@@ -177,7 +178,7 @@ processor_set_freq (
ret = processor_set_pstate(value);
if (ret) {
printk(KERN_WARNING "Transition failed with error %d\n", ret);
pr_warn("Transition failed with error %d\n", ret);
retval = -ENODEV;
goto migrate_end;
}
......@@ -291,8 +292,7 @@ acpi_cpufreq_cpu_init (
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management "
"activated.\n", cpu);
pr_info("CPU%u - ACPI performance management activated\n", cpu);
for (i = 0; i < data->acpi_data.state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n",
......
This diff is collapsed.
......@@ -21,6 +21,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
......@@ -40,8 +42,6 @@
#include "longhaul.h"
#define PFX "longhaul: "
#define TYPE_LONGHAUL_V1 1
#define TYPE_LONGHAUL_V2 2
#define TYPE_POWERSAVER 3
......@@ -347,14 +347,13 @@ static int longhaul_setstate(struct cpufreq_policy *policy,
freqs.new = calc_speed(longhaul_get_cpu_mult());
/* Check if requested frequency is set. */
if (unlikely(freqs.new != speed)) {
printk(KERN_INFO PFX "Failed to set requested frequency!\n");
pr_info("Failed to set requested frequency!\n");
/* Revision ID = 1 but processor is expecting revision key
* equal to 0. Jumpers at the bottom of processor will change
* multiplier and FSB, but will not change bits in Longhaul
* MSR nor enable voltage scaling. */
if (!revid_errata) {
printk(KERN_INFO PFX "Enabling \"Ignore Revision ID\" "
"option.\n");
pr_info("Enabling \"Ignore Revision ID\" option\n");
revid_errata = 1;
msleep(200);
goto retry_loop;
......@@ -364,11 +363,10 @@ static int longhaul_setstate(struct cpufreq_policy *policy,
* but it doesn't change frequency. I tried poking various
* bits in northbridge registers, but without success. */
if (longhaul_flags & USE_ACPI_C3) {
printk(KERN_INFO PFX "Disabling ACPI C3 support.\n");
pr_info("Disabling ACPI C3 support\n");
longhaul_flags &= ~USE_ACPI_C3;
if (revid_errata) {
printk(KERN_INFO PFX "Disabling \"Ignore "
"Revision ID\" option.\n");
pr_info("Disabling \"Ignore Revision ID\" option\n");
revid_errata = 0;
}
msleep(200);
......@@ -379,7 +377,7 @@ static int longhaul_setstate(struct cpufreq_policy *policy,
* RevID = 1. RevID errata will make things right. Just
* to be 100% sure. */
if (longhaul_version == TYPE_LONGHAUL_V2) {
printk(KERN_INFO PFX "Switching to Longhaul ver. 1\n");
pr_info("Switching to Longhaul ver. 1\n");
longhaul_version = TYPE_LONGHAUL_V1;
msleep(200);
goto retry_loop;
......@@ -387,8 +385,7 @@ static int longhaul_setstate(struct cpufreq_policy *policy,
}
if (!bm_timeout) {
printk(KERN_INFO PFX "Warning: Timeout while waiting for "
"idle PCI bus.\n");
pr_info("Warning: Timeout while waiting for idle PCI bus\n");
return -EBUSY;
}
......@@ -433,12 +430,12 @@ static int longhaul_get_ranges(void)
/* Get current frequency */
mult = longhaul_get_cpu_mult();
if (mult == -1) {
printk(KERN_INFO PFX "Invalid (reserved) multiplier!\n");
pr_info("Invalid (reserved) multiplier!\n");
return -EINVAL;
}
fsb = guess_fsb(mult);
if (fsb == 0) {
printk(KERN_INFO PFX "Invalid (reserved) FSB!\n");
pr_info("Invalid (reserved) FSB!\n");
return -EINVAL;
}
/* Get max multiplier - as we always did.
......@@ -468,11 +465,11 @@ static int longhaul_get_ranges(void)
print_speed(highest_speed/1000));
if (lowest_speed == highest_speed) {
printk(KERN_INFO PFX "highestspeed == lowest, aborting.\n");
pr_info("highestspeed == lowest, aborting\n");
return -EINVAL;
}
if (lowest_speed > highest_speed) {
printk(KERN_INFO PFX "nonsense! lowest (%d > %d) !\n",
pr_info("nonsense! lowest (%d > %d) !\n",
lowest_speed, highest_speed);
return -EINVAL;
}
......@@ -538,16 +535,16 @@ static void longhaul_setup_voltagescaling(void)
rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
if (!(longhaul.bits.RevisionID & 1)) {
printk(KERN_INFO PFX "Voltage scaling not supported by CPU.\n");
pr_info("Voltage scaling not supported by CPU\n");
return;
}
if (!longhaul.bits.VRMRev) {
printk(KERN_INFO PFX "VRM 8.5\n");
pr_info("VRM 8.5\n");
vrm_mV_table = &vrm85_mV[0];
mV_vrm_table = &mV_vrm85[0];
} else {
printk(KERN_INFO PFX "Mobile VRM\n");
pr_info("Mobile VRM\n");
if (cpu_model < CPU_NEHEMIAH)
return;
vrm_mV_table = &mobilevrm_mV[0];
......@@ -558,27 +555,21 @@ static void longhaul_setup_voltagescaling(void)
maxvid = vrm_mV_table[longhaul.bits.MaximumVID];
if (minvid.mV == 0 || maxvid.mV == 0 || minvid.mV > maxvid.mV) {
printk(KERN_INFO PFX "Bogus values Min:%d.%03d Max:%d.%03d. "
"Voltage scaling disabled.\n",
minvid.mV/1000, minvid.mV%1000,
maxvid.mV/1000, maxvid.mV%1000);
pr_info("Bogus values Min:%d.%03d Max:%d.%03d - Voltage scaling disabled\n",
minvid.mV/1000, minvid.mV%1000,
maxvid.mV/1000, maxvid.mV%1000);
return;
}
if (minvid.mV == maxvid.mV) {
printk(KERN_INFO PFX "Claims to support voltage scaling but "
"min & max are both %d.%03d. "
"Voltage scaling disabled\n",
maxvid.mV/1000, maxvid.mV%1000);
pr_info("Claims to support voltage scaling but min & max are both %d.%03d - Voltage scaling disabled\n",
maxvid.mV/1000, maxvid.mV%1000);
return;
}
/* How many voltage steps*/
numvscales = maxvid.pos - minvid.pos + 1;
printk(KERN_INFO PFX
"Max VID=%d.%03d "
"Min VID=%d.%03d, "
"%d possible voltage scales\n",
pr_info("Max VID=%d.%03d Min VID=%d.%03d, %d possible voltage scales\n",
maxvid.mV/1000, maxvid.mV%1000,
minvid.mV/1000, minvid.mV%1000,
numvscales);
......@@ -617,12 +608,12 @@ static void longhaul_setup_voltagescaling(void)
pos = minvid.pos;
freq_pos->driver_data |= mV_vrm_table[pos] << 8;
vid = vrm_mV_table[mV_vrm_table[pos]];
printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n",
pr_info("f: %d kHz, index: %d, vid: %d mV\n",
speed, (int)(freq_pos - longhaul_table), vid.mV);
}
can_scale_voltage = 1;
printk(KERN_INFO PFX "Voltage scaling enabled.\n");
pr_info("Voltage scaling enabled\n");
}
......@@ -720,8 +711,7 @@ static int enable_arbiter_disable(void)
pci_write_config_byte(dev, reg, pci_cmd);
pci_read_config_byte(dev, reg, &pci_cmd);
if (!(pci_cmd & 1<<7)) {
printk(KERN_ERR PFX
"Can't enable access to port 0x22.\n");
pr_err("Can't enable access to port 0x22\n");
status = 0;
}
}
......@@ -758,8 +748,7 @@ static int longhaul_setup_southbridge(void)
if (pci_cmd & 1 << 7) {
pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
acpi_regs_addr &= 0xff00;
printk(KERN_INFO PFX "ACPI I/O at 0x%x\n",
acpi_regs_addr);
pr_info("ACPI I/O at 0x%x\n", acpi_regs_addr);
}
pci_dev_put(dev);
......@@ -853,14 +842,14 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
longhaul_version = TYPE_LONGHAUL_V1;
}
printk(KERN_INFO PFX "VIA %s CPU detected. ", cpuname);
pr_info("VIA %s CPU detected. ", cpuname);
switch (longhaul_version) {
case TYPE_LONGHAUL_V1:
case TYPE_LONGHAUL_V2:
printk(KERN_CONT "Longhaul v%d supported.\n", longhaul_version);
pr_cont("Longhaul v%d supported\n", longhaul_version);
break;
case TYPE_POWERSAVER:
printk(KERN_CONT "Powersaver supported.\n");
pr_cont("Powersaver supported\n");
break;
};
......@@ -889,15 +878,14 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
if (!(longhaul_flags & USE_ACPI_C3
|| longhaul_flags & USE_NORTHBRIDGE)
&& ((pr == NULL) || !(pr->flags.bm_control))) {
printk(KERN_ERR PFX
"No ACPI support. Unsupported northbridge.\n");
pr_err("No ACPI support: Unsupported northbridge\n");
return -ENODEV;
}
if (longhaul_flags & USE_NORTHBRIDGE)
printk(KERN_INFO PFX "Using northbridge support.\n");
pr_info("Using northbridge support\n");
if (longhaul_flags & USE_ACPI_C3)
printk(KERN_INFO PFX "Using ACPI support.\n");
pr_info("Using ACPI support\n");
ret = longhaul_get_ranges();
if (ret != 0)
......@@ -934,20 +922,18 @@ static int __init longhaul_init(void)
return -ENODEV;
if (!enable) {
printk(KERN_ERR PFX "Option \"enable\" not set. Aborting.\n");
pr_err("Option \"enable\" not set - Aborting\n");
return -ENODEV;
}
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
printk(KERN_ERR PFX "More than 1 CPU detected, "
"longhaul disabled.\n");
pr_err("More than 1 CPU detected, longhaul disabled\n");
return -ENODEV;
}
#endif
#ifdef CONFIG_X86_IO_APIC
if (cpu_has_apic) {
printk(KERN_ERR PFX "APIC detected. Longhaul is currently "
"broken in this configuration.\n");
pr_err("APIC detected. Longhaul is currently broken in this configuration.\n");
return -ENODEV;
}
#endif
......@@ -955,7 +941,7 @@ static int __init longhaul_init(void)
case 6 ... 9:
return cpufreq_register_driver(&longhaul_driver);
case 10:
printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
pr_err("Use acpi-cpufreq driver for VIA C7\n");
default:
;
}
......
......@@ -10,6 +10,9 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/err.h>
......@@ -76,7 +79,7 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpuclk = clk_get(NULL, "cpu_clk");
if (IS_ERR(cpuclk)) {
printk(KERN_ERR "cpufreq: couldn't get CPU clk\n");
pr_err("couldn't get CPU clk\n");
return PTR_ERR(cpuclk);
}
......@@ -163,7 +166,7 @@ static int __init cpufreq_init(void)
if (ret)
return ret;
pr_info("cpufreq: Loongson-2F CPU frequency driver.\n");
pr_info("Loongson-2F CPU frequency driver\n");
cpufreq_register_notifier(&loongson2_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
......
......@@ -13,6 +13,8 @@
#undef DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
......@@ -174,7 +176,7 @@ static int __init maple_cpufreq_init(void)
/* Get first CPU node */
cpunode = of_cpu_device_node_get(0);
if (cpunode == NULL) {
printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
pr_err("Can't find any CPU 0 node\n");
goto bail_noprops;
}
......@@ -182,8 +184,7 @@ static int __init maple_cpufreq_init(void)
/* we actually don't care on which CPU to access PVR */
pvr_hi = PVR_VER(mfspr(SPRN_PVR));
if (pvr_hi != 0x3c && pvr_hi != 0x44) {
printk(KERN_ERR "cpufreq: Unsupported CPU version (%x)\n",
pvr_hi);
pr_err("Unsupported CPU version (%x)\n", pvr_hi);
goto bail_noprops;
}
......@@ -222,8 +223,8 @@ static int __init maple_cpufreq_init(void)
maple_pmode_cur = -1;
maple_scom_switch_freq(maple_scom_query_freq());
printk(KERN_INFO "Registering Maple CPU frequency driver\n");
printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
pr_info("Registering Maple CPU frequency driver\n");
pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
maple_cpu_freqs[1].frequency/1000,
maple_cpu_freqs[0].frequency/1000,
maple_cpu_freqs[maple_pmode_cur].frequency/1000);
......
......@@ -59,11 +59,8 @@ static LIST_HEAD(dvfs_info_list);
static struct mtk_cpu_dvfs_info *mtk_cpu_dvfs_info_lookup(int cpu)
{
struct mtk_cpu_dvfs_info *info;
struct list_head *list;
list_for_each(list, &dvfs_info_list) {
info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
list_for_each_entry(info, &dvfs_info_list, list_head) {
if (cpumask_test_cpu(cpu, &info->cpus))
return info;
}
......@@ -524,8 +521,7 @@ static struct cpufreq_driver mt8173_cpufreq_driver = {
static int mt8173_cpufreq_probe(struct platform_device *pdev)
{
struct mtk_cpu_dvfs_info *info;
struct list_head *list, *tmp;
struct mtk_cpu_dvfs_info *info, *tmp;
int cpu, ret;
for_each_possible_cpu(cpu) {
......@@ -559,11 +555,9 @@ static int mt8173_cpufreq_probe(struct platform_device *pdev)
return 0;
release_dvfs_info_list:
list_for_each_safe(list, tmp, &dvfs_info_list) {
info = list_entry(list, struct mtk_cpu_dvfs_info, list_head);
list_for_each_entry_safe(info, tmp, &dvfs_info_list, list_head) {
mtk_cpu_dvfs_info_release(info);
list_del(list);
list_del(&info->list_head);
}
return ret;
......
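Editor's note: the mt8173 change above swaps list_for_each()/list_entry() for the typed list iterators, dropping the separate struct list_head cursor. A small self-contained sketch of both patterns (struct and function names are made up for illustration):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct demo_item {
		struct list_head node;
		int id;
	};

	static LIST_HEAD(demo_list);

	static struct demo_item *demo_lookup(int id)
	{
		struct demo_item *it;

		/* typed iterator: walks entries directly, no list_entry() needed */
		list_for_each_entry(it, &demo_list, node)
			if (it->id == id)
				return it;
		return NULL;
	}

	static void demo_release_all(void)
	{
		struct demo_item *it, *tmp;

		/* _safe variant keeps 'tmp' so entries can be freed while walking */
		list_for_each_entry_safe(it, tmp, &demo_list, node) {
			list_del(&it->node);
			kfree(it);
		}
	}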
/*
* CPUFreq support for Armada 370/XP platforms.
*
* Copyright (C) 2012-2016 Marvell
*
* Yehuda Yitschak <yehuday@marvell.com>
* Gregory Clement <gregory.clement@free-electrons.com>
* Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#define pr_fmt(fmt) "mvebu-pmsu: " fmt
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/resource.h>
static int __init armada_xp_pmsu_cpufreq_init(void)
{
struct device_node *np;
struct resource res;
int ret, cpu;
if (!of_machine_is_compatible("marvell,armadaxp"))
return 0;
/*
* In order to have proper cpufreq handling, we need to ensure
* that the Device Tree description of the CPU clock includes
* the definition of the PMU DFS registers. If not, we do not
* register the clock notifier and the cpufreq driver. This
* piece of code is only for compatibility with old Device
* Trees.
*/
np = of_find_compatible_node(NULL, NULL, "marvell,armada-xp-cpu-clock");
if (!np)
return 0;
ret = of_address_to_resource(np, 1, &res);
if (ret) {
pr_warn(FW_WARN "not enabling cpufreq, deprecated armada-xp-cpu-clock binding\n");
of_node_put(np);
return 0;
}
of_node_put(np);
/*
* For each CPU, this loop registers the operating points
* supported (which are the nominal CPU frequency and half of
* it), and registers the clock notifier that will take care
* of doing the PMSU part of a frequency transition.
*/
for_each_possible_cpu(cpu) {
struct device *cpu_dev;
struct clk *clk;
int ret;
cpu_dev = get_cpu_device(cpu);
if (!cpu_dev) {
pr_err("Cannot get CPU %d\n", cpu);
continue;
}
clk = clk_get(cpu_dev, 0);
if (IS_ERR(clk)) {
pr_err("Cannot get clock for CPU %d\n", cpu);
return PTR_ERR(clk);
}
/*
* In case of a failure of dev_pm_opp_add(), we don't
* bother with cleaning up the registered OPP (there's
* no function to do so), and simply cancel the
* registration of the cpufreq device.
*/
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0);
if (ret) {
clk_put(clk);
return ret;
}
ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0);
if (ret) {
clk_put(clk);
return ret;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev,
cpumask_of(cpu_dev->id));
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
}
platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
return 0;
}
device_initcall(armada_xp_pmsu_cpufreq_init);
......@@ -13,6 +13,9 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
......@@ -163,13 +166,13 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
{
mpu_dev = get_cpu_device(0);
if (!mpu_dev) {
pr_warning("%s: unable to get the mpu device\n", __func__);
pr_warn("%s: unable to get the MPU device\n", __func__);
return -EINVAL;
}
mpu_reg = regulator_get(mpu_dev, "vcc");
if (IS_ERR(mpu_reg)) {
pr_warning("%s: unable to get MPU regulator\n", __func__);
pr_warn("%s: unable to get MPU regulator\n", __func__);
mpu_reg = NULL;
} else {
/*
......
......@@ -20,6 +20,8 @@
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -35,8 +37,6 @@
#include "speedstep-lib.h"
#define PFX "p4-clockmod: "
/*
* Duty Cycle (3bits), note DC_DISABLE is not specified in
* intel docs i just use it to mean disable
......@@ -124,11 +124,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
if (c->x86 == 0x06) {
if (cpu_has(c, X86_FEATURE_EST))
printk_once(KERN_WARNING PFX "Warning: EST-capable "
"CPU detected. The acpi-cpufreq module offers "
"voltage scaling in addition to frequency "
"scaling. You should use that instead of "
"p4-clockmod, if possible.\n");
pr_warn_once("Warning: EST-capable CPU detected. The acpi-cpufreq module offers voltage scaling in addition to frequency scaling. You should use that instead of p4-clockmod, if possible.\n");
switch (c->x86_model) {
case 0x0E: /* Core */
case 0x0F: /* Core Duo */
......@@ -152,11 +148,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
"The speedstep-ich or acpi cpufreq modules offer "
"voltage scaling in addition of frequency scaling. "
"You should use either one instead of p4-clockmod, "
"if possible.\n");
pr_warn("Warning: Pentium 4-M detected. The speedstep-ich or acpi cpufreq modules offer voltage scaling in addition of frequency scaling. You should use either one instead of p4-clockmod, if possible.\n");
return speedstep_get_frequency(SPEEDSTEP_CPU_P4M);
}
......@@ -265,8 +257,7 @@ static int __init cpufreq_p4_init(void)
ret = cpufreq_register_driver(&p4clockmod_driver);
if (!ret)
printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock "
"Modulation available\n");
pr_info("P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");
return ret;
}
......
......@@ -13,6 +13,8 @@
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
......@@ -481,13 +483,13 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode)
freqs = of_get_property(cpunode, "bus-frequencies", &lenp);
lenp /= sizeof(u32);
if (freqs == NULL || lenp != 2) {
printk(KERN_ERR "cpufreq: bus-frequencies incorrect or missing\n");
pr_err("bus-frequencies incorrect or missing\n");
return 1;
}
ratio = of_get_property(cpunode, "processor-to-bus-ratio*2",
NULL);
if (ratio == NULL) {
printk(KERN_ERR "cpufreq: processor-to-bus-ratio*2 missing\n");
pr_err("processor-to-bus-ratio*2 missing\n");
return 1;
}
......@@ -550,7 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
if (volt_gpio_np)
voltage_gpio = read_gpio(volt_gpio_np);
if (!voltage_gpio){
printk(KERN_ERR "cpufreq: missing cpu-vcore-select gpio\n");
pr_err("missing cpu-vcore-select gpio\n");
return 1;
}
......@@ -675,9 +677,9 @@ static int __init pmac_cpufreq_setup(void)
pmac_cpu_freqs[CPUFREQ_HIGH].frequency = hi_freq;
ppc_proc_freq = cur_freq * 1000ul;
printk(KERN_INFO "Registering PowerMac CPU frequency driver\n");
printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
low_freq/1000, hi_freq/1000, cur_freq/1000);
pr_info("Registering PowerMac CPU frequency driver\n");
pr_info("Low: %d Mhz, High: %d Mhz, Boot: %d Mhz\n",
low_freq/1000, hi_freq/1000, cur_freq/1000);
return cpufreq_register_driver(&pmac_cpufreq_driver);
}
......
......@@ -12,6 +12,8 @@
#undef DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
......@@ -138,7 +140,7 @@ static void g5_vdnap_switch_volt(int speed_mode)
usleep_range(1000, 1000);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
pr_warn("Timeout in clock slewing !\n");
}
......@@ -266,7 +268,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
rc = pmf_call_one(pfunc_cpu_setfreq_low, NULL);
if (rc)
printk(KERN_WARNING "cpufreq: pfunc switch error %d\n", rc);
pr_warn("pfunc switch error %d\n", rc);
/* It's an irq GPIO so we should be able to just block here,
* I'll do that later after I've properly tested the IRQ code for
......@@ -282,7 +284,7 @@ static int g5_pfunc_switch_freq(int speed_mode)
usleep_range(500, 500);
}
if (done == 0)
printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n");
pr_warn("Timeout in clock slewing !\n");
/* If frequency is going down, last ramp the voltage */
if (speed_mode > g5_pmode_cur)
......@@ -368,7 +370,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
}
pvr_hi = (*valp) >> 16;
if (pvr_hi != 0x3c && pvr_hi != 0x44) {
printk(KERN_ERR "cpufreq: Unsupported CPU version\n");
pr_err("Unsupported CPU version\n");
goto bail_noprops;
}
......@@ -403,8 +405,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
root = of_find_node_by_path("/");
if (root == NULL) {
printk(KERN_ERR "cpufreq: Can't find root of "
"device tree\n");
pr_err("Can't find root of device tree\n");
goto bail_noprops;
}
pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0");
......@@ -412,8 +413,7 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
pmf_find_function(root, "slewing-done");
if (pfunc_set_vdnap0 == NULL ||
pfunc_vdnap0_complete == NULL) {
printk(KERN_ERR "cpufreq: Can't find required "
"platform function\n");
pr_err("Can't find required platform function\n");
goto bail_noprops;
}
......@@ -453,10 +453,10 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
g5_pmode_cur = -1;
g5_switch_freq(g5_query_freq());
printk(KERN_INFO "Registering G5 CPU frequency driver\n");
printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n",
freq_method, volt_method);
printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
pr_info("Registering G5 CPU frequency driver\n");
pr_info("Frequency method: %s, Voltage method: %s\n",
freq_method, volt_method);
pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
g5_cpu_freqs[g5_pmode_cur].frequency/1000);
......@@ -493,7 +493,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
if (cpuid != NULL)
eeprom = of_get_property(cpuid, "cpuid", NULL);
if (eeprom == NULL) {
printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n");
pr_err("Can't find cpuid EEPROM !\n");
rc = -ENODEV;
goto bail;
}
......@@ -511,7 +511,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
break;
}
if (hwclock == NULL) {
printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n");
pr_err("Can't find i2c clock chip !\n");
rc = -ENODEV;
goto bail;
}
......@@ -539,7 +539,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Check we have minimum requirements */
if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL ||
pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) {
printk(KERN_ERR "cpufreq: Can't find platform functions !\n");
pr_err("Can't find platform functions !\n");
rc = -ENODEV;
goto bail;
}
......@@ -567,7 +567,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Get max frequency from device-tree */
valp = of_get_property(cpunode, "clock-frequency", NULL);
if (!valp) {
printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n");
pr_err("Can't find CPU frequency !\n");
rc = -ENODEV;
goto bail;
}
......@@ -583,8 +583,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Check for machines with no useful settings */
if (il == ih) {
printk(KERN_WARNING "cpufreq: No low frequency mode available"
" on this model !\n");
pr_warn("No low frequency mode available on this model !\n");
rc = -ENODEV;
goto bail;
}
......@@ -595,7 +594,7 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
/* Sanity check */
if (min_freq >= max_freq || min_freq < 1000) {
printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n");
pr_err("Can't calculate low frequency !\n");
rc = -ENXIO;
goto bail;
}
......@@ -619,10 +618,10 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
g5_pmode_cur = -1;
g5_switch_freq(g5_query_freq());
printk(KERN_INFO "Registering G5 CPU frequency driver\n");
printk(KERN_INFO "Frequency method: i2c/pfunc, "
"Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none");
printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
pr_info("Registering G5 CPU frequency driver\n");
pr_info("Frequency method: i2c/pfunc, Voltage method: %s\n",
has_volt ? "i2c/pfunc" : "none");
pr_info("Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n",
g5_cpu_freqs[1].frequency/1000,
g5_cpu_freqs[0].frequency/1000,
g5_cpu_freqs[g5_pmode_cur].frequency/1000);
......@@ -654,7 +653,7 @@ static int __init g5_cpufreq_init(void)
/* Get first CPU node */
cpunode = of_cpu_device_node_get(0);
if (cpunode == NULL) {
pr_err("cpufreq: Can't find any CPU node\n");
pr_err("Can't find any CPU node\n");
return -ENODEV;
}
......
......@@ -8,6 +8,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -22,7 +24,6 @@
#define POWERNOW_IOPORT 0xfff0 /* it doesn't matter where, as long
as it is unused */
#define PFX "powernow-k6: "
static unsigned int busfreq; /* FSB, in 10 kHz */
static unsigned int max_multiplier;
......@@ -141,7 +142,7 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
{
if (clock_ratio[best_i].driver_data > max_multiplier) {
printk(KERN_ERR PFX "invalid target frequency\n");
pr_err("invalid target frequency\n");
return -EINVAL;
}
......@@ -175,13 +176,14 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
max_multiplier = param_max_multiplier;
goto have_max_multiplier;
}
printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
pr_err("invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n");
return -EINVAL;
}
if (!max_multiplier) {
printk(KERN_WARNING "powernow-k6: unknown frequency %u, cannot determine current multiplier\n", khz);
printk(KERN_WARNING "powernow-k6: use module parameters max_multiplier and bus_frequency\n");
pr_warn("unknown frequency %u, cannot determine current multiplier\n",
khz);
pr_warn("use module parameters max_multiplier and bus_frequency\n");
return -EOPNOTSUPP;
}
......@@ -193,7 +195,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
busfreq = param_busfreq / 10;
goto have_busfreq;
}
printk(KERN_ERR "powernow-k6: invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
pr_err("invalid bus_frequency parameter, allowed range 50000 - 150000 kHz\n");
return -EINVAL;
}
......@@ -275,7 +277,7 @@ static int __init powernow_k6_init(void)
return -ENODEV;
if (!request_region(POWERNOW_IOPORT, 16, "PowerNow!")) {
printk(KERN_INFO PFX "PowerNow IOPORT region already used.\n");
pr_info("PowerNow IOPORT region already used\n");
return -EIO;
}
......
......@@ -13,6 +13,8 @@
* - We disable half multipliers if ACPI is used on A0 stepping CPUs.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
......@@ -35,9 +37,6 @@
#include "powernow-k7.h"
#define PFX "powernow: "
struct psb_s {
u8 signature[10];
u8 tableversion;
......@@ -127,14 +126,13 @@ static int check_powernow(void)
maxei = cpuid_eax(0x80000000);
if (maxei < 0x80000007) { /* Any powernow info ? */
#ifdef MODULE
printk(KERN_INFO PFX "No powernow capabilities detected\n");
pr_info("No powernow capabilities detected\n");
#endif
return 0;
}
if ((c->x86_model == 6) && (c->x86_mask == 0)) {
printk(KERN_INFO PFX "K7 660[A0] core detected, "
"enabling errata workarounds\n");
pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
have_a0 = 1;
}
......@@ -144,22 +142,22 @@ static int check_powernow(void)
if (!(edx & (1 << 1 | 1 << 2)))
return 0;
printk(KERN_INFO PFX "PowerNOW! Technology present. Can scale: ");
pr_info("PowerNOW! Technology present. Can scale: ");
if (edx & 1 << 1) {
printk("frequency");
pr_cont("frequency");
can_scale_bus = 1;
}
if ((edx & (1 << 1 | 1 << 2)) == 0x6)
printk(" and ");
pr_cont(" and ");
if (edx & 1 << 2) {
printk("voltage");
pr_cont("voltage");
can_scale_vid = 1;
}
printk(".\n");
pr_cont("\n");
return 1;
}
......@@ -427,16 +425,14 @@ static int powernow_acpi_init(void)
err05:
kfree(acpi_processor_perf);
err0:
printk(KERN_WARNING PFX "ACPI perflib can not be used on "
"this platform\n");
pr_warn("ACPI perflib can not be used on this platform\n");
acpi_processor_perf = NULL;
return retval;
}
#else
static int powernow_acpi_init(void)
{
printk(KERN_INFO PFX "no support for ACPI processor found."
" Please recompile your kernel with ACPI processor\n");
pr_info("no support for ACPI processor found - please recompile your kernel with ACPI processor\n");
return -EINVAL;
}
#endif
......@@ -468,8 +464,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
psb = (struct psb_s *) p;
pr_debug("Table version: 0x%x\n", psb->tableversion);
if (psb->tableversion != 0x12) {
printk(KERN_INFO PFX "Sorry, only v1.2 tables"
" supported right now\n");
pr_info("Sorry, only v1.2 tables supported right now\n");
return -ENODEV;
}
......@@ -481,10 +476,8 @@ static int powernow_decode_bios(int maxfid, int startvid)
latency = psb->settlingtime;
if (latency < 100) {
printk(KERN_INFO PFX "BIOS set settling time "
"to %d microseconds. "
"Should be at least 100. "
"Correcting.\n", latency);
pr_info("BIOS set settling time to %d microseconds. Should be at least 100. Correcting.\n",
latency);
latency = 100;
}
pr_debug("Settling Time: %d microseconds.\n",
......@@ -516,10 +509,9 @@ static int powernow_decode_bios(int maxfid, int startvid)
p += 2;
}
}
printk(KERN_INFO PFX "No PST tables match this cpuid "
"(0x%x)\n", etuple);
printk(KERN_INFO PFX "This is indicative of a broken "
"BIOS.\n");
pr_info("No PST tables match this cpuid (0x%x)\n",
etuple);
pr_info("This is indicative of a broken BIOS\n");
return -EINVAL;
}
......@@ -552,7 +544,7 @@ static int fixup_sgtc(void)
sgtc = 100 * m * latency;
sgtc = sgtc / 3;
if (sgtc > 0xfffff) {
printk(KERN_WARNING PFX "SGTC too large %d\n", sgtc);
pr_warn("SGTC too large %d\n", sgtc);
sgtc = 0xfffff;
}
return sgtc;
......@@ -574,14 +566,10 @@ static unsigned int powernow_get(unsigned int cpu)
static int acer_cpufreq_pst(const struct dmi_system_id *d)
{
printk(KERN_WARNING PFX
"%s laptop with broken PST tables in BIOS detected.\n",
pr_warn("%s laptop with broken PST tables in BIOS detected\n",
d->ident);
printk(KERN_WARNING PFX
"You need to downgrade to 3A21 (09/09/2002), or try a newer "
"BIOS than 3A71 (01/20/2003)\n");
printk(KERN_WARNING PFX
"cpufreq scaling has been disabled as a result of this.\n");
pr_warn("You need to downgrade to 3A21 (09/09/2002), or try a newer BIOS than 3A71 (01/20/2003)\n");
pr_warn("cpufreq scaling has been disabled as a result of this\n");
return 0;
}
......@@ -616,40 +604,38 @@ static int powernow_cpu_init(struct cpufreq_policy *policy)
fsb = (10 * cpu_khz) / fid_codes[fidvidstatus.bits.CFID];
if (!fsb) {
printk(KERN_WARNING PFX "can not determine bus frequency\n");
pr_warn("can not determine bus frequency\n");
return -EINVAL;
}
pr_debug("FSB: %3dMHz\n", fsb/1000);
if (dmi_check_system(powernow_dmi_table) || acpi_force) {
printk(KERN_INFO PFX "PSB/PST known to be broken. "
"Trying ACPI instead\n");
pr_info("PSB/PST known to be broken - trying ACPI instead\n");
result = powernow_acpi_init();
} else {
result = powernow_decode_bios(fidvidstatus.bits.MFID,
fidvidstatus.bits.SVID);
if (result) {
printk(KERN_INFO PFX "Trying ACPI perflib\n");
pr_info("Trying ACPI perflib\n");
maximum_speed = 0;
minimum_speed = -1;
latency = 0;
result = powernow_acpi_init();
if (result) {
printk(KERN_INFO PFX
"ACPI and legacy methods failed\n");
pr_info("ACPI and legacy methods failed\n");
}
} else {
/* SGTC use the bus clock as timer */
latency = fixup_sgtc();
printk(KERN_INFO PFX "SGTC: %d\n", latency);
pr_info("SGTC: %d\n", latency);
}
}
if (result)
return result;
printk(KERN_INFO PFX "Minimum speed %d MHz. Maximum speed %d MHz.\n",
minimum_speed/1000, maximum_speed/1000);
pr_info("Minimum speed %d MHz - Maximum speed %d MHz\n",
minimum_speed/1000, maximum_speed/1000);
policy->cpuinfo.transition_latency =
cpufreq_scale(2000000UL, fsb, latency);
......
This diff is collapsed.
......@@ -17,7 +17,7 @@ int cbe_cpufreq_get_pmode(int cpu);
int cbe_cpufreq_set_pmode_pmi(int cpu, unsigned int pmode);
#if defined(CONFIG_CPU_FREQ_CBE_PMI) || defined(CONFIG_CPU_FREQ_CBE_PMI_MODULE)
#if IS_ENABLED(CONFIG_CPU_FREQ_CBE_PMI)
extern bool cbe_cpufreq_has_pmi;
#else
#define cbe_cpufreq_has_pmi (0)
......
......@@ -23,7 +23,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of_platform.h>
#include <asm/processor.h>
......@@ -142,15 +142,4 @@ static int __init cbe_cpufreq_pmi_init(void)
return 0;
}
static void __exit cbe_cpufreq_pmi_exit(void)
{
cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER);
pmi_unregister_handler(&cbe_pmi_handler);
}
module_init(cbe_cpufreq_pmi_init);
module_exit(cbe_cpufreq_pmi_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");
device_initcall(cbe_cpufreq_pmi_init);
......@@ -29,6 +29,8 @@
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
......@@ -186,8 +188,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
ret = regulator_set_voltage(vcc_core, vmin, vmax);
if (ret)
pr_err("cpufreq: Failed to set vcc_core in [%dmV..%dmV]\n",
vmin, vmax);
pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax);
return ret;
}
......@@ -195,10 +196,10 @@ static void __init pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
pr_info("cpufreq: Didn't find vcc_core regulator\n");
pr_info("Didn't find vcc_core regulator\n");
vcc_core = NULL;
} else {
pr_info("cpufreq: Found vcc_core regulator\n");
pr_info("Found vcc_core regulator\n");
}
}
#else
......@@ -233,9 +234,8 @@ static void pxa27x_guess_max_freq(void)
{
if (!pxa27x_maxfreq) {
pxa27x_maxfreq = 416000;
printk(KERN_INFO "PXA CPU 27x max frequency not defined "
"(pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
pxa27x_maxfreq);
pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n",
pxa27x_maxfreq);
} else {
pxa27x_maxfreq *= 1000;
}
......@@ -408,7 +408,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
*/
if (cpu_is_pxa25x()) {
find_freq_tables(&pxa255_freq_table, &pxa255_freqs);
pr_info("PXA255 cpufreq using %s frequency table\n",
pr_info("using %s frequency table\n",
pxa255_turbo_table ? "turbo" : "run");
cpufreq_table_validate_and_show(policy, pxa255_freq_table);
......@@ -417,7 +417,7 @@ static int pxa_cpufreq_init(struct cpufreq_policy *policy)
cpufreq_table_validate_and_show(policy, pxa27x_freq_table);
}
printk(KERN_INFO "PXA CPU frequency change support initialized\n");
pr_info("frequency change support initialized\n");
return 0;
}
......
......@@ -301,10 +301,11 @@ static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy)
return -ENODEV;
}
static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
static int qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
struct cpu_data *data = policy->driver_data;
cpufreq_cooling_unregister(data->cdev);
kfree(data->pclk);
kfree(data->table);
kfree(data);
......@@ -333,8 +334,8 @@ static void qoriq_cpufreq_ready(struct cpufreq_policy *policy)
cpud->cdev = of_cpufreq_cooling_register(np,
policy->related_cpus);
if (IS_ERR(cpud->cdev)) {
pr_err("Failed to register cooling device cpu%d: %ld\n",
if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) {
pr_err("cpu%d is not running as cooling device: %ld\n",
policy->cpu, PTR_ERR(cpud->cdev));
cpud->cdev = NULL;
......@@ -348,7 +349,7 @@ static struct cpufreq_driver qoriq_cpufreq_driver = {
.name = "qoriq_cpufreq",
.flags = CPUFREQ_CONST_LOOPS,
.init = qoriq_cpufreq_cpu_init,
.exit = __exit_p(qoriq_cpufreq_cpu_exit),
.exit = qoriq_cpufreq_cpu_exit,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = qoriq_cpufreq_target,
.get = cpufreq_generic_get,
......
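Editor's note: the qoriq change above drops __exit/__exit_p() from the .exit callback, presumably because a cpufreq driver's exit path also runs at policy teardown (for example CPU hot-unplug), not only at module removal, which is the usual reason such callbacks cannot live in the discarded .exit section. A hedged sketch of the corrected wiring (driver and callback names are illustrative only):

	#include <linux/cpufreq.h>
	#include <linux/slab.h>

	static int demo_cpufreq_cpu_exit(struct cpufreq_policy *policy)
	{
		/* may run while the module is still loaded, so it must stay
		 * in regular .text rather than the .exit section */
		kfree(policy->driver_data);
		return 0;
	}

	static struct cpufreq_driver demo_cpufreq_driver = {
		.name = "demo_cpufreq",
		.exit = demo_cpufreq_cpu_exit,	/* plain pointer, no __exit_p() */
		/* .init, .verify, .target_index, .get as usual */
	};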
......@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
......@@ -197,21 +199,20 @@ static int s3c2412_cpufreq_add(struct device *dev,
hclk = clk_get(NULL, "hclk");
if (IS_ERR(hclk)) {
printk(KERN_ERR "%s: cannot find hclk clock\n", __func__);
pr_err("cannot find hclk clock\n");
return -ENOENT;
}
fclk = clk_get(NULL, "fclk");
if (IS_ERR(fclk)) {
printk(KERN_ERR "%s: cannot find fclk clock\n", __func__);
pr_err("cannot find fclk clock\n");
goto err_fclk;
}
fclk_rate = clk_get_rate(fclk);
if (fclk_rate > 200000000) {
printk(KERN_INFO
"%s: fclk %ld MHz, assuming 266MHz capable part\n",
__func__, fclk_rate / 1000000);
pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
fclk_rate / 1000000);
s3c2412_cpufreq_info.max.fclk = 266000000;
s3c2412_cpufreq_info.max.hclk = 133000000;
s3c2412_cpufreq_info.max.pclk = 66000000;
......@@ -219,13 +220,13 @@ static int s3c2412_cpufreq_add(struct device *dev,
armclk = clk_get(NULL, "armclk");
if (IS_ERR(armclk)) {
printk(KERN_ERR "%s: cannot find arm clock\n", __func__);
pr_err("cannot find arm clock\n");
goto err_armclk;
}
xtal = clk_get(NULL, "xtal");
if (IS_ERR(xtal)) {
printk(KERN_ERR "%s: cannot find xtal clock\n", __func__);
pr_err("cannot find xtal clock\n");
goto err_xtal;
}
......
......@@ -11,6 +11,8 @@
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
......@@ -66,7 +68,7 @@ static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
__func__, fclk, armclk, hclk_max);
if (armclk > fclk) {
printk(KERN_WARNING "%s: armclk > fclk\n", __func__);
pr_warn("%s: armclk > fclk\n", __func__);
armclk = fclk;
}
......@@ -273,7 +275,7 @@ static int s3c2440_cpufreq_add(struct device *dev,
armclk = s3c_cpufreq_clk_get(NULL, "armclk");
if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
printk(KERN_ERR "%s: failed to get clocks\n", __func__);
pr_err("%s: failed to get clocks\n", __func__);
return -ENOENT;
}
......
......@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/export.h>
#include <linux/interrupt.h>
......@@ -178,7 +180,7 @@ static int __init s3c_freq_debugfs_init(void)
{
dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
if (IS_ERR(dbgfs_root)) {
printk(KERN_ERR "%s: error creating debugfs root\n", __func__);
pr_err("%s: error creating debugfs root\n", __func__);
return PTR_ERR(dbgfs_root);
}
......
......@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
......@@ -175,7 +177,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
cpu_new.freq.fclk = cpu_new.pll.frequency;
if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
printk(KERN_ERR "no divisors for %d\n", target_freq);
pr_err("no divisors for %d\n", target_freq);
goto err_notpossible;
}
......@@ -187,7 +189,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
if (s3c_cpufreq_calcio(&cpu_new) < 0) {
printk(KERN_ERR "%s: no IO timings\n", __func__);
pr_err("%s: no IO timings\n", __func__);
goto err_notpossible;
}
}
......@@ -262,7 +264,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
return 0;
err_notpossible:
printk(KERN_ERR "no compatible settings for %d\n", target_freq);
pr_err("no compatible settings for %d\n", target_freq);
return -EINVAL;
}
......@@ -331,7 +333,7 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
&index);
if (ret < 0) {
printk(KERN_ERR "%s: no PLL available\n", __func__);
pr_err("%s: no PLL available\n", __func__);
goto err_notpossible;
}
......@@ -346,7 +348,7 @@ static int s3c_cpufreq_target(struct cpufreq_policy *policy,
return s3c_cpufreq_settarget(policy, target_freq, pll);
err_notpossible:
printk(KERN_ERR "no compatible settings for %d\n", target_freq);
pr_err("no compatible settings for %d\n", target_freq);
return -EINVAL;
}
......@@ -356,7 +358,7 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
clk = clk_get(dev, name);
if (IS_ERR(clk))
printk(KERN_ERR "cpufreq: failed to get clock '%s'\n", name);
pr_err("failed to get clock '%s'\n", name);
return clk;
}
......@@ -378,15 +380,16 @@ static int __init s3c_cpufreq_initclks(void)
if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
printk(KERN_ERR "%s: could not get clock(s)\n", __func__);
pr_err("%s: could not get clock(s)\n", __func__);
return -ENOENT;
}
printk(KERN_INFO "%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n", __func__,
clk_get_rate(clk_fclk) / 1000,
clk_get_rate(clk_hclk) / 1000,
clk_get_rate(clk_pclk) / 1000,
clk_get_rate(clk_arm) / 1000);
pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
__func__,
clk_get_rate(clk_fclk) / 1000,
clk_get_rate(clk_hclk) / 1000,
clk_get_rate(clk_pclk) / 1000,
clk_get_rate(clk_arm) / 1000);
return 0;
}
......@@ -424,7 +427,7 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
if (ret) {
printk(KERN_ERR "%s: failed to reset pll/freq\n", __func__);
pr_err("%s: failed to reset pll/freq\n", __func__);
return ret;
}
......@@ -449,13 +452,12 @@ static struct cpufreq_driver s3c24xx_driver = {
int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
{
if (!info || !info->name) {
printk(KERN_ERR "%s: failed to pass valid information\n",
__func__);
pr_err("%s: failed to pass valid information\n", __func__);
return -EINVAL;
}
printk(KERN_INFO "S3C24XX CPU Frequency driver, %s cpu support\n",
info->name);
pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
info->name);
/* check our driver info has valid data */
......@@ -478,7 +480,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
struct s3c_cpufreq_board *ours;
if (!board) {
printk(KERN_INFO "%s: no board data\n", __func__);
pr_info("%s: no board data\n", __func__);
return -EINVAL;
}
......@@ -487,7 +489,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
ours = kzalloc(sizeof(*ours), GFP_KERNEL);
if (ours == NULL) {
printk(KERN_ERR "%s: no memory\n", __func__);
pr_err("%s: no memory\n", __func__);
return -ENOMEM;
}
......@@ -502,15 +504,15 @@ static int __init s3c_cpufreq_auto_io(void)
int ret;
if (!cpu_cur.info->get_iotiming) {
printk(KERN_ERR "%s: get_iotiming undefined\n", __func__);
pr_err("%s: get_iotiming undefined\n", __func__);
return -ENOENT;
}
printk(KERN_INFO "%s: working out IO settings\n", __func__);
pr_info("%s: working out IO settings\n", __func__);
ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
if (ret)
printk(KERN_ERR "%s: failed to get timings\n", __func__);
pr_err("%s: failed to get timings\n", __func__);
return ret;
}
......@@ -561,7 +563,7 @@ static void s3c_cpufreq_update_loctkime(void)
val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
val |= calc_locktime(rate, cpu_cur.info->locktime_m);
printk(KERN_INFO "%s: new locktime is 0x%08x\n", __func__, val);
pr_info("%s: new locktime is 0x%08x\n", __func__, val);
__raw_writel(val, S3C2410_LOCKTIME);
}
......@@ -580,7 +582,7 @@ static int s3c_cpufreq_build_freq(void)
ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
if (!ftab) {
printk(KERN_ERR "%s: no memory for tables\n", __func__);
pr_err("%s: no memory for tables\n", __func__);
return -ENOMEM;
}
......@@ -608,15 +610,14 @@ static int __init s3c_cpufreq_initcall(void)
if (cpu_cur.board->auto_io) {
ret = s3c_cpufreq_auto_io();
if (ret) {
printk(KERN_ERR "%s: failed to get io timing\n",
pr_err("%s: failed to get io timing\n",
__func__);
goto out;
}
}
if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
printk(KERN_ERR "%s: no IO support registered\n",
__func__);
pr_err("%s: no IO support registered\n", __func__);
ret = -EINVAL;
goto out;
}
......@@ -666,9 +667,9 @@ int s3c_plltab_register(struct cpufreq_frequency_table *plls,
vals += plls_no;
vals->frequency = CPUFREQ_TABLE_END;
printk(KERN_INFO "cpufreq: %d PLL entries\n", plls_no);
pr_info("%d PLL entries\n", plls_no);
} else
printk(KERN_ERR "cpufreq: no memory for PLL tables\n");
pr_err("no memory for PLL tables\n");
return vals ? 0 : -ENOMEM;
}
......@@ -9,6 +9,8 @@
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
......@@ -205,7 +207,7 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
} else if (ch == DMC1) {
reg = (dmc_base[1] + 0x30);
} else {
printk(KERN_ERR "Cannot find DMC port\n");
pr_err("Cannot find DMC port\n");
return;
}
......@@ -534,7 +536,7 @@ static int s5pv210_cpu_init(struct cpufreq_policy *policy)
mem_type = check_mem_type(dmc_base[0]);
if ((mem_type != LPDDR) && (mem_type != LPDDR2)) {
printk(KERN_ERR "CPUFreq doesn't support this memory type\n");
pr_err("CPUFreq doesn't support this memory type\n");
ret = -EINVAL;
goto out_dmc1;
}
......@@ -635,13 +637,13 @@ static int s5pv210_cpufreq_probe(struct platform_device *pdev)
arm_regulator = regulator_get(NULL, "vddarm");
if (IS_ERR(arm_regulator)) {
pr_err("failed to get regulator vddarm");
pr_err("failed to get regulator vddarm\n");
return PTR_ERR(arm_regulator);
}
int_regulator = regulator_get(NULL, "vddint");
if (IS_ERR(int_regulator)) {
pr_err("failed to get regulator vddint");
pr_err("failed to get regulator vddint\n");
regulator_put(arm_regulator);
return PTR_ERR(int_regulator);
}
......
......@@ -13,6 +13,8 @@
* 2005-03-30: - initial revision
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -30,8 +32,6 @@
static __u8 __iomem *cpuctl;
#define PFX "sc520_freq: "
static struct cpufreq_frequency_table sc520_freq_table[] = {
{0, 0x01, 100000},
{0, 0x02, 133000},
......@@ -44,8 +44,8 @@ static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
switch (clockspeed_reg & 0x03) {
default:
printk(KERN_ERR PFX "error: cpuctl register has unexpected "
"value %02x\n", clockspeed_reg);
pr_err("error: cpuctl register has unexpected value %02x\n",
clockspeed_reg);
case 0x01:
return 100000;
case 0x02:
......@@ -112,7 +112,7 @@ static int __init sc520_freq_init(void)
cpuctl = ioremap((unsigned long)(MMCR_BASE + OFFS_CPUCTL), 1);
if (!cpuctl) {
printk(KERN_ERR "sc520_freq: error: failed to remap memory\n");
pr_err("sc520_freq: error: failed to remap memory\n");
return -ENOMEM;
}
......
......@@ -18,6 +18,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
......@@ -38,10 +39,20 @@ static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
return scpi_ops->dvfs_get_info(domain);
}
static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
static int scpi_get_transition_latency(struct device *cpu_dev)
{
int idx, ret = 0;
struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
if (IS_ERR(info))
return PTR_ERR(info);
return info->latency;
}
static int scpi_init_opp_table(const struct cpumask *cpumask)
{
int idx, ret;
struct scpi_opp *opp;
struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask));
struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
if (IS_ERR(info))
......@@ -51,11 +62,7 @@ static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
return -EIO;
for (opp = info->opps, idx = 0; idx < info->count; idx++, opp++) {
if (remove)
dev_pm_opp_remove(cpu_dev, opp->freq);
else
ret = dev_pm_opp_add(cpu_dev, opp->freq,
opp->m_volt * 1000);
ret = dev_pm_opp_add(cpu_dev, opp->freq, opp->m_volt * 1000);
if (ret) {
dev_warn(cpu_dev, "failed to add opp %uHz %umV\n",
opp->freq, opp->m_volt);
......@@ -64,33 +71,19 @@ static int scpi_opp_table_ops(struct device *cpu_dev, bool remove)
return ret;
}
}
return ret;
}
static int scpi_get_transition_latency(struct device *cpu_dev)
{
struct scpi_dvfs_info *info = scpi_get_dvfs_info(cpu_dev);
if (IS_ERR(info))
return PTR_ERR(info);
return info->latency;
}
static int scpi_init_opp_table(struct device *cpu_dev)
{
return scpi_opp_table_ops(cpu_dev, false);
}
static void scpi_free_opp_table(struct device *cpu_dev)
{
scpi_opp_table_ops(cpu_dev, true);
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask);
if (ret)
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
return ret;
}
static struct cpufreq_arm_bL_ops scpi_cpufreq_ops = {
.name = "scpi",
.get_transition_latency = scpi_get_transition_latency,
.init_opp_table = scpi_init_opp_table,
.free_opp_table = scpi_free_opp_table,
.free_opp_table = dev_pm_opp_cpumask_remove_table,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)
......
......@@ -13,6 +13,8 @@
* Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
......@@ -27,7 +29,6 @@
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#define PFX "speedstep-centrino: "
#define MAINTAINER "linux-pm@vger.kernel.org"
#define INTEL_MSR_RANGE (0xffff)
......@@ -386,8 +387,7 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
/* check to see if it stuck */
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
printk(KERN_INFO PFX
"couldn't enable Enhanced SpeedStep\n");
pr_info("couldn't enable Enhanced SpeedStep\n");
return -ENODEV;
}
}
......
This diff is collapsed.
......@@ -8,6 +8,8 @@
* BIG FAT DISCLAIMER: Work in progress code. Possibly *dangerous*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
......@@ -153,7 +155,7 @@ static unsigned int pentium_core_get_frequency(void)
fsb = 333333;
break;
default:
printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
pr_err("PCORE - MSR_FSB_FREQ undefined value\n");
}
rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
......@@ -453,11 +455,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,
*/
if (*transition_latency > 10000000 ||
*transition_latency < 50000) {
printk(KERN_WARNING PFX "frequency transition "
"measured seems out of range (%u "
"nSec), falling back to a safe one of"
"%u nSec.\n",
*transition_latency, 500000);
pr_warn("frequency transition measured seems out of range (%u nSec), falling back to a safe one of %u nSec\n",
*transition_latency, 500000);
*transition_latency = 500000;
}
}
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.