Commit 02d92950 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq: (23 commits)
  [CPUFREQ] EXYNOS: Removed useless headers and codes
  [CPUFREQ] EXYNOS: Make EXYNOS common cpufreq driver
  [CPUFREQ] powernow-k8: Update copyright, maintainer and documentation information
  [CPUFREQ] powernow-k8: Fix indexing issue
  [CPUFREQ] powernow-k8: Avoid Pstate MSR accesses on systems supporting CPB
  [CPUFREQ] update lpj only if frequency has changed
  [CPUFREQ] cpufreq:userspace: fix cpu_cur_freq updation
  [CPUFREQ] Remove wall variable from cpufreq_gov_dbs_init()
  [CPUFREQ] EXYNOS4210: cpufreq code is changed for stable working
  [CPUFREQ] EXYNOS4210: Update frequency table for cpu divider
  [CPUFREQ] EXYNOS4210: Remove code about bus on cpufreq
  [CPUFREQ] s3c64xx: Use pr_fmt() for consistent log messages
  cpufreq: OMAP: fixup for omap_device changes, include <linux/module.h>
  cpufreq: OMAP: fix freq_table leak
  cpufreq: OMAP: put clk if cpu_init failed
  cpufreq: OMAP: only supports OPP library
  cpufreq: OMAP: dont support !freq_table
  cpufreq: OMAP: deny initialization if no mpudev
  cpufreq: OMAP: move clk name decision to init
  cpufreq: OMAP: notify even with bad boot frequency
  ...
parents b24ca57e 6c523c61
/* linux/arch/arm/mach-exynos/include/mach/cpufreq.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - CPUFreq support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
enum cpufreq_level_index {
L0, L1, L2, L3, L4,
L5, L6, L7, L8, L9,
L10, L11, L12, L13, L14,
L15, L16, L17, L18, L19,
L20,
};
struct exynos_dvfs_info {
unsigned long mpll_freq_khz;
unsigned int pll_safe_idx;
unsigned int pm_lock_idx;
unsigned int max_support_idx;
unsigned int min_support_idx;
struct clk *cpu_clk;
unsigned int *volt_table;
struct cpufreq_frequency_table *freq_table;
void (*set_freq)(unsigned int, unsigned int);
bool (*need_apll_change)(unsigned int, unsigned int);
};
extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
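The struct above is the contract between the common EXYNOS driver and the SoC-specific code. A minimal sketch (not part of this commit; every name prefixed "example_" is a placeholder) of how a SoC driver would fill it in, modelled on exynos4210_cpufreq_init() further down:

/* assumes <linux/clk.h>, <linux/err.h>, <linux/cpufreq.h> and this header are included */

static struct cpufreq_frequency_table example_freq_table[] = {
	{L0, 1200*1000},		/* kHz */
	{L1,  800*1000},
	{0, CPUFREQ_TABLE_END},
};

static unsigned int example_volt_table[] = { 1250000, 1000000 };	/* uV per level */

static void example_set_freq(unsigned int old_index, unsigned int new_index)
{
	/* reprogram the clock dividers and, if needed, the APLL here */
}

static bool example_pms_change(unsigned int old_index, unsigned int new_index)
{
	return false;	/* return true when the APLL P/M values must change */
}

static int example_soc_cpufreq_init(struct exynos_dvfs_info *info)
{
	info->cpu_clk = clk_get(NULL, "armclk");
	if (IS_ERR(info->cpu_clk))
		return PTR_ERR(info->cpu_clk);

	info->mpll_freq_khz	= 800000;	/* safe parent (MPLL) rate in kHz */
	info->pll_safe_idx	= L1;		/* level that can run from MPLL */
	info->pm_lock_idx	= L1;
	info->max_support_idx	= L0;
	info->min_support_idx	= L1;
	info->volt_table	= example_volt_table;
	info->freq_table	= example_freq_table;
	info->set_freq		= example_set_freq;
	info->need_apll_change	= example_pms_change;

	return 0;
}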
@@ -21,12 +21,19 @@ config ARM_S5PV210_CPUFREQ
 
 	  If in doubt, say N.
 
+config ARM_EXYNOS_CPUFREQ
+	bool "SAMSUNG EXYNOS SoCs"
+	depends on ARCH_EXYNOS
+	select ARM_EXYNOS4210_CPUFREQ if CPU_EXYNOS4210
+	default y
+	help
+	  This adds the CPUFreq driver common part for Samsung
+	  EXYNOS SoCs.
+
+	  If in doubt, say N.
+
 config ARM_EXYNOS4210_CPUFREQ
 	bool "Samsung EXYNOS4210"
-	depends on CPU_EXYNOS4210
-	default y
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS4210
 	  SoC (S5PV310 or S5PC210).
-
-	  If in doubt, say N.
@@ -42,7 +42,9 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2)	+= cpufreq-nforce2.o
 obj-$(CONFIG_UX500_SOC_DB8500)		+= db8500-cpufreq.o
 obj-$(CONFIG_ARM_S3C64XX_CPUFREQ)	+= s3c64xx-cpufreq.o
 obj-$(CONFIG_ARM_S5PV210_CPUFREQ)	+= s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= exynos-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
+obj-$(CONFIG_ARCH_OMAP2PLUS)		+= omap-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
...
@@ -204,8 +204,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 		pr_debug("saving %lu as reference value for loops_per_jiffy; "
 			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
 	}
-	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
-	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
+	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
 	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 								ci->new);
...
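The commit "update lpj only if frequency has changed" keeps the loops_per_jiffy rescaling but now skips it when the frequency did not actually change. The rescaling itself is plain proportional arithmetic; a rough standalone sketch of what cpufreq_scale() computes here (the real helper also guards against overflow):

	/*
	 * Illustration only: loops_per_jiffy scales linearly with CPU frequency,
	 * so the calibrated reference value is rescaled on every real transition.
	 * e.g. ref_lpj = 4997120 at 1000000 kHz -> 3997696 at 800000 kHz.
	 */
	static unsigned long scale_lpj(unsigned long ref_lpj,
				       unsigned int ref_khz, unsigned int new_khz)
	{
		return (unsigned long)((unsigned long long)ref_lpj * new_khz / ref_khz);
	}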
@@ -713,11 +713,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	cputime64_t wall;
 	u64 idle_time;
 	int cpu = get_cpu();
 
-	idle_time = get_cpu_idle_time_us(cpu, &wall);
+	idle_time = get_cpu_idle_time_us(cpu, NULL);
 	put_cpu();
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
...
@@ -47,9 +47,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	if (!per_cpu(cpu_is_managed, freq->cpu))
 		return 0;
 
-	pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
-			freq->cpu, freq->new);
-	per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+	if (val == CPUFREQ_POSTCHANGE) {
+		pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
+				freq->cpu, freq->new);
+		per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+	}
 
 	return 0;
 }
...
/*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - CPU frequency scaling support for EXYNOS series
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
#include <linux/suspend.h>
#include <mach/cpufreq.h>
#include <plat/cpu.h>
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
static struct cpufreq_freqs freqs;
static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
int exynos_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
exynos_info->freq_table);
}
unsigned int exynos_getspeed(unsigned int cpu)
{
return clk_get_rate(exynos_info->cpu_clk) / 1000;
}
static int exynos_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int index, old_index;
unsigned int arm_volt, safe_arm_volt = 0;
int ret = 0;
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
unsigned int *volt_table = exynos_info->volt_table;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
mutex_lock(&cpufreq_lock);
freqs.old = policy->cur;
if (frequency_locked && target_freq != locking_frequency) {
ret = -EAGAIN;
goto out;
}
if (cpufreq_frequency_table_target(policy, freq_table,
freqs.old, relation, &old_index)) {
ret = -EINVAL;
goto out;
}
if (cpufreq_frequency_table_target(policy, freq_table,
target_freq, relation, &index)) {
ret = -EINVAL;
goto out;
}
freqs.new = freq_table[index].frequency;
freqs.cpu = policy->cpu;
	/*
	 * The ARM clock source is temporarily switched from APLL to MPLL.
	 * To support that intermediate level, the regulator must first be
	 * set to the required safe voltage.
	 */
if (exynos_info->need_apll_change != NULL) {
if (exynos_info->need_apll_change(old_index, index) &&
(freq_table[index].frequency < mpll_freq_khz) &&
(freq_table[old_index].frequency < mpll_freq_khz))
safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
}
arm_volt = volt_table[index];
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
		/* Raise the voltage first, before increasing the frequency */
regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
}
if (safe_arm_volt)
regulator_set_voltage(arm_regulator, safe_arm_volt,
safe_arm_volt);
if (freqs.new != freqs.old)
exynos_info->set_freq(old_index, index);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
/* When the new frequency is lower than current frequency */
if ((freqs.new < freqs.old) ||
((freqs.new > freqs.old) && safe_arm_volt)) {
		/* Lower the voltage after the frequency change */
regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
}
out:
mutex_unlock(&cpufreq_lock);
return ret;
}
#ifdef CONFIG_PM
static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
}
static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
{
return 0;
}
#endif
/**
* exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
* context
* @notifier
* @pm_event
* @v
*
* While frequency_locked == true, target() ignores every frequency but
* locking_frequency. The locking_frequency value is the initial frequency,
* which is set by the bootloader. In order to eliminate possible
* inconsistency in clock values, we save and restore frequencies during
* suspend and resume and block CPUFREQ activities. Note that the standard
* suspend/resume cannot be used as they are too deep (syscore_ops) for
* regulator actions.
*/
static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *v)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
static unsigned int saved_frequency;
unsigned int temp;
mutex_lock(&cpufreq_lock);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
if (frequency_locked)
goto out;
frequency_locked = true;
if (locking_frequency) {
saved_frequency = exynos_getspeed(0);
mutex_unlock(&cpufreq_lock);
exynos_target(policy, locking_frequency,
CPUFREQ_RELATION_H);
mutex_lock(&cpufreq_lock);
}
break;
case PM_POST_SUSPEND:
if (saved_frequency) {
/*
* While frequency_locked, only locking_frequency
* is valid for target(). In order to use
* saved_frequency while keeping frequency_locked,
			 * we temporarily overwrite locking_frequency.
*/
temp = locking_frequency;
locking_frequency = saved_frequency;
mutex_unlock(&cpufreq_lock);
exynos_target(policy, locking_frequency,
CPUFREQ_RELATION_H);
mutex_lock(&cpufreq_lock);
locking_frequency = temp;
}
frequency_locked = false;
break;
}
out:
mutex_unlock(&cpufreq_lock);
return NOTIFY_OK;
}
static struct notifier_block exynos_cpufreq_nb = {
.notifier_call = exynos_cpufreq_pm_notifier,
};
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
	/*
	 * The cores of an EXYNOS4 multi-core processor share one clock, so
	 * their frequencies cannot be set independently.  Every CPU runs at
	 * the same speed, and therefore all CPUs are affected by a change.
	 */
if (num_online_cpus() == 1) {
cpumask_copy(policy->related_cpus, cpu_possible_mask);
cpumask_copy(policy->cpus, cpu_online_mask);
} else {
cpumask_setall(policy->cpus);
}
return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
}
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos_verify_speed,
.target = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
.name = "exynos_cpufreq",
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
#endif
};
static int __init exynos_cpufreq_init(void)
{
int ret = -EINVAL;
exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
if (!exynos_info)
return -ENOMEM;
if (soc_is_exynos4210())
ret = exynos4210_cpufreq_init(exynos_info);
else
pr_err("%s: CPU type not found\n", __func__);
if (ret)
goto err_vdd_arm;
if (exynos_info->set_freq == NULL) {
pr_err("%s: No set_freq function (ERR)\n", __func__);
goto err_vdd_arm;
}
arm_regulator = regulator_get(NULL, "vdd_arm");
if (IS_ERR(arm_regulator)) {
pr_err("%s: failed to get resource vdd_arm\n", __func__);
goto err_vdd_arm;
}
register_pm_notifier(&exynos_cpufreq_nb);
if (cpufreq_register_driver(&exynos_driver)) {
pr_err("%s: failed to register cpufreq driver\n", __func__);
goto err_cpufreq;
}
return 0;
err_cpufreq:
unregister_pm_notifier(&exynos_cpufreq_nb);
if (!IS_ERR(arm_regulator))
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
}
late_initcall(exynos_cpufreq_init);
@@ -2,61 +2,52 @@
  * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
- * EXYNOS4 - CPU frequency scaling support
+ * EXYNOS4210 - CPU frequency scaling support
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
#include <linux/types.h> #include <linux/module.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <mach/map.h>
#include <mach/regs-clock.h> #include <mach/regs-clock.h>
#include <mach/regs-mem.h> #include <mach/cpufreq.h>
#include <plat/clock.h> #define CPUFREQ_LEVEL_END L5
#include <plat/pm.h>
static int max_support_idx = L0;
static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
static struct clk *cpu_clk; static struct clk *cpu_clk;
static struct clk *moutcore; static struct clk *moutcore;
static struct clk *mout_mpll; static struct clk *mout_mpll;
static struct clk *mout_apll; static struct clk *mout_apll;
static struct regulator *arm_regulator; struct cpufreq_clkdiv {
static struct regulator *int_regulator; unsigned int index;
unsigned int clkdiv;
static struct cpufreq_freqs freqs;
static unsigned int memtype;
static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
enum exynos4_memory_type {
DDR2 = 4,
LPDDR2,
DDR3,
}; };
enum cpufreq_level_index { static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
L0, L1, L2, L3, CPUFREQ_LEVEL_END, 1250000, 1150000, 1050000, 975000, 950000,
}; };
static struct cpufreq_frequency_table exynos4_freq_table[] = {
{L0, 1000*1000}, static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];
{L1, 800*1000},
{L2, 400*1000}, static struct cpufreq_frequency_table exynos4210_freq_table[] = {
{L3, 100*1000}, {L0, 1200*1000},
{L1, 1000*1000},
{L2, 800*1000},
{L3, 500*1000},
{L4, 200*1000},
{0, CPUFREQ_TABLE_END}, {0, CPUFREQ_TABLE_END},
}; };
@@ -67,17 +58,20 @@ static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
* DIVATB, DIVPCLK_DBG, DIVAPLL } * DIVATB, DIVPCLK_DBG, DIVAPLL }
*/ */
/* ARM L0: 1000MHz */ /* ARM L0: 1200MHz */
{ 0, 3, 7, 3, 3, 0, 1 }, { 0, 3, 7, 3, 4, 1, 7 },
/* ARM L1: 800MHz */ /* ARM L1: 1000MHz */
{ 0, 3, 7, 3, 3, 0, 1 }, { 0, 3, 7, 3, 4, 1, 7 },
/* ARM L2: 400MHz */ /* ARM L2: 800MHz */
{ 0, 1, 3, 1, 3, 0, 1 }, { 0, 3, 7, 3, 3, 1, 7 },
/* ARM L3: 100MHz */ /* ARM L3: 500MHz */
{ 0, 0, 1, 0, 3, 1, 1 }, { 0, 3, 7, 3, 3, 1, 7 },
/* ARM L4: 200MHz */
{ 0, 1, 3, 1, 3, 1, 0 },
}; };
static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = { static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
@@ -86,147 +80,46 @@ static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
* { DIVCOPY, DIVHPM } * { DIVCOPY, DIVHPM }
*/ */
/* ARM L0: 1000MHz */ /* ARM L0: 1200MHz */
{ 3, 0 }, { 5, 0 },
/* ARM L1: 800MHz */ /* ARM L1: 1000MHz */
{ 3, 0 }, { 4, 0 },
/* ARM L2: 400MHz */ /* ARM L2: 800MHz */
{ 3, 0 }, { 3, 0 },
/* ARM L3: 100MHz */ /* ARM L3: 500MHz */
{ 3, 0 }, { 3, 0 },
};
static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
/*
* Clock divider value for following
* { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
* DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
*/
/* DMC L0: 400MHz */
{ 3, 1, 1, 1, 1, 1, 3, 1 },
/* DMC L1: 400MHz */
{ 3, 1, 1, 1, 1, 1, 3, 1 },
/* DMC L2: 266.7MHz */
{ 7, 1, 1, 2, 1, 1, 3, 1 },
/* DMC L3: 200MHz */
{ 7, 1, 1, 3, 1, 1, 3, 1 },
};
static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
/*
* Clock divider value for following
* { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
*/
/* ACLK200 L0: 200MHz */ /* ARM L4: 200MHz */
{ 3, 7, 4, 5, 1 }, { 3, 0 },
/* ACLK200 L1: 200MHz */
{ 3, 7, 4, 5, 1 },
/* ACLK200 L2: 160MHz */
{ 4, 7, 5, 7, 1 },
/* ACLK200 L3: 133.3MHz */
{ 5, 7, 7, 7, 1 },
};
static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
/*
* Clock divider value for following
* { DIVGDL/R, DIVGPL/R }
*/
/* ACLK_GDL/R L0: 200MHz */
{ 3, 1 },
/* ACLK_GDL/R L1: 200MHz */
{ 3, 1 },
/* ACLK_GDL/R L2: 160MHz */
{ 4, 1 },
/* ACLK_GDL/R L3: 133.3MHz */
{ 5, 1 },
};
struct cpufreq_voltage_table {
unsigned int index; /* any */
unsigned int arm_volt; /* uV */
unsigned int int_volt;
}; };
static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = { static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
{ /* APLL FOUT L0: 1200MHz */
.index = L0, ((150 << 16) | (3 << 8) | 1),
.arm_volt = 1200000,
.int_volt = 1100000,
}, {
.index = L1,
.arm_volt = 1100000,
.int_volt = 1100000,
}, {
.index = L2,
.arm_volt = 1000000,
.int_volt = 1000000,
}, {
.index = L3,
.arm_volt = 900000,
.int_volt = 1000000,
},
};
static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = { /* APLL FOUT L1: 1000MHz */
/* APLL FOUT L0: 1000MHz */
((250 << 16) | (6 << 8) | 1), ((250 << 16) | (6 << 8) | 1),
/* APLL FOUT L1: 800MHz */ /* APLL FOUT L2: 800MHz */
((200 << 16) | (6 << 8) | 1), ((200 << 16) | (6 << 8) | 1),
/* APLL FOUT L2 : 400MHz */ /* APLL FOUT L3: 500MHz */
((200 << 16) | (6 << 8) | 2), ((250 << 16) | (6 << 8) | 2),
/* APLL FOUT L3: 100MHz */ /* APLL FOUT L4: 200MHz */
((200 << 16) | (6 << 8) | 4), ((200 << 16) | (6 << 8) | 3),
}; };
static int exynos4_verify_speed(struct cpufreq_policy *policy) static void exynos4210_set_clkdiv(unsigned int div_index)
{
return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
}
static unsigned int exynos4_getspeed(unsigned int cpu)
{
return clk_get_rate(cpu_clk) / 1000;
}
static void exynos4_set_clkdiv(unsigned int div_index)
{ {
unsigned int tmp; unsigned int tmp;
/* Change Divider - CPU0 */ /* Change Divider - CPU0 */
tmp = __raw_readl(S5P_CLKDIV_CPU); tmp = exynos4210_clkdiv_table[div_index].clkdiv;
tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
S5P_CLKDIV_CPU0_APLL_MASK);
tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
(clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
(clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
(clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
(clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
(clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
(clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
__raw_writel(tmp, S5P_CLKDIV_CPU); __raw_writel(tmp, S5P_CLKDIV_CPU);
@@ -248,83 +141,9 @@ static void exynos4_set_clkdiv(unsigned int div_index)
do { do {
tmp = __raw_readl(S5P_CLKDIV_STATCPU1); tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
} while (tmp & 0x11); } while (tmp & 0x11);
/* Change Divider - DMC0 */
tmp = __raw_readl(S5P_CLKDIV_DMC0);
tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
(clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
(clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
(clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
(clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
(clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
(clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
(clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
__raw_writel(tmp, S5P_CLKDIV_DMC0);
do {
tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
} while (tmp & 0x11111111);
/* Change Divider - TOP */
tmp = __raw_readl(S5P_CLKDIV_TOP);
tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
S5P_CLKDIV_TOP_ONENAND_MASK);
tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
(clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
(clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
(clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
(clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
__raw_writel(tmp, S5P_CLKDIV_TOP);
do {
tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
} while (tmp & 0x11111);
/* Change Divider - LEFTBUS */
tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
(clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
__raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
do {
tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
} while (tmp & 0x11);
/* Change Divider - RIGHTBUS */
tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
(clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
__raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
do {
tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
} while (tmp & 0x11);
} }
static void exynos4_set_apll(unsigned int index) static void exynos4210_set_apll(unsigned int index)
{ {
unsigned int tmp; unsigned int tmp;
@@ -343,7 +162,7 @@ static void exynos4_set_apll(unsigned int index)
/* 3. Change PLL PMS values */ /* 3. Change PLL PMS values */
tmp = __raw_readl(S5P_APLL_CON0); tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0)); tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
tmp |= exynos4_apll_pms_table[index]; tmp |= exynos4210_apll_pms_table[index];
__raw_writel(tmp, S5P_APLL_CON0); __raw_writel(tmp, S5P_APLL_CON0);
/* 4. wait_lock_time */ /* 4. wait_lock_time */
@@ -360,328 +179,126 @@ static void exynos4_set_apll(unsigned int index)
} while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT)); } while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
} }
static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index) bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
{
unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8);
unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8);
return (old_pm == new_pm) ? 0 : 1;
}
static void exynos4210_set_frequency(unsigned int old_index,
unsigned int new_index)
{ {
unsigned int tmp; unsigned int tmp;
if (old_index > new_index) { if (old_index > new_index) {
/* The frequency changing to L0 needs to change apll */ if (!exynos4210_pms_change(old_index, new_index)) {
if (freqs.new == exynos4_freq_table[L0].frequency) {
/* 1. Change the system clock divider values */
exynos4_set_clkdiv(new_index);
/* 2. Change the apll m,p,s value */
exynos4_set_apll(new_index);
} else {
/* 1. Change the system clock divider values */ /* 1. Change the system clock divider values */
exynos4_set_clkdiv(new_index); exynos4210_set_clkdiv(new_index);
/* 2. Change just s value in apll m,p,s value */ /* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(S5P_APLL_CON0); tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~(0x7 << 0); tmp &= ~(0x7 << 0);
tmp |= (exynos4_apll_pms_table[new_index] & 0x7); tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
__raw_writel(tmp, S5P_APLL_CON0); __raw_writel(tmp, S5P_APLL_CON0);
}
}
else if (old_index < new_index) {
/* The frequency changing from L0 needs to change apll */
if (freqs.old == exynos4_freq_table[L0].frequency) {
/* 1. Change the apll m,p,s value */
exynos4_set_apll(new_index);
/* 2. Change the system clock divider values */
exynos4_set_clkdiv(new_index);
} else { } else {
/* Clock Configuration Procedure */
/* 1. Change the system clock divider values */
exynos4210_set_clkdiv(new_index);
/* 2. Change the apll m,p,s value */
exynos4210_set_apll(new_index);
}
} else if (old_index < new_index) {
if (!exynos4210_pms_change(old_index, new_index)) {
/* 1. Change just s value in apll m,p,s value */ /* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(S5P_APLL_CON0); tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~(0x7 << 0); tmp &= ~(0x7 << 0);
tmp |= (exynos4_apll_pms_table[new_index] & 0x7); tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
__raw_writel(tmp, S5P_APLL_CON0); __raw_writel(tmp, S5P_APLL_CON0);
/* 2. Change the system clock divider values */ /* 2. Change the system clock divider values */
exynos4_set_clkdiv(new_index); exynos4210_set_clkdiv(new_index);
} else {
/* Clock Configuration Procedure */
/* 1. Change the apll m,p,s value */
exynos4210_set_apll(new_index);
/* 2. Change the system clock divider values */
exynos4210_set_clkdiv(new_index);
} }
} }
} }
static int exynos4_target(struct cpufreq_policy *policy, int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
unsigned int target_freq,
unsigned int relation)
{
unsigned int index, old_index;
unsigned int arm_volt, int_volt;
int err = -EINVAL;
freqs.old = exynos4_getspeed(policy->cpu);
mutex_lock(&cpufreq_lock);
if (frequency_locked && target_freq != locking_frequency) {
err = -EAGAIN;
goto out;
}
if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
freqs.old, relation, &old_index))
goto out;
if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
target_freq, relation, &index))
goto out;
err = 0;
freqs.new = exynos4_freq_table[index].frequency;
freqs.cpu = policy->cpu;
if (freqs.new == freqs.old)
goto out;
/* get the voltage value */
arm_volt = exynos4_volt_table[index].arm_volt;
int_volt = exynos4_volt_table[index].int_volt;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* control regulator */
if (freqs.new > freqs.old) {
/* Voltage up */
regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
regulator_set_voltage(int_regulator, int_volt, int_volt);
}
/* Clock Configuration Procedure */
exynos4_set_frequency(old_index, index);
/* control regulator */
if (freqs.new < freqs.old) {
/* Voltage down */
regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
regulator_set_voltage(int_regulator, int_volt, int_volt);
}
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
out:
mutex_unlock(&cpufreq_lock);
return err;
}
#ifdef CONFIG_PM
/*
* These suspend/resume are used as syscore_ops, it is already too
* late to set regulator voltages at this stage.
*/
static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
}
static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
{ {
return 0; int i;
} unsigned int tmp;
#endif unsigned long rate;
/**
* exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
* context
* @notifier
* @pm_event
* @v
*
* While frequency_locked == true, target() ignores every frequency but
* locking_frequency. The locking_frequency value is the initial frequency,
* which is set by the bootloader. In order to eliminate possible
* inconsistency in clock values, we save and restore frequencies during
* suspend and resume and block CPUFREQ activities. Note that the standard
* suspend/resume cannot be used as they are too deep (syscore_ops) for
* regulator actions.
*/
static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *v)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
static unsigned int saved_frequency;
unsigned int temp;
mutex_lock(&cpufreq_lock);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
if (frequency_locked)
goto out;
frequency_locked = true;
if (locking_frequency) {
saved_frequency = exynos4_getspeed(0);
mutex_unlock(&cpufreq_lock);
exynos4_target(policy, locking_frequency,
CPUFREQ_RELATION_H);
mutex_lock(&cpufreq_lock);
}
break;
case PM_POST_SUSPEND:
if (saved_frequency) {
/*
* While frequency_locked, only locking_frequency
* is valid for target(). In order to use
* saved_frequency while keeping frequency_locked,
* we temporarly overwrite locking_frequency.
*/
temp = locking_frequency;
locking_frequency = saved_frequency;
mutex_unlock(&cpufreq_lock);
exynos4_target(policy, locking_frequency,
CPUFREQ_RELATION_H);
mutex_lock(&cpufreq_lock);
locking_frequency = temp;
}
frequency_locked = false;
break;
}
out:
mutex_unlock(&cpufreq_lock);
return NOTIFY_OK;
}
static struct notifier_block exynos4_cpufreq_nb = {
.notifier_call = exynos4_cpufreq_pm_notifier,
};
static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int ret;
policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
/*
* EXYNOS4 multi-core processors has 2 cores
* that the frequency cannot be set independently.
* Each cpu is bound to the same speed.
* So the affected cpu is all of the cpus.
*/
cpumask_setall(policy->cpus);
ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
if (ret)
return ret;
cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
return 0;
}
static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_put_attr(policy->cpu);
return 0;
}
static struct freq_attr *exynos4_cpufreq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
NULL,
};
static struct cpufreq_driver exynos4_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos4_verify_speed,
.target = exynos4_target,
.get = exynos4_getspeed,
.init = exynos4_cpufreq_cpu_init,
.exit = exynos4_cpufreq_cpu_exit,
.name = "exynos4_cpufreq",
.attr = exynos4_cpufreq_attr,
#ifdef CONFIG_PM
.suspend = exynos4_cpufreq_suspend,
.resume = exynos4_cpufreq_resume,
#endif
};
static int __init exynos4_cpufreq_init(void)
{
cpu_clk = clk_get(NULL, "armclk"); cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk)) if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk); return PTR_ERR(cpu_clk);
locking_frequency = exynos4_getspeed(0);
moutcore = clk_get(NULL, "moutcore"); moutcore = clk_get(NULL, "moutcore");
if (IS_ERR(moutcore)) if (IS_ERR(moutcore))
goto out; goto err_moutcore;
mout_mpll = clk_get(NULL, "mout_mpll"); mout_mpll = clk_get(NULL, "mout_mpll");
if (IS_ERR(mout_mpll)) if (IS_ERR(mout_mpll))
goto out; goto err_mout_mpll;
rate = clk_get_rate(mout_mpll) / 1000;
mout_apll = clk_get(NULL, "mout_apll"); mout_apll = clk_get(NULL, "mout_apll");
if (IS_ERR(mout_apll)) if (IS_ERR(mout_apll))
goto out; goto err_mout_apll;
arm_regulator = regulator_get(NULL, "vdd_arm"); tmp = __raw_readl(S5P_CLKDIV_CPU);
if (IS_ERR(arm_regulator)) {
printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
goto out;
}
int_regulator = regulator_get(NULL, "vdd_int"); for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
if (IS_ERR(int_regulator)) { tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK |
printk(KERN_ERR "failed to get resource %s\n", "vdd_int"); S5P_CLKDIV_CPU0_COREM0_MASK |
goto out; S5P_CLKDIV_CPU0_COREM1_MASK |
S5P_CLKDIV_CPU0_PERIPH_MASK |
S5P_CLKDIV_CPU0_ATB_MASK |
S5P_CLKDIV_CPU0_PCLKDBG_MASK |
S5P_CLKDIV_CPU0_APLL_MASK);
tmp |= ((clkdiv_cpu0[i][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
(clkdiv_cpu0[i][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
(clkdiv_cpu0[i][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
(clkdiv_cpu0[i][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
(clkdiv_cpu0[i][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
(clkdiv_cpu0[i][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
(clkdiv_cpu0[i][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
exynos4210_clkdiv_table[i].clkdiv = tmp;
} }
/* info->mpll_freq_khz = rate;
* Check DRAM type. info->pm_lock_idx = L2;
* Because DVFS level is different according to DRAM type. info->pll_safe_idx = L2;
*/ info->max_support_idx = max_support_idx;
memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET); info->min_support_idx = min_support_idx;
memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT); info->cpu_clk = cpu_clk;
memtype &= S5P_DMC0_MEMTYPE_MASK; info->volt_table = exynos4210_volt_table;
info->freq_table = exynos4210_freq_table;
if ((memtype < DDR2) && (memtype > DDR3)) { info->set_freq = exynos4210_set_frequency;
printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype); info->need_apll_change = exynos4210_pms_change;
goto out;
} else {
printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
}
register_pm_notifier(&exynos4_cpufreq_nb);
return cpufreq_register_driver(&exynos4_driver);
out:
if (!IS_ERR(cpu_clk))
clk_put(cpu_clk);
if (!IS_ERR(moutcore)) return 0;
clk_put(moutcore);
err_mout_apll:
if (!IS_ERR(mout_mpll)) if (!IS_ERR(mout_mpll))
clk_put(mout_mpll); clk_put(mout_mpll);
err_mout_mpll:
if (!IS_ERR(moutcore))
clk_put(moutcore);
err_moutcore:
if (!IS_ERR(cpu_clk))
clk_put(cpu_clk);
if (!IS_ERR(mout_apll)) pr_debug("%s: failed initialization\n", __func__);
clk_put(mout_apll);
if (!IS_ERR(arm_regulator))
regulator_put(arm_regulator);
if (!IS_ERR(int_regulator))
regulator_put(int_regulator);
printk(KERN_ERR "%s: failed initialization\n", __func__);
return -EINVAL; return -EINVAL;
} }
late_initcall(exynos4_cpufreq_init); EXPORT_SYMBOL(exynos4210_cpufreq_init);
 /*
- * linux/arch/arm/plat-omap/cpu-omap.c
- *
- * CPU frequency scaling for OMAP
+ * CPU frequency scaling for OMAP using OPP information
  *
  * Copyright (C) 2005 Nokia Corporation
  * Written by Tony Lindgren <tony@atomide.com>
  *
  * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
  *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -21,48 +22,49 @@
#include <linux/err.h> #include <linux/err.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/opp.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <mach/hardware.h>
#include <plat/clock.h>
#include <asm/system.h> #include <asm/system.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>
#define VERY_HI_RATE 900000000 #include <plat/clock.h>
#include <plat/omap-pm.h>
#include <plat/common.h>
#include <plat/omap_device.h>
static struct cpufreq_frequency_table *freq_table; #include <mach/hardware.h>
#ifdef CONFIG_ARCH_OMAP1 #ifdef CONFIG_SMP
#define MPU_CLK "mpu" struct lpj_info {
#else unsigned long ref;
#define MPU_CLK "virt_prcm_set" unsigned int freq;
};
static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
static struct lpj_info global_lpj_ref;
#endif #endif
static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);
static struct clk *mpu_clk; static struct clk *mpu_clk;
static char *mpu_clk_name;
/* TODO: Add support for SDRAM timing changes */ static struct device *mpu_dev;
static int omap_verify_speed(struct cpufreq_policy *policy) static int omap_verify_speed(struct cpufreq_policy *policy)
{ {
if (freq_table) if (!freq_table)
return cpufreq_frequency_table_verify(policy, freq_table);
if (policy->cpu)
return -EINVAL; return -EINVAL;
return cpufreq_frequency_table_verify(policy, freq_table);
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000;
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
policy->cpuinfo.max_freq);
return 0;
} }
static unsigned int omap_getspeed(unsigned int cpu) static unsigned int omap_getspeed(unsigned int cpu)
{ {
unsigned long rate; unsigned long rate;
if (cpu) if (cpu >= NR_CPUS)
return 0; return 0;
rate = clk_get_rate(mpu_clk) / 1000; rate = clk_get_rate(mpu_clk) / 1000;
@@ -73,68 +75,151 @@ static int omap_target(struct cpufreq_policy *policy,
unsigned int target_freq, unsigned int target_freq,
unsigned int relation) unsigned int relation)
{ {
struct cpufreq_freqs freqs; unsigned int i;
int ret = 0; int ret = 0;
struct cpufreq_freqs freqs;
/* Ensure desired rate is within allowed range. Some govenors if (!freq_table) {
* (ondemand) will just pass target_freq=0 to get the minimum. */ dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
if (target_freq < policy->min) policy->cpu);
target_freq = policy->min; return -EINVAL;
if (target_freq > policy->max) }
target_freq = policy->max;
ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
relation, &i);
if (ret) {
dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
__func__, policy->cpu, target_freq, ret);
return ret;
}
freqs.new = freq_table[i].frequency;
if (!freqs.new) {
dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
policy->cpu, target_freq);
return -EINVAL;
}
freqs.old = omap_getspeed(0); freqs.old = omap_getspeed(policy->cpu);
freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000; freqs.cpu = policy->cpu;
freqs.cpu = 0;
if (freqs.old == freqs.new) if (freqs.old == freqs.new && policy->cur == freqs.new)
return ret; return ret;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); /* notifiers */
for_each_cpu(i, policy->cpus) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
}
#ifdef CONFIG_CPU_FREQ_DEBUG #ifdef CONFIG_CPU_FREQ_DEBUG
printk(KERN_DEBUG "cpufreq-omap: transition: %u --> %u\n", pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new);
freqs.old, freqs.new);
#endif #endif
ret = clk_set_rate(mpu_clk, freqs.new * 1000); ret = clk_set_rate(mpu_clk, freqs.new * 1000);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); freqs.new = omap_getspeed(policy->cpu);
#ifdef CONFIG_SMP
/*
* Note that loops_per_jiffy is not updated on SMP systems in
* cpufreq driver. So, update the per-CPU loops_per_jiffy value
* on frequency transition. We need to update all dependent CPUs.
*/
for_each_cpu(i, policy->cpus) {
struct lpj_info *lpj = &per_cpu(lpj_ref, i);
if (!lpj->freq) {
lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
lpj->freq = freqs.old;
}
per_cpu(cpu_data, i).loops_per_jiffy =
cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
}
/* And don't forget to adjust the global one */
if (!global_lpj_ref.freq) {
global_lpj_ref.ref = loops_per_jiffy;
global_lpj_ref.freq = freqs.old;
}
loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
freqs.new);
#endif
/* notifiers */
for_each_cpu(i, policy->cpus) {
freqs.cpu = i;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
}
return ret; return ret;
} }
static inline void freq_table_free(void)
{
if (atomic_dec_and_test(&freq_table_users))
opp_free_cpufreq_table(mpu_dev, &freq_table);
}
static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
{ {
int result = 0; int result = 0;
mpu_clk = clk_get(NULL, MPU_CLK); mpu_clk = clk_get(NULL, mpu_clk_name);
if (IS_ERR(mpu_clk)) if (IS_ERR(mpu_clk))
return PTR_ERR(mpu_clk); return PTR_ERR(mpu_clk);
if (policy->cpu != 0) if (policy->cpu >= NR_CPUS) {
return -EINVAL; result = -EINVAL;
goto fail_ck;
}
policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
if (atomic_inc_return(&freq_table_users) == 1)
result = opp_init_cpufreq_table(mpu_dev, &freq_table);
if (result) {
dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
__func__, policy->cpu, result);
goto fail_ck;
}
policy->cur = policy->min = policy->max = omap_getspeed(0); result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
if (result)
clk_init_cpufreq_table(&freq_table); goto fail_table;
if (freq_table) {
result = cpufreq_frequency_table_cpuinfo(policy, freq_table); cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
if (!result)
cpufreq_frequency_table_get_attr(freq_table, policy->min = policy->cpuinfo.min_freq;
policy->cpu); policy->max = policy->cpuinfo.max_freq;
} else { policy->cur = omap_getspeed(policy->cpu);
policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
policy->cpuinfo.max_freq = clk_round_rate(mpu_clk, /*
VERY_HI_RATE) / 1000; * On OMAP SMP configuartion, both processors share the voltage
* and clock. So both CPUs needs to be scaled together and hence
* needs software co-ordination. Use cpufreq affected_cpus
* interface to handle this scenario. Additional is_smp() check
* is to keep SMP_ON_UP build working.
*/
if (is_smp()) {
policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
cpumask_setall(policy->cpus);
} }
/* FIXME: what's the actual transition time? */ /* FIXME: what's the actual transition time? */
policy->cpuinfo.transition_latency = 300 * 1000; policy->cpuinfo.transition_latency = 300 * 1000;
return 0; return 0;
fail_table:
freq_table_free();
fail_ck:
clk_put(mpu_clk);
return result;
} }
static int omap_cpu_exit(struct cpufreq_policy *policy) static int omap_cpu_exit(struct cpufreq_policy *policy)
{ {
clk_exit_cpufreq_table(&freq_table); freq_table_free();
clk_put(mpu_clk); clk_put(mpu_clk);
return 0; return 0;
} }
@@ -157,15 +242,33 @@ static struct cpufreq_driver omap_driver = {
static int __init omap_cpufreq_init(void) static int __init omap_cpufreq_init(void)
{ {
if (cpu_is_omap24xx())
mpu_clk_name = "virt_prcm_set";
else if (cpu_is_omap34xx())
mpu_clk_name = "dpll1_ck";
else if (cpu_is_omap44xx())
mpu_clk_name = "dpll_mpu_ck";
if (!mpu_clk_name) {
pr_err("%s: unsupported Silicon?\n", __func__);
return -EINVAL;
}
mpu_dev = omap_device_get_by_hwmod_name("mpu");
if (!mpu_dev) {
pr_warning("%s: unable to get the mpu device\n", __func__);
return -EINVAL;
}
return cpufreq_register_driver(&omap_driver); return cpufreq_register_driver(&omap_driver);
} }
arch_initcall(omap_cpufreq_init); static void __exit omap_cpufreq_exit(void)
{
/* cpufreq_unregister_driver(&omap_driver);
* if ever we want to remove this, upon cleanup call: }
*
* cpufreq_unregister_driver()
* cpufreq_frequency_table_put_attr()
*/
MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
MODULE_LICENSE("GPL");
module_init(omap_cpufreq_init);
module_exit(omap_cpufreq_exit);
 /*
- *   (c) 2003-2010 Advanced Micro Devices, Inc.
+ *   (c) 2003-2012 Advanced Micro Devices, Inc.
  *  Your use of this code is subject to the terms and conditions of the
  *  GNU general public license version 2. See "COPYING" or
  *  http://www.gnu.org/licenses/gpl.html
  *
- *  Support : mark.langsdorf@amd.com
+ *  Maintainer:
+ *  Andreas Herrmann <andreas.herrmann3@amd.com>
  *
  *  Based on the powernow-k7.c module written by Dave Jones.
  *  (C) 2003 Dave Jones on behalf of SuSE Labs
@@ -16,12 +17,14 @@
  *  Valuable input gratefully received from Dave Jones, Pavel Machek,
  *  Dominik Brodowski, Jacob Shin, and others.
  *  Originally developed by Paul Devriendt.
- *  Processor information obtained from Chapter 9 (Power and Thermal Management)
- *  of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
- *  Opteron Processors" available for download from www.amd.com
  *
- *  Tables for specific CPUs can be inferred from
- *  http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
+ *  Processor information obtained from Chapter 9 (Power and Thermal
+ *  Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
+ *  the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
+ *  Power Management" in BKDGs for newer AMD CPU families.
+ *
+ *  Tables for specific CPUs can be inferred from AMD's processor
+ *  power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
  */
#include <linux/kernel.h> #include <linux/kernel.h>
@@ -54,6 +57,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
static int cpu_family = CPU_OPTERON; static int cpu_family = CPU_OPTERON;
/* array to map SW pstate number to acpi state */
static u32 ps_to_as[8];
/* core performance boost */ /* core performance boost */
static bool cpb_capable, cpb_enabled; static bool cpb_capable, cpb_enabled;
static struct msr __percpu *msrs; static struct msr __percpu *msrs;
@@ -80,9 +86,9 @@ static u32 find_khz_freq_from_fid(u32 fid)
} }
static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
u32 pstate) u32 pstate)
{ {
return data[pstate].frequency; return data[ps_to_as[pstate]].frequency;
} }
/* Return the vco fid for an input fid /* Return the vco fid for an input fid
@@ -926,23 +932,27 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
invalidate_entry(powernow_table, i); invalidate_entry(powernow_table, i);
continue; continue;
} }
rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
if (!(hi & HW_PSTATE_VALID_MASK)) {
pr_debug("invalid pstate %d, ignoring\n", index);
invalidate_entry(powernow_table, i);
continue;
}
powernow_table[i].index = index; ps_to_as[index] = i;
/* Frequency may be rounded for these */ /* Frequency may be rounded for these */
if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10) if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
|| boot_cpu_data.x86 == 0x11) { || boot_cpu_data.x86 == 0x11) {
rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
if (!(hi & HW_PSTATE_VALID_MASK)) {
pr_debug("invalid pstate %d, ignoring\n", index);
invalidate_entry(powernow_table, i);
continue;
}
powernow_table[i].frequency = powernow_table[i].frequency =
freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7); freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
} else } else
powernow_table[i].frequency = powernow_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000; data->acpi_data.states[i].core_frequency * 1000;
powernow_table[i].index = index;
} }
return 0; return 0;
} }
@@ -1189,7 +1199,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
powernow_k8_acpi_pst_values(data, newstate); powernow_k8_acpi_pst_values(data, newstate);
if (cpu_family == CPU_HW_PSTATE) if (cpu_family == CPU_HW_PSTATE)
ret = transition_frequency_pstate(data, newstate); ret = transition_frequency_pstate(data,
data->powernow_table[newstate].index);
else else
ret = transition_frequency_fidvid(data, newstate); ret = transition_frequency_fidvid(data, newstate);
if (ret) { if (ret) {
@@ -1202,7 +1213,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
if (cpu_family == CPU_HW_PSTATE) if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_pstate(data->powernow_table, pol->cur = find_khz_freq_from_pstate(data->powernow_table,
newstate); data->powernow_table[newstate].index);
else else
pol->cur = find_khz_freq_from_fid(data->currfid); pol->cur = find_khz_freq_from_fid(data->currfid);
ret = 0; ret = 0;
...
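For context on the powernow-k8 indexing fix above: as the diff shows, cpufreq table entries are filled in ACPI order and some may be invalidated, so a P-state's position in powernow_table need not equal its hardware P-state number. The ps_to_as[] array records that mapping; a condensed view of how the patched code uses it (same identifiers as in the diff):

	ps_to_as[index] = i;	/* while filling table entry i for HW P-state "index" */
	...
	/* later: look up a frequency by hardware P-state number */
	return data[ps_to_as[pstate]].frequency;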
@@ -8,6 +8,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) "cpufreq: " fmt
+
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -91,7 +93,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new) if (freqs.old == freqs.new)
return 0; return 0;
pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new); pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -101,7 +103,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
dvfs->vddarm_min, dvfs->vddarm_min,
dvfs->vddarm_max); dvfs->vddarm_max);
if (ret != 0) { if (ret != 0) {
pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n", pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret); freqs.new, ret);
goto err; goto err;
} }
@@ -110,7 +112,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
ret = clk_set_rate(armclk, freqs.new * 1000); ret = clk_set_rate(armclk, freqs.new * 1000);
if (ret < 0) { if (ret < 0) {
pr_err("cpufreq: Failed to set rate %dkHz: %d\n", pr_err("Failed to set rate %dkHz: %d\n",
freqs.new, ret); freqs.new, ret);
goto err; goto err;
} }
@@ -123,14 +125,14 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
dvfs->vddarm_min, dvfs->vddarm_min,
dvfs->vddarm_max); dvfs->vddarm_max);
if (ret != 0) { if (ret != 0) {
pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n", pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret); freqs.new, ret);
goto err_clk; goto err_clk;
} }
} }
#endif #endif
pr_debug("cpufreq: Set actual frequency %lukHz\n", pr_debug("Set actual frequency %lukHz\n",
clk_get_rate(armclk) / 1000); clk_get_rate(armclk) / 1000);
return 0; return 0;
@@ -153,7 +155,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
count = regulator_count_voltages(vddarm); count = regulator_count_voltages(vddarm);
if (count < 0) { if (count < 0) {
pr_err("cpufreq: Unable to check supported voltages\n"); pr_err("Unable to check supported voltages\n");
} }
freq = s3c64xx_freq_table; freq = s3c64xx_freq_table;
@@ -171,7 +173,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
} }
if (!found) { if (!found) {
pr_debug("cpufreq: %dkHz unsupported by regulator\n", pr_debug("%dkHz unsupported by regulator\n",
freq->frequency); freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID; freq->frequency = CPUFREQ_ENTRY_INVALID;
} }
@@ -194,13 +196,13 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
return -EINVAL; return -EINVAL;
if (s3c64xx_freq_table == NULL) { if (s3c64xx_freq_table == NULL) {
pr_err("cpufreq: No frequency information for this CPU\n"); pr_err("No frequency information for this CPU\n");
return -ENODEV; return -ENODEV;
} }
armclk = clk_get(NULL, "armclk"); armclk = clk_get(NULL, "armclk");
if (IS_ERR(armclk)) { if (IS_ERR(armclk)) {
pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n", pr_err("Unable to obtain ARMCLK: %ld\n",
PTR_ERR(armclk)); PTR_ERR(armclk));
return PTR_ERR(armclk); return PTR_ERR(armclk);
} }
@@ -209,12 +211,19 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
vddarm = regulator_get(NULL, "vddarm"); vddarm = regulator_get(NULL, "vddarm");
if (IS_ERR(vddarm)) { if (IS_ERR(vddarm)) {
ret = PTR_ERR(vddarm); ret = PTR_ERR(vddarm);
pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret); pr_err("Failed to obtain VDDARM: %d\n", ret);
pr_err("cpufreq: Only frequency scaling available\n"); pr_err("Only frequency scaling available\n");
vddarm = NULL; vddarm = NULL;
} else { } else {
s3c64xx_cpufreq_config_regulator(); s3c64xx_cpufreq_config_regulator();
} }
vddint = regulator_get(NULL, "vddint");
if (IS_ERR(vddint)) {
ret = PTR_ERR(vddint);
pr_err("Failed to obtain VDDINT: %d\n", ret);
vddint = NULL;
}
#endif #endif
freq = s3c64xx_freq_table; freq = s3c64xx_freq_table;
@@ -225,7 +234,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
r = clk_round_rate(armclk, freq->frequency * 1000); r = clk_round_rate(armclk, freq->frequency * 1000);
r /= 1000; r /= 1000;
if (r != freq->frequency) { if (r != freq->frequency) {
pr_debug("cpufreq: %dkHz unsupported by clock\n", pr_debug("%dkHz unsupported by clock\n",
freq->frequency); freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID; freq->frequency = CPUFREQ_ENTRY_INVALID;
} }
@@ -248,7 +257,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table); ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
if (ret != 0) { if (ret != 0) {
pr_err("cpufreq: Failed to configure frequency table: %d\n", pr_err("Failed to configure frequency table: %d\n",
ret); ret);
regulator_put(vddarm); regulator_put(vddarm);
clk_put(armclk); clk_put(armclk);
...
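The s3c64xx changes above rely on the kernel's pr_fmt() convention: the pr_*() macros wrap the format string with pr_fmt(), so defining it once (before the includes) prefixes every message in the file, and the literal "cpufreq: " prefixes can be dropped from the call sites. A minimal illustration:

	#define pr_fmt(fmt) "cpufreq: " fmt	/* must come before the includes */

	pr_err("Failed to set rate %dkHz: %d\n", freqs.new, ret);
	/* expands to roughly:
	 *	printk(KERN_ERR "cpufreq: Failed to set rate %dkHz: %d\n", ...);
	 */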