Commit 02d92950 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq: (23 commits)
  [CPUFREQ] EXYNOS: Removed useless headers and codes
  [CPUFREQ] EXYNOS: Make EXYNOS common cpufreq driver
  [CPUFREQ] powernow-k8: Update copyright, maintainer and documentation information
  [CPUFREQ] powernow-k8: Fix indexing issue
  [CPUFREQ] powernow-k8: Avoid Pstate MSR accesses on systems supporting CPB
  [CPUFREQ] update lpj only if frequency has changed
  [CPUFREQ] cpufreq:userspace: fix cpu_cur_freq updation
  [CPUFREQ] Remove wall variable from cpufreq_gov_dbs_init()
  [CPUFREQ] EXYNOS4210: cpufreq code is changed for stable working
  [CPUFREQ] EXYNOS4210: Update frequency table for cpu divider
  [CPUFREQ] EXYNOS4210: Remove code about bus on cpufreq
  [CPUFREQ] s3c64xx: Use pr_fmt() for consistent log messages
  cpufreq: OMAP: fixup for omap_device changes, include <linux/module.h>
  cpufreq: OMAP: fix freq_table leak
  cpufreq: OMAP: put clk if cpu_init failed
  cpufreq: OMAP: only supports OPP library
  cpufreq: OMAP: dont support !freq_table
  cpufreq: OMAP: deny initialization if no mpudev
  cpufreq: OMAP: move clk name decision to init
  cpufreq: OMAP: notify even with bad boot frequency
  ...
parents b24ca57e 6c523c61
/* linux/arch/arm/mach-exynos/include/mach/cpufreq.h
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - CPUFreq support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
	L10, L11, L12, L13, L14,
	L15, L16, L17, L18, L19,
	L20,
};

struct exynos_dvfs_info {
	unsigned long	mpll_freq_khz;
	unsigned int	pll_safe_idx;
	unsigned int	pm_lock_idx;
	unsigned int	max_support_idx;
	unsigned int	min_support_idx;
	struct clk	*cpu_clk;
	unsigned int	*volt_table;
	struct cpufreq_frequency_table	*freq_table;
	void (*set_freq)(unsigned int, unsigned int);
	bool (*need_apll_change)(unsigned int, unsigned int);
};
extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
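The struct above is the contract between the common driver (added below as exynos-cpufreq.c) and the SoC-specific code: exynos4210_cpufreq_init() is expected to supply the CPU clock, the voltage and frequency tables, and the set_freq/need_apll_change callbacks. A minimal, hypothetical sketch of such an init follows; every value, name and helper in it is illustrative, not the real EXYNOS4210 data.

/*
 * Hypothetical sketch only: the real tables, callbacks and clock names
 * live in exynos4210-cpufreq.c and are not reproduced here.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/cpufreq.h>
#include <mach/cpufreq.h>

static struct cpufreq_frequency_table example_freq_table[] = {
	{ .index = L0, .frequency = 1200000 },	/* kHz, made-up values */
	{ .index = L1, .frequency = 1000000 },
	{ .index = L2, .frequency =  800000 },
	{ .index = 0,  .frequency = CPUFREQ_TABLE_END },
};

/* One voltage (in uV) per level above; values are made up. */
static unsigned int example_volt_table[] = { 1250000, 1150000, 1050000 };

static void example_set_freq(unsigned int old_index, unsigned int new_index)
{
	/* reprogram the CPU dividers/APLL for the new level here */
}

static bool example_need_apll_change(unsigned int old_index,
				     unsigned int new_index)
{
	return old_index != new_index;	/* placeholder policy */
}

int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
	info->mpll_freq_khz = 800000;
	info->pll_safe_idx = L2;
	info->cpu_clk = clk_get(NULL, "armclk");	/* clock name is illustrative */
	if (IS_ERR(info->cpu_clk))
		return PTR_ERR(info->cpu_clk);
	info->volt_table = example_volt_table;
	info->freq_table = example_freq_table;
	info->set_freq = example_set_freq;
	info->need_apll_change = example_need_apll_change;
	return 0;
}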
@@ -21,12 +21,19 @@ config ARM_S5PV210_CPUFREQ
 	  If in doubt, say N.
 
+config ARM_EXYNOS_CPUFREQ
+	bool "SAMSUNG EXYNOS SoCs"
+	depends on ARCH_EXYNOS
+	select ARM_EXYNOS4210_CPUFREQ if CPU_EXYNOS4210
+	default y
+	help
+	  This adds the CPUFreq driver common part for Samsung
+	  EXYNOS SoCs.
+
+	  If in doubt, say N.
+
 config ARM_EXYNOS4210_CPUFREQ
 	bool "Samsung EXYNOS4210"
-	depends on CPU_EXYNOS4210
-	default y
 	help
 	  This adds the CPUFreq driver for Samsung EXYNOS4210
 	  SoC (S5PV310 or S5PC210).
-
-	  If in doubt, say N.
@@ -42,7 +42,9 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
 obj-$(CONFIG_UX500_SOC_DB8500)		+= db8500-cpufreq.o
 obj-$(CONFIG_ARM_S3C64XX_CPUFREQ)	+= s3c64xx-cpufreq.o
 obj-$(CONFIG_ARM_S5PV210_CPUFREQ)	+= s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= exynos-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
+obj-$(CONFIG_ARCH_OMAP2PLUS)		+= omap-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
...
@@ -204,8 +204,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 		pr_debug("saving %lu as reference value for loops_per_jiffy; "
 			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
 	}
-	if ((val == CPUFREQ_PRECHANGE  && ci->old < ci->new) ||
-	    (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
+	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
 	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
 		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
 								ci->new);
...
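For context, cpufreq_scale() just rescales the saved reference loops_per_jiffy in proportion to the frequency change, so recalibrating only on a real POSTCHANGE (as the hunk above does) avoids drift. A rough sketch of the arithmetic, illustrative only; the kernel helper also keeps fixed-point rounding state:

#include <linux/math64.h>

/* Illustrative only, not the kernel's cpufreq_scale() implementation. */
static unsigned long example_scale_lpj(unsigned long ref_lpj,
				       unsigned int ref_khz,
				       unsigned int new_khz)
{
	/* e.g. ref_lpj = 4997120 at 1000000 kHz -> ~2498560 at 500000 kHz */
	return (unsigned long)div_u64((u64)ref_lpj * new_khz, ref_khz);
}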
@@ -713,11 +713,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 static int __init cpufreq_gov_dbs_init(void)
 {
-	cputime64_t wall;
 	u64 idle_time;
 	int cpu = get_cpu();
 
-	idle_time = get_cpu_idle_time_us(cpu, &wall);
+	idle_time = get_cpu_idle_time_us(cpu, NULL);
 	put_cpu();
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
...
@@ -47,9 +47,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 	if (!per_cpu(cpu_is_managed, freq->cpu))
 		return 0;
 
+	if (val == CPUFREQ_POSTCHANGE) {
 		pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
 			freq->cpu, freq->new);
 		per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+	}
 
 	return 0;
 }
...
/*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - CPU frequency scaling support for EXYNOS series
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
#include <linux/suspend.h>
#include <mach/cpufreq.h>
#include <plat/cpu.h>
static struct exynos_dvfs_info *exynos_info;
static struct regulator *arm_regulator;
static struct cpufreq_freqs freqs;
static unsigned int locking_frequency;
static bool frequency_locked;
static DEFINE_MUTEX(cpufreq_lock);
int exynos_verify_speed(struct cpufreq_policy *policy)
{
return cpufreq_frequency_table_verify(policy,
exynos_info->freq_table);
}
unsigned int exynos_getspeed(unsigned int cpu)
{
return clk_get_rate(exynos_info->cpu_clk) / 1000;
}
static int exynos_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
{
unsigned int index, old_index;
unsigned int arm_volt, safe_arm_volt = 0;
int ret = 0;
struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
unsigned int *volt_table = exynos_info->volt_table;
unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
mutex_lock(&cpufreq_lock);
freqs.old = policy->cur;
if (frequency_locked && target_freq != locking_frequency) {
ret = -EAGAIN;
goto out;
}
if (cpufreq_frequency_table_target(policy, freq_table,
freqs.old, relation, &old_index)) {
ret = -EINVAL;
goto out;
}
if (cpufreq_frequency_table_target(policy, freq_table,
target_freq, relation, &index)) {
ret = -EINVAL;
goto out;
}
freqs.new = freq_table[index].frequency;
freqs.cpu = policy->cpu;
/*
 * The ARM clock source is temporarily switched from APLL to MPLL.
 * To support that intermediate level, the regulator has to be set
 * to the voltage it requires.
 */
if (exynos_info->need_apll_change != NULL) {
if (exynos_info->need_apll_change(old_index, index) &&
(freq_table[index].frequency < mpll_freq_khz) &&
(freq_table[old_index].frequency < mpll_freq_khz))
safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
}
arm_volt = volt_table[index];
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
/* When the new frequency is higher than current frequency */
if ((freqs.new > freqs.old) && !safe_arm_volt) {
/* Firstly, voltage up to increase frequency */
regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
}
if (safe_arm_volt)
regulator_set_voltage(arm_regulator, safe_arm_volt,
safe_arm_volt);
if (freqs.new != freqs.old)
exynos_info->set_freq(old_index, index);
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
/* When the new frequency is lower than current frequency */
if ((freqs.new < freqs.old) ||
((freqs.new > freqs.old) && safe_arm_volt)) {
/* down the voltage after frequency change */
regulator_set_voltage(arm_regulator, arm_volt,
arm_volt);
}
out:
mutex_unlock(&cpufreq_lock);
return ret;
}
#ifdef CONFIG_PM
static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
}
static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
{
return 0;
}
#endif
/**
* exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
* context
* @notifier
* @pm_event
* @v
*
* While frequency_locked == true, target() ignores every frequency but
* locking_frequency. The locking_frequency value is the initial frequency,
* which is set by the bootloader. In order to eliminate possible
* inconsistency in clock values, we save and restore frequencies during
* suspend and resume and block CPUFREQ activities. Note that the standard
* suspend/resume cannot be used as they are too deep (syscore_ops) for
* regulator actions.
*/
static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
unsigned long pm_event, void *v)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
static unsigned int saved_frequency;
unsigned int temp;
mutex_lock(&cpufreq_lock);
switch (pm_event) {
case PM_SUSPEND_PREPARE:
if (frequency_locked)
goto out;
frequency_locked = true;
if (locking_frequency) {
saved_frequency = exynos_getspeed(0);
mutex_unlock(&cpufreq_lock);
exynos_target(policy, locking_frequency,
CPUFREQ_RELATION_H);
mutex_lock(&cpufreq_lock);
}
break;
case PM_POST_SUSPEND:
if (saved_frequency) {
/*
* While frequency_locked, only locking_frequency
* is valid for target(). In order to use
* saved_frequency while keeping frequency_locked,
* we temporarily overwrite locking_frequency.
*/
temp = locking_frequency;
locking_frequency = saved_frequency;
mutex_unlock(&cpufreq_lock);
exynos_target(policy, locking_frequency,
CPUFREQ_RELATION_H);
mutex_lock(&cpufreq_lock);
locking_frequency = temp;
}
frequency_locked = false;
break;
}
out:
mutex_unlock(&cpufreq_lock);
return NOTIFY_OK;
}
static struct notifier_block exynos_cpufreq_nb = {
.notifier_call = exynos_cpufreq_pm_notifier,
};
static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
/* set the transition latency value */
policy->cpuinfo.transition_latency = 100000;
/*
 * EXYNOS4 multi-core processors have two cores whose frequency
 * cannot be set independently. Each cpu is bound to the same
 * speed, so every cpu is affected by a frequency change.
 */
if (num_online_cpus() == 1) {
cpumask_copy(policy->related_cpus, cpu_possible_mask);
cpumask_copy(policy->cpus, cpu_online_mask);
} else {
cpumask_setall(policy->cpus);
}
return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
}
static struct cpufreq_driver exynos_driver = {
.flags = CPUFREQ_STICKY,
.verify = exynos_verify_speed,
.target = exynos_target,
.get = exynos_getspeed,
.init = exynos_cpufreq_cpu_init,
.name = "exynos_cpufreq",
#ifdef CONFIG_PM
.suspend = exynos_cpufreq_suspend,
.resume = exynos_cpufreq_resume,
#endif
};
static int __init exynos_cpufreq_init(void)
{
int ret = -EINVAL;
exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
if (!exynos_info)
return -ENOMEM;
if (soc_is_exynos4210())
ret = exynos4210_cpufreq_init(exynos_info);
else
pr_err("%s: CPU type not found\n", __func__);
if (ret)
goto err_vdd_arm;
if (exynos_info->set_freq == NULL) {
pr_err("%s: No set_freq function (ERR)\n", __func__);
goto err_vdd_arm;
}
arm_regulator = regulator_get(NULL, "vdd_arm");
if (IS_ERR(arm_regulator)) {
pr_err("%s: failed to get resource vdd_arm\n", __func__);
goto err_vdd_arm;
}
register_pm_notifier(&exynos_cpufreq_nb);
if (cpufreq_register_driver(&exynos_driver)) {
pr_err("%s: failed to register cpufreq driver\n", __func__);
goto err_cpufreq;
}
return 0;
err_cpufreq:
unregister_pm_notifier(&exynos_cpufreq_nb);
if (!IS_ERR(arm_regulator))
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
}
late_initcall(exynos_cpufreq_init);
This diff is collapsed.
 /*
- * linux/arch/arm/plat-omap/cpu-omap.c
- *
- * CPU frequency scaling for OMAP
+ * CPU frequency scaling for OMAP using OPP information
  *
  * Copyright (C) 2005 Nokia Corporation
  * Written by Tony Lindgren <tony@atomide.com>
  *
  * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
  *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -21,48 +22,49 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/opp.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
 
-#include <mach/hardware.h>
-#include <plat/clock.h>
 #include <asm/system.h>
+#include <asm/smp_plat.h>
+#include <asm/cpu.h>
 
-#define VERY_HI_RATE	900000000
+#include <plat/clock.h>
+#include <plat/omap-pm.h>
+#include <plat/common.h>
+#include <plat/omap_device.h>
 
-static struct cpufreq_frequency_table *freq_table;
+#include <mach/hardware.h>
 
-#ifdef CONFIG_ARCH_OMAP1
-#define MPU_CLK		"mpu"
-#else
-#define MPU_CLK		"virt_prcm_set"
+#ifdef CONFIG_SMP
+struct lpj_info {
+	unsigned long	ref;
+	unsigned int	freq;
+};
+
+static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
+static struct lpj_info global_lpj_ref;
 #endif
 
+static struct cpufreq_frequency_table *freq_table;
+static atomic_t freq_table_users = ATOMIC_INIT(0);
 static struct clk *mpu_clk;
+static char *mpu_clk_name;
+static struct device *mpu_dev;
 
-/* TODO: Add support for SDRAM timing changes */
-
 static int omap_verify_speed(struct cpufreq_policy *policy)
 {
-	if (freq_table)
-		return cpufreq_frequency_table_verify(policy, freq_table);
-
-	if (policy->cpu)
+	if (!freq_table)
 		return -EINVAL;
-
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
-	policy->min = clk_round_rate(mpu_clk, policy->min * 1000) / 1000;
-	policy->max = clk_round_rate(mpu_clk, policy->max * 1000) / 1000;
-	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
-				     policy->cpuinfo.max_freq);
-	return 0;
+	return cpufreq_frequency_table_verify(policy, freq_table);
 }
 
 static unsigned int omap_getspeed(unsigned int cpu)
 {
 	unsigned long rate;
 
-	if (cpu)
+	if (cpu >= NR_CPUS)
 		return 0;
 
 	rate = clk_get_rate(mpu_clk) / 1000;
@@ -73,68 +75,151 @@ static int omap_target(struct cpufreq_policy *policy,
 		       unsigned int target_freq,
 		       unsigned int relation)
 {
-	struct cpufreq_freqs freqs;
+	unsigned int i;
 	int ret = 0;
+	struct cpufreq_freqs freqs;
 
-	/* Ensure desired rate is within allowed range. Some govenors
-	 * (ondemand) will just pass target_freq=0 to get the minimum. */
-	if (target_freq < policy->min)
-		target_freq = policy->min;
-	if (target_freq > policy->max)
-		target_freq = policy->max;
+	if (!freq_table) {
+		dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
+				policy->cpu);
+		return -EINVAL;
+	}
 
-	freqs.old = omap_getspeed(0);
-	freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
-	freqs.cpu = 0;
+	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+			relation, &i);
+	if (ret) {
+		dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
+			__func__, policy->cpu, target_freq, ret);
+		return ret;
+	}
+	freqs.new = freq_table[i].frequency;
+	if (!freqs.new) {
+		dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
+			policy->cpu, target_freq);
+		return -EINVAL;
+	}
 
-	if (freqs.old == freqs.new)
+	freqs.old = omap_getspeed(policy->cpu);
+	freqs.cpu = policy->cpu;
+
+	if (freqs.old == freqs.new && policy->cur == freqs.new)
 		return ret;
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	/* notifiers */
+	for_each_cpu(i, policy->cpus) {
+		freqs.cpu = i;
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	}
 
 #ifdef CONFIG_CPU_FREQ_DEBUG
-	printk(KERN_DEBUG "cpufreq-omap: transition: %u --> %u\n",
-	       freqs.old, freqs.new);
+	pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new);
 #endif
 
 	ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+	freqs.new = omap_getspeed(policy->cpu);
+
+#ifdef CONFIG_SMP
+	/*
+	 * Note that loops_per_jiffy is not updated on SMP systems in
+	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
+	 * on frequency transition. We need to update all dependent CPUs.
+	 */
+	for_each_cpu(i, policy->cpus) {
+		struct lpj_info *lpj = &per_cpu(lpj_ref, i);
+		if (!lpj->freq) {
+			lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
+			lpj->freq = freqs.old;
+		}
+
+		per_cpu(cpu_data, i).loops_per_jiffy =
+			cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
+	}
+
+	/* And don't forget to adjust the global one */
+	if (!global_lpj_ref.freq) {
+		global_lpj_ref.ref = loops_per_jiffy;
+		global_lpj_ref.freq = freqs.old;
+	}
+	loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
+					freqs.new);
+#endif
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	/* notifiers */
+	for_each_cpu(i, policy->cpus) {
+		freqs.cpu = i;
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	}
 
 	return ret;
 }
 
+static inline void freq_table_free(void)
+{
+	if (atomic_dec_and_test(&freq_table_users))
+		opp_free_cpufreq_table(mpu_dev, &freq_table);
+}
+
 static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
 {
 	int result = 0;
 
-	mpu_clk = clk_get(NULL, MPU_CLK);
+	mpu_clk = clk_get(NULL, mpu_clk_name);
 	if (IS_ERR(mpu_clk))
 		return PTR_ERR(mpu_clk);
 
-	if (policy->cpu != 0)
-		return -EINVAL;
+	if (policy->cpu >= NR_CPUS) {
+		result = -EINVAL;
+		goto fail_ck;
+	}
+
+	policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
+
+	if (atomic_inc_return(&freq_table_users) == 1)
+		result = opp_init_cpufreq_table(mpu_dev, &freq_table);
 
-	policy->cur = policy->min = policy->max = omap_getspeed(0);
+	if (result) {
+		dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+				__func__, policy->cpu, result);
+		goto fail_ck;
+	}
 
-	clk_init_cpufreq_table(&freq_table);
-	if (freq_table) {
-		result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
-		if (!result)
-			cpufreq_frequency_table_get_attr(freq_table,
-							policy->cpu);
-	} else {
-		policy->cpuinfo.min_freq = clk_round_rate(mpu_clk, 0) / 1000;
-		policy->cpuinfo.max_freq = clk_round_rate(mpu_clk,
-							VERY_HI_RATE) / 1000;
+	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+	if (result)
+		goto fail_table;
+
+	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+	policy->min = policy->cpuinfo.min_freq;
+	policy->max = policy->cpuinfo.max_freq;
+	policy->cur = omap_getspeed(policy->cpu);
+
+	/*
+	 * On OMAP SMP configuartion, both processors share the voltage
+	 * and clock. So both CPUs needs to be scaled together and hence
+	 * needs software co-ordination. Use cpufreq affected_cpus
+	 * interface to handle this scenario. Additional is_smp() check
+	 * is to keep SMP_ON_UP build working.
+	 */
+	if (is_smp()) {
+		policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+		cpumask_setall(policy->cpus);
 	}
 
 	/* FIXME: what's the actual transition time? */
 	policy->cpuinfo.transition_latency = 300 * 1000;
 
 	return 0;
+
+fail_table:
+	freq_table_free();
+fail_ck:
+	clk_put(mpu_clk);
+	return result;
 }
 
 static int omap_cpu_exit(struct cpufreq_policy *policy)
 {
-	clk_exit_cpufreq_table(&freq_table);
+	freq_table_free();
 	clk_put(mpu_clk);
 	return 0;
 }
@@ -157,15 +242,33 @@ static struct cpufreq_driver omap_driver = {
 
 static int __init omap_cpufreq_init(void)
 {
+	if (cpu_is_omap24xx())
+		mpu_clk_name = "virt_prcm_set";
+	else if (cpu_is_omap34xx())
+		mpu_clk_name = "dpll1_ck";
+	else if (cpu_is_omap44xx())
+		mpu_clk_name = "dpll_mpu_ck";
+
+	if (!mpu_clk_name) {
+		pr_err("%s: unsupported Silicon?\n", __func__);
+		return -EINVAL;
+	}
+
+	mpu_dev = omap_device_get_by_hwmod_name("mpu");
+	if (!mpu_dev) {
+		pr_warning("%s: unable to get the mpu device\n", __func__);
+		return -EINVAL;
+	}
+
 	return cpufreq_register_driver(&omap_driver);
 }
 
-arch_initcall(omap_cpufreq_init);
+static void __exit omap_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&omap_driver);
+}
 
-/*
- * if ever we want to remove this, upon cleanup call:
- *
- * cpufreq_unregister_driver()
- * cpufreq_frequency_table_put_attr()
- */
+MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
+MODULE_LICENSE("GPL");
+module_init(omap_cpufreq_init);
+module_exit(omap_cpufreq_exit);
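The rewritten driver no longer guesses rates with clk_round_rate(); it builds its frequency table from whatever operating points (OPPs) were registered for the MPU device, via opp_init_cpufreq_table() above. A hypothetical sketch of where such OPPs might come from; the frequencies and voltages below are made up, the real tables live in the OMAP OPP data, and error handling is omitted:

#include <linux/opp.h>
#include <linux/device.h>

/* Illustrative only: not the real OMAP operating points. */
static int example_register_mpu_opps(struct device *mpu_dev)
{
	/* Each (rate in Hz, voltage in uV) pair becomes one cpufreq table
	 * row once opp_init_cpufreq_table() runs in omap_cpu_init(). */
	opp_add(mpu_dev, 300000000,  975000);
	opp_add(mpu_dev, 600000000, 1100000);
	opp_add(mpu_dev, 800000000, 1200000);
	return 0;
}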
 /*
- * (c) 2003-2010 Advanced Micro Devices, Inc.
+ * (c) 2003-2012 Advanced Micro Devices, Inc.
  * Your use of this code is subject to the terms and conditions of the
  * GNU general public license version 2. See "COPYING" or
  * http://www.gnu.org/licenses/gpl.html
  *
- * Support : mark.langsdorf@amd.com
+ * Maintainer:
+ * Andreas Herrmann <andreas.herrmann3@amd.com>
  *
  * Based on the powernow-k7.c module written by Dave Jones.
  * (C) 2003 Dave Jones on behalf of SuSE Labs
@@ -16,12 +17,14 @@
  * Valuable input gratefully received from Dave Jones, Pavel Machek,
  * Dominik Brodowski, Jacob Shin, and others.
  * Originally developed by Paul Devriendt.
- * Processor information obtained from Chapter 9 (Power and Thermal Management)
- * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
- * Opteron Processors" available for download from www.amd.com
  *
- * Tables for specific CPUs can be inferred from
- * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
+ * Processor information obtained from Chapter 9 (Power and Thermal
+ * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
+ * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
+ * Power Management" in BKDGs for newer AMD CPU families.
+ *
+ * Tables for specific CPUs can be inferred from AMD's processor
+ * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
 */
 #include <linux/kernel.h>
@@ -54,6 +57,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
 static int cpu_family = CPU_OPTERON;
 
+/* array to map SW pstate number to acpi state */
+static u32 ps_to_as[8];
+
 /* core performance boost */
 static bool cpb_capable, cpb_enabled;
 static struct msr __percpu *msrs;
@@ -82,7 +88,7 @@ static u32 find_khz_freq_from_fid(u32 fid)
 static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
 		u32 pstate)
 {
-	return data[pstate].frequency;
+	return data[ps_to_as[pstate]].frequency;
 }
 
 /* Return the vco fid for an input fid
@@ -926,6 +932,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
+		ps_to_as[index] = i;
+
+		/* Frequency may be rounded for these */
+		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
+			|| boot_cpu_data.x86 == 0x11) {
 			rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
 			if (!(hi & HW_PSTATE_VALID_MASK)) {
 				pr_debug("invalid pstate %d, ignoring\n", index);
@@ -933,16 +946,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 				continue;
 			}
 
-		powernow_table[i].index = index;
-
-		/* Frequency may be rounded for these */
-		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
-			|| boot_cpu_data.x86 == 0x11) {
 			powernow_table[i].frequency =
 				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
 		} else
 			powernow_table[i].frequency =
 				data->acpi_data.states[i].core_frequency * 1000;
+
+		powernow_table[i].index = index;
 	}
 
 	return 0;
 }
@@ -1189,7 +1199,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	powernow_k8_acpi_pst_values(data, newstate);
 
 	if (cpu_family == CPU_HW_PSTATE)
-		ret = transition_frequency_pstate(data, newstate);
+		ret = transition_frequency_pstate(data,
+			data->powernow_table[newstate].index);
 	else
 		ret = transition_frequency_fidvid(data, newstate);
 	if (ret) {
@@ -1202,7 +1213,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				newstate);
+				data->powernow_table[newstate].index);
 	else
 		pol->cur = find_khz_freq_from_fid(data->currfid);
 	ret = 0;
...
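The indexing fix keeps two mappings consistent once invalid ACPI entries have been skipped: powernow_table[i].index stores the hardware P-state for table row i, while ps_to_as[pstate] stores the table row for a hardware P-state. A small illustrative sketch of the two lookup directions (not the driver's actual code layout):

#include <linux/cpufreq.h>

/* Illustrative only. Row 2 of the table may describe HW P-state 3 if an
 * earlier ACPI state was dropped; ps_to_as[3] == 2 then recovers the row. */
static u32 example_khz_for_hw_pstate(struct cpufreq_frequency_table *table,
				     const u32 ps_to_as[], u32 pstate)
{
	return table[ps_to_as[pstate]].frequency;
}

static u32 example_hw_pstate_for_row(struct cpufreq_frequency_table *table,
				     unsigned int row)
{
	return table[row].index;
}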
@@ -8,6 +8,8 @@
  * published by the Free Software Foundation.
 */
 
+#define pr_fmt(fmt) "cpufreq: " fmt
+
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -91,7 +93,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 	if (freqs.old == freqs.new)
 		return 0;
 
-	pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
+	pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
 
 	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -101,7 +103,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 					    dvfs->vddarm_min,
 					    dvfs->vddarm_max);
 		if (ret != 0) {
-			pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+			pr_err("Failed to set VDDARM for %dkHz: %d\n",
 			       freqs.new, ret);
 			goto err;
 		}
@@ -110,7 +112,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 	ret = clk_set_rate(armclk, freqs.new * 1000);
 	if (ret < 0) {
-		pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
+		pr_err("Failed to set rate %dkHz: %d\n",
 		       freqs.new, ret);
 		goto err;
 	}
@@ -123,14 +125,14 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
 					    dvfs->vddarm_min,
 					    dvfs->vddarm_max);
 		if (ret != 0) {
-			pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+			pr_err("Failed to set VDDARM for %dkHz: %d\n",
 			       freqs.new, ret);
 			goto err_clk;
 		}
 	}
 #endif
 
-	pr_debug("cpufreq: Set actual frequency %lukHz\n",
+	pr_debug("Set actual frequency %lukHz\n",
 		 clk_get_rate(armclk) / 1000);
 
 	return 0;
@@ -153,7 +155,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
 
 	count = regulator_count_voltages(vddarm);
 	if (count < 0) {
-		pr_err("cpufreq: Unable to check supported voltages\n");
+		pr_err("Unable to check supported voltages\n");
 	}
 
 	freq = s3c64xx_freq_table;
@@ -171,7 +173,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
 		}
 
 		if (!found) {
-			pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+			pr_debug("%dkHz unsupported by regulator\n",
 				 freq->frequency);
 			freq->frequency = CPUFREQ_ENTRY_INVALID;
 		}
@@ -194,13 +196,13 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 		return -EINVAL;
 
 	if (s3c64xx_freq_table == NULL) {
-		pr_err("cpufreq: No frequency information for this CPU\n");
+		pr_err("No frequency information for this CPU\n");
 		return -ENODEV;
 	}
 
 	armclk = clk_get(NULL, "armclk");
 	if (IS_ERR(armclk)) {
-		pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
+		pr_err("Unable to obtain ARMCLK: %ld\n",
 		       PTR_ERR(armclk));
 		return PTR_ERR(armclk);
 	}
@@ -209,12 +211,19 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 	vddarm = regulator_get(NULL, "vddarm");
 	if (IS_ERR(vddarm)) {
 		ret = PTR_ERR(vddarm);
-		pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
-		pr_err("cpufreq: Only frequency scaling available\n");
+		pr_err("Failed to obtain VDDARM: %d\n", ret);
+		pr_err("Only frequency scaling available\n");
 		vddarm = NULL;
 	} else {
 		s3c64xx_cpufreq_config_regulator();
 	}
+
+	vddint = regulator_get(NULL, "vddint");
+	if (IS_ERR(vddint)) {
+		ret = PTR_ERR(vddint);
+		pr_err("Failed to obtain VDDINT: %d\n", ret);
+		vddint = NULL;
+	}
 #endif
 
 	freq = s3c64xx_freq_table;
@@ -225,7 +234,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 		r = clk_round_rate(armclk, freq->frequency * 1000);
 		r /= 1000;
 
 		if (r != freq->frequency) {
-			pr_debug("cpufreq: %dkHz unsupported by clock\n",
+			pr_debug("%dkHz unsupported by clock\n",
 				 freq->frequency);
 			freq->frequency = CPUFREQ_ENTRY_INVALID;
 		}
@@ -248,7 +257,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
 	ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
 	if (ret != 0) {
-		pr_err("cpufreq: Failed to configure frequency table: %d\n",
+		pr_err("Failed to configure frequency table: %d\n",
 		       ret);
 		regulator_put(vddarm);
 		clk_put(armclk);
...
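The log-message cleanup relies on the standard pr_fmt() hook in the kernel's printk machinery: when a file defines pr_fmt() before its includes, every pr_err()/pr_debug() in that file gets the prefix pasted onto its format string, so the literal "cpufreq: " prefixes above can be dropped. A minimal illustration, not taken from the driver:

#define pr_fmt(fmt) "cpufreq: " fmt	/* must precede the includes */

#include <linux/kernel.h>
#include <linux/printk.h>

static void example(void)
{
	/* Emits "cpufreq: Transition 400000-800000kHz" without repeating
	 * the prefix at every call site. */
	pr_info("Transition %d-%dkHz\n", 400000, 800000);
}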