Commit e4f5a3ad authored by Rafael J. Wysocki

Merge branch 'pm-cpuidle'

* pm-cpuidle: (51 commits)
  cpuidle: add maintainer entry
  ARM: s3c64xx: cpuidle: use init/exit common routine
  SH: cpuidle: use init/exit common routine
  cpuidle: fix comment format
  ARM: imx: cpuidle: use init/exit common routine
  ARM: davinci: cpuidle: use init/exit common routine
  ARM: kirkwood: cpuidle: use init/exit common routine
  ARM: calxeda: cpuidle: use init/exit common routine
  ARM: tegra: cpuidle: use init/exit common routine for tegra3
  ARM: tegra: cpuidle: use init/exit common routine for tegra2
  ARM: OMAP4: cpuidle: use init/exit common routine
  ARM: shmobile: cpuidle: use init/exit common routine
  ARM: tegra: cpuidle: use init/exit common routine
  ARM: OMAP3: cpuidle: use init/exit common routine
  ARM: at91: cpuidle: use init/exit common routine
  ARM: ux500: cpuidle: use init/exit common routine
  cpuidle: make a single register function for all
  ARM: ux500: cpuidle: replace for_each_online_cpu by for_each_possible_cpu
  cpuidle: remove en_core_tk_irqen flag
  ARM: OMAP3: remove cpuidle_wrap_enter
  ...
parents ae620830 a8e39c35
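
Most of the driver patches in this series converge on the new cpuidle_register() helper instead of open-coding driver plus per-CPU device registration. The sketch below contrasts the two shapes; foo_idle_driver and the init function names are hypothetical, not code from this merge, and assume <linux/cpuidle.h> plus a populated struct cpuidle_driver.

/* Before: register the driver, then hand-register one cpuidle_device per CPU.
 * (Error unwinding kept minimal, as in several of the drivers converted below.)
 */
static DEFINE_PER_CPU(struct cpuidle_device, foo_idle_dev);

static int __init foo_idle_init_old(void)
{
	struct cpuidle_device *dev;
	int cpu, ret;

	ret = cpuidle_register_driver(&foo_idle_driver);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		dev = &per_cpu(foo_idle_dev, cpu);
		dev->cpu = cpu;

		ret = cpuidle_register_device(dev);
		if (ret) {
			cpuidle_unregister_driver(&foo_idle_driver);
			return ret;
		}
	}
	return 0;
}

/* After: one call; the core owns the per-CPU devices. */
static int __init foo_idle_init_new(void)
{
	return cpuidle_register(&foo_idle_driver, NULL);
}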
@@ -15,11 +15,17 @@ has mechanisms in place to support actual entry-exit into CPU idle states.
cpuidle driver initializes the cpuidle_device structure for each CPU device
and registers with cpuidle using cpuidle_register_device.

If all the idle states are the same, the wrapper function cpuidle_register
could be used instead.

It can also support the dynamic changes (like battery <-> AC), by using
cpuidle_pause_and_lock, cpuidle_disable_device and cpuidle_enable_device,
cpuidle_resume_and_unlock.

Interfaces:
extern int cpuidle_register(struct cpuidle_driver *drv,
			    const struct cpumask *const coupled_cpus);
extern int cpuidle_unregister(struct cpuidle_driver *drv);
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
......
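
The dynamic-change interfaces named in the hunk above (cpuidle_pause_and_lock() and friends) are typically used to take a CPU's idle states offline while reconfiguring them, e.g. on a battery <-> AC transition. A minimal sketch, assuming dev already points to that CPU's registered cpuidle_device:

static void foo_idle_reconfigure(struct cpuidle_device *dev)
{
	cpuidle_pause_and_lock();	/* stop idle entry, take cpuidle_lock */
	cpuidle_disable_device(dev);

	/* ... adjust platform/idle-state parameters here ... */

	if (cpuidle_enable_device(dev))
		pr_warn("foo: could not re-enable cpuidle device\n");
	cpuidle_resume_and_unlock();
}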
@@ -2206,6 +2206,15 @@ S:	Maintained
F:	drivers/cpufreq/
F:	include/linux/cpufreq.h

CPUIDLE DRIVERS
M:	Rafael J. Wysocki <rjw@sisk.pl>
M:	Daniel Lezcano <daniel.lezcano@linaro.org>
L:	linux-pm@vger.kernel.org
S:	Maintained
T:	git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
F:	drivers/cpuidle/*
F:	include/linux/cpuidle.h

CPUID/MSR DRIVER
M:	"H. Peter Anvin" <hpa@zytor.com>
S:	Maintained
......
@@ -56,7 +56,6 @@ CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CPU_IDLE=y
CONFIG_CPU_IDLE_KIRKWOOD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
......
...@@ -27,8 +27,6 @@ ...@@ -27,8 +27,6 @@
#define AT91_MAX_STATES 2 #define AT91_MAX_STATES 2
static DEFINE_PER_CPU(struct cpuidle_device, at91_cpuidle_device);
/* Actual code that puts the SoC in different idle states */ /* Actual code that puts the SoC in different idle states */
static int at91_enter_idle(struct cpuidle_device *dev, static int at91_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, struct cpuidle_driver *drv,
...@@ -47,7 +45,6 @@ static int at91_enter_idle(struct cpuidle_device *dev, ...@@ -47,7 +45,6 @@ static int at91_enter_idle(struct cpuidle_device *dev,
static struct cpuidle_driver at91_idle_driver = { static struct cpuidle_driver at91_idle_driver = {
.name = "at91_idle", .name = "at91_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states[0] = ARM_CPUIDLE_WFI_STATE, .states[0] = ARM_CPUIDLE_WFI_STATE,
.states[1] = { .states[1] = {
.enter = at91_enter_idle, .enter = at91_enter_idle,
...@@ -61,20 +58,9 @@ static struct cpuidle_driver at91_idle_driver = { ...@@ -61,20 +58,9 @@ static struct cpuidle_driver at91_idle_driver = {
}; };
/* Initialize CPU idle by registering the idle states */ /* Initialize CPU idle by registering the idle states */
static int at91_init_cpuidle(void) static int __init at91_init_cpuidle(void)
{ {
struct cpuidle_device *device; return cpuidle_register(&at91_idle_driver, NULL);
device = &per_cpu(at91_cpuidle_device, smp_processor_id());
device->state_count = AT91_MAX_STATES;
cpuidle_register_driver(&at91_idle_driver);
if (cpuidle_register_device(device)) {
printk(KERN_ERR "at91_init_cpuidle: Failed registering\n");
return -EIO;
}
return 0;
} }
device_initcall(at91_init_cpuidle); device_initcall(at91_init_cpuidle);
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#define DAVINCI_CPUIDLE_MAX_STATES 2 #define DAVINCI_CPUIDLE_MAX_STATES 2
static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
static void __iomem *ddr2_reg_base; static void __iomem *ddr2_reg_base;
static bool ddr2_pdown; static bool ddr2_pdown;
...@@ -50,14 +49,10 @@ static void davinci_save_ddr_power(int enter, bool pdown) ...@@ -50,14 +49,10 @@ static void davinci_save_ddr_power(int enter, bool pdown)
/* Actual code that puts the SoC in different idle states */ /* Actual code that puts the SoC in different idle states */
static int davinci_enter_idle(struct cpuidle_device *dev, static int davinci_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, struct cpuidle_driver *drv, int index)
int index)
{ {
davinci_save_ddr_power(1, ddr2_pdown); davinci_save_ddr_power(1, ddr2_pdown);
cpu_do_idle();
index = cpuidle_wrap_enter(dev, drv, index,
arm_cpuidle_simple_enter);
davinci_save_ddr_power(0, ddr2_pdown); davinci_save_ddr_power(0, ddr2_pdown);
return index; return index;
...@@ -66,7 +61,6 @@ static int davinci_enter_idle(struct cpuidle_device *dev, ...@@ -66,7 +61,6 @@ static int davinci_enter_idle(struct cpuidle_device *dev,
static struct cpuidle_driver davinci_idle_driver = { static struct cpuidle_driver davinci_idle_driver = {
.name = "cpuidle-davinci", .name = "cpuidle-davinci",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states[0] = ARM_CPUIDLE_WFI_STATE, .states[0] = ARM_CPUIDLE_WFI_STATE,
.states[1] = { .states[1] = {
.enter = davinci_enter_idle, .enter = davinci_enter_idle,
...@@ -81,12 +75,8 @@ static struct cpuidle_driver davinci_idle_driver = { ...@@ -81,12 +75,8 @@ static struct cpuidle_driver davinci_idle_driver = {
static int __init davinci_cpuidle_probe(struct platform_device *pdev) static int __init davinci_cpuidle_probe(struct platform_device *pdev)
{ {
int ret;
struct cpuidle_device *device;
struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
if (!pdata) { if (!pdata) {
dev_err(&pdev->dev, "cannot get platform data\n"); dev_err(&pdev->dev, "cannot get platform data\n");
return -ENOENT; return -ENOENT;
...@@ -96,20 +86,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev) ...@@ -96,20 +86,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
ddr2_pdown = pdata->ddr2_pdown; ddr2_pdown = pdata->ddr2_pdown;
ret = cpuidle_register_driver(&davinci_idle_driver); return cpuidle_register(&davinci_idle_driver, NULL);
if (ret) {
dev_err(&pdev->dev, "failed to register driver\n");
return ret;
}
ret = cpuidle_register_device(device);
if (ret) {
dev_err(&pdev->dev, "failed to register device\n");
cpuidle_unregister_driver(&davinci_idle_driver);
return ret;
}
return 0;
} }
static struct platform_driver davinci_cpuidle_driver = { static struct platform_driver davinci_cpuidle_driver = {
......
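
With the en_core_tk_irqen flag gone, the core handles interrupt state and idle-time accounting around every ->enter callback, which is why the cpuidle_wrap_enter() indirection could be dropped from davinci above. A state's enter hook now reduces to roughly the following (the foo_* helpers are placeholders):

static int foo_enter_idle(struct cpuidle_device *dev,
			  struct cpuidle_driver *drv, int index)
{
	foo_prepare_low_power();	/* placeholder platform hook */
	cpu_do_idle();			/* WFI */
	foo_restore_from_low_power();	/* placeholder platform hook */

	return index;			/* state actually entered */
}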
...@@ -58,7 +58,6 @@ static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device); ...@@ -58,7 +58,6 @@ static DEFINE_PER_CPU(struct cpuidle_device, exynos4_cpuidle_device);
static struct cpuidle_driver exynos4_idle_driver = { static struct cpuidle_driver exynos4_idle_driver = {
.name = "exynos4_idle", .name = "exynos4_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
}; };
/* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */ /* Ext-GIC nIRQ/nFIQ is the only wakeup source in AFTR */
......
...@@ -30,7 +30,7 @@ obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o ...@@ -30,7 +30,7 @@ obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o
obj-$(CONFIG_CPU_FREQ_IMX) += cpufreq.o obj-$(CONFIG_CPU_FREQ_IMX) += cpufreq.o
ifeq ($(CONFIG_CPU_IDLE),y) ifeq ($(CONFIG_CPU_IDLE),y)
obj-y += cpuidle.o obj-$(CONFIG_SOC_IMX5) += cpuidle-imx5.o
obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o obj-$(CONFIG_SOC_IMX6Q) += cpuidle-imx6q.o
endif endif
......
/*
* Copyright (C) 2012 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/system_misc.h>
static int imx5_cpuidle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
arm_pm_idle();
return index;
}
static struct cpuidle_driver imx5_cpuidle_driver = {
.name = "imx5_cpuidle",
.owner = THIS_MODULE,
.states[0] = {
.enter = imx5_cpuidle_enter,
.exit_latency = 2,
.target_residency = 1,
.flags = CPUIDLE_FLAG_TIME_VALID,
.name = "IMX5 SRPG",
.desc = "CPU state retained,powered off",
},
.state_count = 1,
};
int __init imx5_cpuidle_init(void)
{
return cpuidle_register(&imx5_cpuidle_driver, NULL);
}
...@@ -6,7 +6,6 @@ ...@@ -6,7 +6,6 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/clockchips.h>
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/module.h> #include <linux/module.h>
#include <asm/cpuidle.h> #include <asm/cpuidle.h>
...@@ -21,10 +20,6 @@ static DEFINE_SPINLOCK(master_lock); ...@@ -21,10 +20,6 @@ static DEFINE_SPINLOCK(master_lock);
static int imx6q_enter_wait(struct cpuidle_device *dev, static int imx6q_enter_wait(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) struct cpuidle_driver *drv, int index)
{ {
int cpu = dev->cpu;
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
if (atomic_inc_return(&master) == num_online_cpus()) { if (atomic_inc_return(&master) == num_online_cpus()) {
/* /*
* With this lock, we prevent other cpu to exit and enter * With this lock, we prevent other cpu to exit and enter
...@@ -43,26 +38,13 @@ static int imx6q_enter_wait(struct cpuidle_device *dev, ...@@ -43,26 +38,13 @@ static int imx6q_enter_wait(struct cpuidle_device *dev,
cpu_do_idle(); cpu_do_idle();
done: done:
atomic_dec(&master); atomic_dec(&master);
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
return index; return index;
} }
/*
* For each cpu, setup the broadcast timer because local timer
* stops for the states other than WFI.
*/
static void imx6q_setup_broadcast_timer(void *arg)
{
int cpu = smp_processor_id();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}
static struct cpuidle_driver imx6q_cpuidle_driver = { static struct cpuidle_driver imx6q_cpuidle_driver = {
.name = "imx6q_cpuidle", .name = "imx6q_cpuidle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states = { .states = {
/* WFI */ /* WFI */
ARM_CPUIDLE_WFI_STATE, ARM_CPUIDLE_WFI_STATE,
...@@ -70,7 +52,8 @@ static struct cpuidle_driver imx6q_cpuidle_driver = { ...@@ -70,7 +52,8 @@ static struct cpuidle_driver imx6q_cpuidle_driver = {
{ {
.exit_latency = 50, .exit_latency = 50,
.target_residency = 75, .target_residency = 75,
.flags = CPUIDLE_FLAG_TIME_VALID, .flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_TIMER_STOP,
.enter = imx6q_enter_wait, .enter = imx6q_enter_wait,
.name = "WAIT", .name = "WAIT",
.desc = "Clock off", .desc = "Clock off",
...@@ -88,8 +71,5 @@ int __init imx6q_cpuidle_init(void) ...@@ -88,8 +71,5 @@ int __init imx6q_cpuidle_init(void)
/* Set chicken bit to get a reliable WAIT mode support */ /* Set chicken bit to get a reliable WAIT mode support */
imx6q_set_chicken_bit(); imx6q_set_chicken_bit();
/* Configure the broadcast timer on each cpu */ return cpuidle_register(&imx6q_cpuidle_driver, NULL);
on_each_cpu(imx6q_setup_broadcast_timer, NULL, 1);
return imx_cpuidle_init(&imx6q_cpuidle_driver);
} }
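
The imx6q change just above is representative of the CPUIDLE_FLAG_TIMER_STOP conversions in this series: instead of each driver calling clockevents_notify() around idle entry, the flag tells the core to move the CPU to the broadcast timer for that state. A sketch of such a state table (driver name, callback and latencies are invented):

static struct cpuidle_driver foo_cpuidle_driver = {
	.name		= "foo_cpuidle",
	.owner		= THIS_MODULE,
	.states		= {
		ARM_CPUIDLE_WFI_STATE,		/* C1: plain WFI, local timer keeps running */
		{
			.enter			= foo_enter_deep,
			.exit_latency		= 50,	/* us, made up for the sketch */
			.target_residency	= 75,	/* us, made up for the sketch */
			.flags			= CPUIDLE_FLAG_TIME_VALID |
						  CPUIDLE_FLAG_TIMER_STOP,
			.name			= "DEEP",
			.desc			= "local timer stops, use broadcast",
		},
	},
	.state_count	= 2,
};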
/*
* Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2012 Linaro Ltd.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/cpuidle.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
static struct cpuidle_device __percpu * imx_cpuidle_devices;
static void __init imx_cpuidle_devices_uninit(void)
{
int cpu_id;
struct cpuidle_device *dev;
for_each_possible_cpu(cpu_id) {
dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id);
cpuidle_unregister_device(dev);
}
free_percpu(imx_cpuidle_devices);
}
int __init imx_cpuidle_init(struct cpuidle_driver *drv)
{
struct cpuidle_device *dev;
int cpu_id, ret;
if (drv->state_count > CPUIDLE_STATE_MAX) {
pr_err("%s: state_count exceeds maximum\n", __func__);
return -EINVAL;
}
ret = cpuidle_register_driver(drv);
if (ret) {
pr_err("%s: Failed to register cpuidle driver with error: %d\n",
__func__, ret);
return ret;
}
imx_cpuidle_devices = alloc_percpu(struct cpuidle_device);
if (imx_cpuidle_devices == NULL) {
ret = -ENOMEM;
goto unregister_drv;
}
/* initialize state data for each cpuidle_device */
for_each_possible_cpu(cpu_id) {
dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id);
dev->cpu = cpu_id;
dev->state_count = drv->state_count;
ret = cpuidle_register_device(dev);
if (ret) {
pr_err("%s: Failed to register cpu %u, error: %d\n",
__func__, cpu_id, ret);
goto uninit;
}
}
return 0;
uninit:
imx_cpuidle_devices_uninit();
unregister_drv:
cpuidle_unregister_driver(drv);
return ret;
}
...@@ -10,18 +10,16 @@ ...@@ -10,18 +10,16 @@
* http://www.gnu.org/copyleft/gpl.html * http://www.gnu.org/copyleft/gpl.html
*/ */
#include <linux/cpuidle.h>
#ifdef CONFIG_CPU_IDLE #ifdef CONFIG_CPU_IDLE
extern int imx_cpuidle_init(struct cpuidle_driver *drv); extern int imx5_cpuidle_init(void);
extern int imx6q_cpuidle_init(void); extern int imx6q_cpuidle_init(void);
#else #else
static inline int imx_cpuidle_init(struct cpuidle_driver *drv) static inline int imx5_cpuidle_init(void)
{ {
return -ENODEV; return 0;
} }
static inline int imx6q_cpuidle_init(void) static inline int imx6q_cpuidle_init(void)
{ {
return -ENODEV; return 0;
} }
#endif #endif
...@@ -149,33 +149,6 @@ static void imx5_pm_idle(void) ...@@ -149,33 +149,6 @@ static void imx5_pm_idle(void)
imx5_cpu_do_idle(); imx5_cpu_do_idle();
} }
static int imx5_cpuidle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
int ret;
ret = imx5_cpu_do_idle();
if (ret < 0)
return ret;
return idx;
}
static struct cpuidle_driver imx5_cpuidle_driver = {
.name = "imx5_cpuidle",
.owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states[0] = {
.enter = imx5_cpuidle_enter,
.exit_latency = 2,
.target_residency = 1,
.flags = CPUIDLE_FLAG_TIME_VALID,
.name = "IMX5 SRPG",
.desc = "CPU state retained,powered off",
},
.state_count = 1,
};
static int __init imx5_pm_common_init(void) static int __init imx5_pm_common_init(void)
{ {
int ret; int ret;
...@@ -193,8 +166,7 @@ static int __init imx5_pm_common_init(void) ...@@ -193,8 +166,7 @@ static int __init imx5_pm_common_init(void)
/* Set the registers to the default cpu idle state. */ /* Set the registers to the default cpu idle state. */
mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE); mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE);
imx_cpuidle_init(&imx5_cpuidle_driver); return imx5_cpuidle_init();
return 0;
} }
void __init imx51_pm_init(void) void __init imx51_pm_init(void)
......
...@@ -249,7 +249,6 @@ extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state); ...@@ -249,7 +249,6 @@ extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
extern int omap4_finish_suspend(unsigned long cpu_state); extern int omap4_finish_suspend(unsigned long cpu_state);
extern void omap4_cpu_resume(void); extern void omap4_cpu_resume(void);
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state); extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
extern u32 omap4_mpuss_read_prev_context_state(void);
#else #else
static inline int omap4_enter_lowpower(unsigned int cpu, static inline int omap4_enter_lowpower(unsigned int cpu,
unsigned int power_state) unsigned int power_state)
...@@ -277,10 +276,6 @@ static inline int omap4_finish_suspend(unsigned long cpu_state) ...@@ -277,10 +276,6 @@ static inline int omap4_finish_suspend(unsigned long cpu_state)
static inline void omap4_cpu_resume(void) static inline void omap4_cpu_resume(void)
{} {}
static inline u32 omap4_mpuss_read_prev_context_state(void)
{
return 0;
}
#endif #endif
struct omap_sdrc_params; struct omap_sdrc_params;
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/cpu_pm.h> #include <linux/cpu_pm.h>
#include <asm/cpuidle.h>
#include "powerdomain.h" #include "powerdomain.h"
#include "clockdomain.h" #include "clockdomain.h"
...@@ -99,11 +100,15 @@ static struct omap3_idle_statedata omap3_idle_data[] = { ...@@ -99,11 +100,15 @@ static struct omap3_idle_statedata omap3_idle_data[] = {
}, },
}; };
/* Private functions */ /**
* omap3_enter_idle - Programs OMAP3 to enter the specified state
static int __omap3_enter_idle(struct cpuidle_device *dev, * @dev: cpuidle device
struct cpuidle_driver *drv, * @drv: cpuidle driver
int index) * @index: the index of state to be entered
*/
static int omap3_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{ {
struct omap3_idle_statedata *cx = &omap3_idle_data[index]; struct omap3_idle_statedata *cx = &omap3_idle_data[index];
...@@ -148,22 +153,6 @@ static int __omap3_enter_idle(struct cpuidle_device *dev, ...@@ -148,22 +153,6 @@ static int __omap3_enter_idle(struct cpuidle_device *dev,
return index; return index;
} }
/**
* omap3_enter_idle - Programs OMAP3 to enter the specified state
* @dev: cpuidle device
* @drv: cpuidle driver
* @index: the index of state to be entered
*
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
static inline int omap3_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
{
return cpuidle_wrap_enter(dev, drv, index, __omap3_enter_idle);
}
/** /**
* next_valid_state - Find next valid C-state * next_valid_state - Find next valid C-state
* @dev: cpuidle device * @dev: cpuidle device
...@@ -271,11 +260,9 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, ...@@ -271,11 +260,9 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
return ret; return ret;
} }
static DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
static struct cpuidle_driver omap3_idle_driver = { static struct cpuidle_driver omap3_idle_driver = {
.name = "omap3_idle", .name = "omap3_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.states = { .states = {
{ {
.enter = omap3_enter_idle_bm, .enter = omap3_enter_idle_bm,
...@@ -348,8 +335,6 @@ static struct cpuidle_driver omap3_idle_driver = { ...@@ -348,8 +335,6 @@ static struct cpuidle_driver omap3_idle_driver = {
*/ */
int __init omap3_idle_init(void) int __init omap3_idle_init(void)
{ {
struct cpuidle_device *dev;
mpu_pd = pwrdm_lookup("mpu_pwrdm"); mpu_pd = pwrdm_lookup("mpu_pwrdm");
core_pd = pwrdm_lookup("core_pwrdm"); core_pd = pwrdm_lookup("core_pwrdm");
per_pd = pwrdm_lookup("per_pwrdm"); per_pd = pwrdm_lookup("per_pwrdm");
...@@ -358,16 +343,5 @@ int __init omap3_idle_init(void) ...@@ -358,16 +343,5 @@ int __init omap3_idle_init(void)
if (!mpu_pd || !core_pd || !per_pd || !cam_pd) if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
return -ENODEV; return -ENODEV;
cpuidle_register_driver(&omap3_idle_driver); return cpuidle_register(&omap3_idle_driver, NULL);
dev = &per_cpu(omap3_idle_dev, smp_processor_id());
dev->cpu = 0;
if (cpuidle_register_device(dev)) {
printk(KERN_ERR "%s: CPUidle register device failed\n",
__func__);
return -EIO;
}
return 0;
} }
/* /*
* OMAP4 CPU idle Routines * OMAP4+ CPU idle Routines
* *
* Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2011-2013 Texas Instruments, Inc.
* Santosh Shilimkar <santosh.shilimkar@ti.com> * Santosh Shilimkar <santosh.shilimkar@ti.com>
* Rajendra Nayak <rnayak@ti.com> * Rajendra Nayak <rnayak@ti.com>
* *
...@@ -14,8 +14,8 @@ ...@@ -14,8 +14,8 @@
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/cpu_pm.h> #include <linux/cpu_pm.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/clockchips.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h> #include <asm/proc-fns.h>
#include "common.h" #include "common.h"
...@@ -24,13 +24,13 @@ ...@@ -24,13 +24,13 @@
#include "clockdomain.h" #include "clockdomain.h"
/* Machine specific information */ /* Machine specific information */
struct omap4_idle_statedata { struct idle_statedata {
u32 cpu_state; u32 cpu_state;
u32 mpu_logic_state; u32 mpu_logic_state;
u32 mpu_state; u32 mpu_state;
}; };
static struct omap4_idle_statedata omap4_idle_data[] = { static struct idle_statedata omap4_idle_data[] = {
{ {
.cpu_state = PWRDM_POWER_ON, .cpu_state = PWRDM_POWER_ON,
.mpu_state = PWRDM_POWER_ON, .mpu_state = PWRDM_POWER_ON,
...@@ -53,11 +53,12 @@ static struct clockdomain *cpu_clkdm[NR_CPUS]; ...@@ -53,11 +53,12 @@ static struct clockdomain *cpu_clkdm[NR_CPUS];
static atomic_t abort_barrier; static atomic_t abort_barrier;
static bool cpu_done[NR_CPUS]; static bool cpu_done[NR_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Private functions */ /* Private functions */
/** /**
* omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
* @dev: cpuidle device * @dev: cpuidle device
* @drv: cpuidle driver * @drv: cpuidle driver
* @index: the index of state to be entered * @index: the index of state to be entered
...@@ -66,7 +67,7 @@ static bool cpu_done[NR_CPUS]; ...@@ -66,7 +67,7 @@ static bool cpu_done[NR_CPUS];
* specified low power state selected by the governor. * specified low power state selected by the governor.
* Returns the amount of time spent in the low power state. * Returns the amount of time spent in the low power state.
*/ */
static int omap4_enter_idle_simple(struct cpuidle_device *dev, static int omap_enter_idle_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, struct cpuidle_driver *drv,
int index) int index)
{ {
...@@ -77,12 +78,11 @@ static int omap4_enter_idle_simple(struct cpuidle_device *dev, ...@@ -77,12 +78,11 @@ static int omap4_enter_idle_simple(struct cpuidle_device *dev,
return index; return index;
} }
static int omap4_enter_idle_coupled(struct cpuidle_device *dev, static int omap_enter_idle_coupled(struct cpuidle_device *dev,
struct cpuidle_driver *drv, struct cpuidle_driver *drv,
int index) int index)
{ {
struct omap4_idle_statedata *cx = &omap4_idle_data[index]; struct idle_statedata *cx = state_ptr + index;
int cpu_id = smp_processor_id();
local_fiq_disable(); local_fiq_disable();
...@@ -109,8 +109,6 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev, ...@@ -109,8 +109,6 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
} }
} }
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
/* /*
* Call idle CPU PM enter notifier chain so that * Call idle CPU PM enter notifier chain so that
* VFP and per CPU interrupt context is saved. * VFP and per CPU interrupt context is saved.
...@@ -149,11 +147,10 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev, ...@@ -149,11 +147,10 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
* Call idle CPU cluster PM exit notifier chain * Call idle CPU cluster PM exit notifier chain
* to restore GIC and wakeupgen context. * to restore GIC and wakeupgen context.
*/ */
if (omap4_mpuss_read_prev_context_state()) if ((cx->mpu_state == PWRDM_POWER_RET) &&
(cx->mpu_logic_state == PWRDM_POWER_OFF))
cpu_cluster_pm_exit(); cpu_cluster_pm_exit();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
fail: fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier); cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
cpu_done[dev->cpu] = false; cpu_done[dev->cpu] = false;
...@@ -163,49 +160,38 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev, ...@@ -163,49 +160,38 @@ static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
return index; return index;
} }
/*
* For each cpu, setup the broadcast timer because local timers
* stops for the states above C1.
*/
static void omap_setup_broadcast_timer(void *arg)
{
int cpu = smp_processor_id();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}
static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
static struct cpuidle_driver omap4_idle_driver = { static struct cpuidle_driver omap4_idle_driver = {
.name = "omap4_idle", .name = "omap4_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states = { .states = {
{ {
/* C1 - CPU0 ON + CPU1 ON + MPU ON */ /* C1 - CPU0 ON + CPU1 ON + MPU ON */
.exit_latency = 2 + 2, .exit_latency = 2 + 2,
.target_residency = 5, .target_residency = 5,
.flags = CPUIDLE_FLAG_TIME_VALID, .flags = CPUIDLE_FLAG_TIME_VALID,
.enter = omap4_enter_idle_simple, .enter = omap_enter_idle_simple,
.name = "C1", .name = "C1",
.desc = "MPUSS ON" .desc = "CPUx ON, MPUSS ON"
}, },
{ {
/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */ /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
.exit_latency = 328 + 440, .exit_latency = 328 + 440,
.target_residency = 960, .target_residency = 960,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
.enter = omap4_enter_idle_coupled, CPUIDLE_FLAG_TIMER_STOP,
.enter = omap_enter_idle_coupled,
.name = "C2", .name = "C2",
.desc = "MPUSS CSWR", .desc = "CPUx OFF, MPUSS CSWR",
}, },
{ {
/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */ /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
.exit_latency = 460 + 518, .exit_latency = 460 + 518,
.target_residency = 1100, .target_residency = 1100,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED, .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
.enter = omap4_enter_idle_coupled, CPUIDLE_FLAG_TIMER_STOP,
.enter = omap_enter_idle_coupled,
.name = "C3", .name = "C3",
.desc = "MPUSS OSWR", .desc = "CPUx OFF, MPUSS OSWR",
}, },
}, },
.state_count = ARRAY_SIZE(omap4_idle_data), .state_count = ARRAY_SIZE(omap4_idle_data),
...@@ -215,16 +201,13 @@ static struct cpuidle_driver omap4_idle_driver = { ...@@ -215,16 +201,13 @@ static struct cpuidle_driver omap4_idle_driver = {
/* Public functions */ /* Public functions */
/** /**
* omap4_idle_init - Init routine for OMAP4 idle * omap4_idle_init - Init routine for OMAP4+ idle
* *
* Registers the OMAP4 specific cpuidle driver to the cpuidle * Registers the OMAP4+ specific cpuidle driver to the cpuidle
* framework with the valid set of states. * framework with the valid set of states.
*/ */
int __init omap4_idle_init(void) int __init omap4_idle_init(void)
{ {
struct cpuidle_device *dev;
unsigned int cpu_id = 0;
mpu_pd = pwrdm_lookup("mpu_pwrdm"); mpu_pd = pwrdm_lookup("mpu_pwrdm");
cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm"); cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm"); cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
...@@ -236,22 +219,5 @@ int __init omap4_idle_init(void) ...@@ -236,22 +219,5 @@ int __init omap4_idle_init(void)
if (!cpu_clkdm[0] || !cpu_clkdm[1]) if (!cpu_clkdm[0] || !cpu_clkdm[1])
return -ENODEV; return -ENODEV;
/* Configure the broadcast timer on each cpu */ return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
for_each_cpu(cpu_id, cpu_online_mask) {
dev = &per_cpu(omap4_idle_dev, cpu_id);
dev->cpu = cpu_id;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
dev->coupled_cpus = *cpu_online_mask;
#endif
cpuidle_register_driver(&omap4_idle_driver);
if (cpuidle_register_device(dev)) {
pr_err("%s: CPUidle register failed\n", __func__);
return -EIO;
}
}
return 0;
} }
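
For coupled-idle drivers such as OMAP4 above, the second argument of cpuidle_register() supplies the coupled_cpus mask that each per-CPU device previously had to set by hand under CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED. A hypothetical driver following the same pattern:

static struct cpuidle_driver foo_coupled_driver = {
	.name		= "foo_coupled_idle",
	.owner		= THIS_MODULE,
	.states		= {
		ARM_CPUIDLE_WFI_STATE,
		{
			.enter			= foo_enter_coupled,	/* assumed callback */
			.exit_latency		= 1000,			/* made-up numbers */
			.target_residency	= 2000,
			.flags			= CPUIDLE_FLAG_TIME_VALID |
						  CPUIDLE_FLAG_COUPLED |
						  CPUIDLE_FLAG_TIMER_STOP,
			.name			= "C2",
			.desc			= "all CPUs down",
		},
	},
	.state_count	= 2,
};

static int __init foo_coupled_idle_init(void)
{
	/* each device registered by the core gets coupled_cpus = *cpu_online_mask */
	return cpuidle_register(&foo_coupled_driver, cpu_online_mask);
}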
...@@ -139,20 +139,6 @@ static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id) ...@@ -139,20 +139,6 @@ static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
} }
} }
/**
* omap4_mpuss_read_prev_context_state:
* Function returns the MPUSS previous context state
*/
u32 omap4_mpuss_read_prev_context_state(void)
{
u32 reg;
reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
return reg;
}
/* /*
* Store the CPU cluster state for L2X0 low power operations. * Store the CPU cluster state for L2X0 low power operations.
*/ */
......
...@@ -40,12 +40,9 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev, ...@@ -40,12 +40,9 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
return index; return index;
} }
static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
static struct cpuidle_driver s3c64xx_cpuidle_driver = { static struct cpuidle_driver s3c64xx_cpuidle_driver = {
.name = "s3c64xx_cpuidle", .name = "s3c64xx_cpuidle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states = { .states = {
{ {
.enter = s3c64xx_enter_idle, .enter = s3c64xx_enter_idle,
...@@ -61,16 +58,6 @@ static struct cpuidle_driver s3c64xx_cpuidle_driver = { ...@@ -61,16 +58,6 @@ static struct cpuidle_driver s3c64xx_cpuidle_driver = {
static int __init s3c64xx_init_cpuidle(void) static int __init s3c64xx_init_cpuidle(void)
{ {
int ret; return cpuidle_register(&s3c64xx_cpuidle_driver, NULL);
cpuidle_register_driver(&s3c64xx_cpuidle_driver);
ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
if (ret) {
pr_err("Failed to register cpuidle device: %d\n", ret);
return ret;
}
return 0;
} }
device_initcall(s3c64xx_init_cpuidle); device_initcall(s3c64xx_init_cpuidle);
...@@ -16,39 +16,22 @@ ...@@ -16,39 +16,22 @@
#include <asm/cpuidle.h> #include <asm/cpuidle.h>
#include <asm/io.h> #include <asm/io.h>
int shmobile_enter_wfi(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int index)
{
cpu_do_idle();
return 0;
}
static struct cpuidle_device shmobile_cpuidle_dev;
static struct cpuidle_driver shmobile_cpuidle_default_driver = { static struct cpuidle_driver shmobile_cpuidle_default_driver = {
.name = "shmobile_cpuidle", .name = "shmobile_cpuidle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states[0] = ARM_CPUIDLE_WFI_STATE, .states[0] = ARM_CPUIDLE_WFI_STATE,
.states[0].enter = shmobile_enter_wfi,
.safe_state_index = 0, /* C1 */ .safe_state_index = 0, /* C1 */
.state_count = 1, .state_count = 1,
}; };
static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver; static struct cpuidle_driver *cpuidle_drv = &shmobile_cpuidle_default_driver;
void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv) void __init shmobile_cpuidle_set_driver(struct cpuidle_driver *drv)
{ {
cpuidle_drv = drv; cpuidle_drv = drv;
} }
int shmobile_cpuidle_init(void) int __init shmobile_cpuidle_init(void)
{ {
struct cpuidle_device *dev = &shmobile_cpuidle_dev; return cpuidle_register(cpuidle_drv, NULL);
cpuidle_register_driver(cpuidle_drv);
dev->state_count = cpuidle_drv->state_count;
cpuidle_register_device(dev);
return 0;
} }
...@@ -13,9 +13,6 @@ extern int shmobile_clk_init(void); ...@@ -13,9 +13,6 @@ extern int shmobile_clk_init(void);
extern void shmobile_handle_irq_intc(struct pt_regs *); extern void shmobile_handle_irq_intc(struct pt_regs *);
extern struct platform_suspend_ops shmobile_suspend_ops; extern struct platform_suspend_ops shmobile_suspend_ops;
struct cpuidle_driver; struct cpuidle_driver;
struct cpuidle_device;
extern int shmobile_enter_wfi(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv); extern void shmobile_cpuidle_set_driver(struct cpuidle_driver *drv);
extern void sh7372_init_irq(void); extern void sh7372_init_irq(void);
......
...@@ -410,11 +410,9 @@ static int sh7372_enter_a4s(struct cpuidle_device *dev, ...@@ -410,11 +410,9 @@ static int sh7372_enter_a4s(struct cpuidle_device *dev,
static struct cpuidle_driver sh7372_cpuidle_driver = { static struct cpuidle_driver sh7372_cpuidle_driver = {
.name = "sh7372_cpuidle", .name = "sh7372_cpuidle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.state_count = 5, .state_count = 5,
.safe_state_index = 0, /* C1 */ .safe_state_index = 0, /* C1 */
.states[0] = ARM_CPUIDLE_WFI_STATE, .states[0] = ARM_CPUIDLE_WFI_STATE,
.states[0].enter = shmobile_enter_wfi,
.states[1] = { .states[1] = {
.name = "C2", .name = "C2",
.desc = "Core Standby Mode", .desc = "Core Standby Mode",
...@@ -450,12 +448,12 @@ static struct cpuidle_driver sh7372_cpuidle_driver = { ...@@ -450,12 +448,12 @@ static struct cpuidle_driver sh7372_cpuidle_driver = {
}, },
}; };
static void sh7372_cpuidle_init(void) static void __init sh7372_cpuidle_init(void)
{ {
shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver); shmobile_cpuidle_set_driver(&sh7372_cpuidle_driver);
} }
#else #else
static void sh7372_cpuidle_init(void) {} static void __init sh7372_cpuidle_init(void) {}
#endif #endif
#ifdef CONFIG_SUSPEND #ifdef CONFIG_SUSPEND
......
...@@ -23,39 +23,13 @@ ...@@ -23,39 +23,13 @@
static struct cpuidle_driver tegra_idle_driver = { static struct cpuidle_driver tegra_idle_driver = {
.name = "tegra_idle", .name = "tegra_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.state_count = 1, .state_count = 1,
.states = { .states = {
[0] = ARM_CPUIDLE_WFI_STATE_PWR(600), [0] = ARM_CPUIDLE_WFI_STATE_PWR(600),
}, },
}; };
static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
int __init tegra114_cpuidle_init(void) int __init tegra114_cpuidle_init(void)
{ {
int ret; return cpuidle_register(&tegra_idle_driver, NULL);
unsigned int cpu;
struct cpuidle_device *dev;
struct cpuidle_driver *drv = &tegra_idle_driver;
ret = cpuidle_register_driver(&tegra_idle_driver);
if (ret) {
pr_err("CPUidle driver registration failed\n");
return ret;
}
for_each_possible_cpu(cpu) {
dev = &per_cpu(tegra_idle_device, cpu);
dev->cpu = cpu;
dev->state_count = drv->state_count;
ret = cpuidle_register_device(dev);
if (ret) {
pr_err("CPU%u: CPUidle device registration failed\n",
cpu);
return ret;
}
}
return 0;
} }
...@@ -43,32 +43,33 @@ static atomic_t abort_barrier; ...@@ -43,32 +43,33 @@ static atomic_t abort_barrier;
static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
struct cpuidle_driver *drv, struct cpuidle_driver *drv,
int index); int index);
#define TEGRA20_MAX_STATES 2
#else
#define TEGRA20_MAX_STATES 1
#endif #endif
static struct cpuidle_state tegra_idle_states[] = {
[0] = ARM_CPUIDLE_WFI_STATE_PWR(600),
#ifdef CONFIG_PM_SLEEP
[1] = {
.enter = tegra20_idle_lp2_coupled,
.exit_latency = 5000,
.target_residency = 10000,
.power_usage = 0,
.flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_COUPLED,
.name = "powered-down",
.desc = "CPU power gated",
},
#endif
};
static struct cpuidle_driver tegra_idle_driver = { static struct cpuidle_driver tegra_idle_driver = {
.name = "tegra_idle", .name = "tegra_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1, .states = {
ARM_CPUIDLE_WFI_STATE_PWR(600),
#ifdef CONFIG_PM_SLEEP
{
.enter = tegra20_idle_lp2_coupled,
.exit_latency = 5000,
.target_residency = 10000,
.power_usage = 0,
.flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_COUPLED,
.name = "powered-down",
.desc = "CPU power gated",
},
#endif
},
.state_count = TEGRA20_MAX_STATES,
.safe_state_index = 0,
}; };
static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE); static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
...@@ -217,39 +218,8 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev, ...@@ -217,39 +218,8 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
int __init tegra20_cpuidle_init(void) int __init tegra20_cpuidle_init(void)
{ {
int ret;
unsigned int cpu;
struct cpuidle_device *dev;
struct cpuidle_driver *drv = &tegra_idle_driver;
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
tegra_tear_down_cpu = tegra20_tear_down_cpu; tegra_tear_down_cpu = tegra20_tear_down_cpu;
#endif #endif
return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
drv->state_count = ARRAY_SIZE(tegra_idle_states);
memcpy(drv->states, tegra_idle_states,
drv->state_count * sizeof(drv->states[0]));
ret = cpuidle_register_driver(&tegra_idle_driver);
if (ret) {
pr_err("CPUidle driver registration failed\n");
return ret;
}
for_each_possible_cpu(cpu) {
dev = &per_cpu(tegra_idle_device, cpu);
dev->cpu = cpu;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
dev->coupled_cpus = *cpu_possible_mask;
#endif
dev->state_count = drv->state_count;
ret = cpuidle_register_device(dev);
if (ret) {
pr_err("CPU%u: CPUidle device registration failed\n",
cpu);
return ret;
}
}
return 0;
} }
...@@ -43,7 +43,6 @@ static int tegra30_idle_lp2(struct cpuidle_device *dev, ...@@ -43,7 +43,6 @@ static int tegra30_idle_lp2(struct cpuidle_device *dev,
static struct cpuidle_driver tegra_idle_driver = { static struct cpuidle_driver tegra_idle_driver = {
.name = "tegra_idle", .name = "tegra_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
.state_count = 2, .state_count = 2,
#else #else
...@@ -65,8 +64,6 @@ static struct cpuidle_driver tegra_idle_driver = { ...@@ -65,8 +64,6 @@ static struct cpuidle_driver tegra_idle_driver = {
}, },
}; };
static DEFINE_PER_CPU(struct cpuidle_device, tegra_idle_device);
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev, static bool tegra30_cpu_cluster_power_down(struct cpuidle_device *dev,
struct cpuidle_driver *drv, struct cpuidle_driver *drv,
...@@ -157,32 +154,8 @@ static int tegra30_idle_lp2(struct cpuidle_device *dev, ...@@ -157,32 +154,8 @@ static int tegra30_idle_lp2(struct cpuidle_device *dev,
int __init tegra30_cpuidle_init(void) int __init tegra30_cpuidle_init(void)
{ {
int ret;
unsigned int cpu;
struct cpuidle_device *dev;
struct cpuidle_driver *drv = &tegra_idle_driver;
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
tegra_tear_down_cpu = tegra30_tear_down_cpu; tegra_tear_down_cpu = tegra30_tear_down_cpu;
#endif #endif
return cpuidle_register(&tegra_idle_driver, NULL);
ret = cpuidle_register_driver(&tegra_idle_driver);
if (ret) {
pr_err("CPUidle driver registration failed\n");
return ret;
}
for_each_possible_cpu(cpu) {
dev = &per_cpu(tegra_idle_device, cpu);
dev->cpu = cpu;
dev->state_count = drv->state_count;
ret = cpuidle_register_device(dev);
if (ret) {
pr_err("CPU%u: CPUidle device registration failed\n",
cpu);
return ret;
}
}
return 0;
} }
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include <linux/module.h> #include <linux/module.h>
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/atomic.h> #include <linux/atomic.h>
#include <linux/smp.h> #include <linux/smp.h>
...@@ -22,7 +21,6 @@ ...@@ -22,7 +21,6 @@
static atomic_t master = ATOMIC_INIT(0); static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock); static DEFINE_SPINLOCK(master_lock);
static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);
static inline int ux500_enter_idle(struct cpuidle_device *dev, static inline int ux500_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) struct cpuidle_driver *drv, int index)
...@@ -30,8 +28,6 @@ static inline int ux500_enter_idle(struct cpuidle_device *dev, ...@@ -30,8 +28,6 @@ static inline int ux500_enter_idle(struct cpuidle_device *dev,
int this_cpu = smp_processor_id(); int this_cpu = smp_processor_id();
bool recouple = false; bool recouple = false;
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);
if (atomic_inc_return(&master) == num_online_cpus()) { if (atomic_inc_return(&master) == num_online_cpus()) {
/* With this lock, we prevent the other cpu to exit and enter /* With this lock, we prevent the other cpu to exit and enter
...@@ -91,22 +87,20 @@ static inline int ux500_enter_idle(struct cpuidle_device *dev, ...@@ -91,22 +87,20 @@ static inline int ux500_enter_idle(struct cpuidle_device *dev,
spin_unlock(&master_lock); spin_unlock(&master_lock);
} }
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);
return index; return index;
} }
static struct cpuidle_driver ux500_idle_driver = { static struct cpuidle_driver ux500_idle_driver = {
.name = "ux500_idle", .name = "ux500_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states = { .states = {
ARM_CPUIDLE_WFI_STATE, ARM_CPUIDLE_WFI_STATE,
{ {
.enter = ux500_enter_idle, .enter = ux500_enter_idle,
.exit_latency = 70, .exit_latency = 70,
.target_residency = 260, .target_residency = 260,
.flags = CPUIDLE_FLAG_TIME_VALID, .flags = CPUIDLE_FLAG_TIME_VALID |
CPUIDLE_FLAG_TIMER_STOP,
.name = "ApIdle", .name = "ApIdle",
.desc = "ARM Retention", .desc = "ARM Retention",
}, },
...@@ -115,59 +109,13 @@ static struct cpuidle_driver ux500_idle_driver = { ...@@ -115,59 +109,13 @@ static struct cpuidle_driver ux500_idle_driver = {
.state_count = 2, .state_count = 2,
}; };
/*
* For each cpu, setup the broadcast timer because we will
* need to migrate the timers for the states >= ApIdle.
*/
static void ux500_setup_broadcast_timer(void *arg)
{
int cpu = smp_processor_id();
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}
int __init ux500_idle_init(void) int __init ux500_idle_init(void)
{ {
int ret, cpu;
struct cpuidle_device *device;
/* Configure wake up reasons */ /* Configure wake up reasons */
prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
PRCMU_WAKEUP(ABB)); PRCMU_WAKEUP(ABB));
/* return cpuidle_register(&ux500_idle_driver, NULL);
* Configure the timer broadcast for each cpu, that must
* be done from the cpu context, so we use a smp cross
* call with 'on_each_cpu'.
*/
on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);
ret = cpuidle_register_driver(&ux500_idle_driver);
if (ret) {
printk(KERN_ERR "failed to register ux500 idle driver\n");
return ret;
}
for_each_online_cpu(cpu) {
device = &per_cpu(ux500_cpuidle_device, cpu);
device->cpu = cpu;
ret = cpuidle_register_device(device);
if (ret) {
printk(KERN_ERR "Failed to register cpuidle "
"device for cpu%d\n", cpu);
goto out_unregister;
}
}
out:
return ret;
out_unregister:
for_each_online_cpu(cpu) {
device = &per_cpu(ux500_cpuidle_device, cpu);
cpuidle_unregister_device(device);
}
cpuidle_unregister_driver(&ux500_idle_driver);
goto out;
} }
device_initcall(ux500_idle_init); device_initcall(ux500_idle_init);
...@@ -23,8 +23,8 @@ ...@@ -23,8 +23,8 @@
#include "pseries.h" #include "pseries.h"
struct cpuidle_driver pseries_idle_driver = { struct cpuidle_driver pseries_idle_driver = {
.name = "pseries_idle", .name = "pseries_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
}; };
#define MAX_IDLE_STATE_COUNT 2 #define MAX_IDLE_STATE_COUNT 2
...@@ -33,10 +33,8 @@ static int max_idle_state = MAX_IDLE_STATE_COUNT - 1; ...@@ -33,10 +33,8 @@ static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices; static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table; static struct cpuidle_state *cpuidle_state_table;
static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) static inline void idle_loop_prolog(unsigned long *in_purr)
{ {
*kt_before = ktime_get();
*in_purr = mfspr(SPRN_PURR); *in_purr = mfspr(SPRN_PURR);
/* /*
* Indicate to the HV that we are idle. Now would be * Indicate to the HV that we are idle. Now would be
...@@ -45,12 +43,10 @@ static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before) ...@@ -45,12 +43,10 @@ static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
get_lppaca()->idle = 1; get_lppaca()->idle = 1;
} }
static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before) static inline void idle_loop_epilog(unsigned long in_purr)
{ {
get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
get_lppaca()->idle = 0; get_lppaca()->idle = 0;
return ktime_to_us(ktime_sub(ktime_get(), kt_before));
} }
static int snooze_loop(struct cpuidle_device *dev, static int snooze_loop(struct cpuidle_device *dev,
...@@ -58,10 +54,9 @@ static int snooze_loop(struct cpuidle_device *dev, ...@@ -58,10 +54,9 @@ static int snooze_loop(struct cpuidle_device *dev,
int index) int index)
{ {
unsigned long in_purr; unsigned long in_purr;
ktime_t kt_before;
int cpu = dev->cpu; int cpu = dev->cpu;
idle_loop_prolog(&in_purr, &kt_before); idle_loop_prolog(&in_purr);
local_irq_enable(); local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG); set_thread_flag(TIF_POLLING_NRFLAG);
...@@ -75,8 +70,8 @@ static int snooze_loop(struct cpuidle_device *dev, ...@@ -75,8 +70,8 @@ static int snooze_loop(struct cpuidle_device *dev,
clear_thread_flag(TIF_POLLING_NRFLAG); clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb(); smp_mb();
dev->last_residency = idle_loop_epilog(in_purr);
(int)idle_loop_epilog(in_purr, kt_before);
return index; return index;
} }
...@@ -102,9 +97,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, ...@@ -102,9 +97,8 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
int index) int index)
{ {
unsigned long in_purr; unsigned long in_purr;
ktime_t kt_before;
idle_loop_prolog(&in_purr, &kt_before); idle_loop_prolog(&in_purr);
get_lppaca()->donate_dedicated_cpu = 1; get_lppaca()->donate_dedicated_cpu = 1;
ppc64_runlatch_off(); ppc64_runlatch_off();
...@@ -112,8 +106,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev, ...@@ -112,8 +106,9 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
check_and_cede_processor(); check_and_cede_processor();
get_lppaca()->donate_dedicated_cpu = 0; get_lppaca()->donate_dedicated_cpu = 0;
dev->last_residency =
(int)idle_loop_epilog(in_purr, kt_before); idle_loop_epilog(in_purr);
return index; return index;
} }
...@@ -122,9 +117,8 @@ static int shared_cede_loop(struct cpuidle_device *dev, ...@@ -122,9 +117,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
int index) int index)
{ {
unsigned long in_purr; unsigned long in_purr;
ktime_t kt_before;
idle_loop_prolog(&in_purr, &kt_before); idle_loop_prolog(&in_purr);
/* /*
* Yield the processor to the hypervisor. We return if * Yield the processor to the hypervisor. We return if
...@@ -135,8 +129,8 @@ static int shared_cede_loop(struct cpuidle_device *dev, ...@@ -135,8 +129,8 @@ static int shared_cede_loop(struct cpuidle_device *dev,
*/ */
check_and_cede_processor(); check_and_cede_processor();
dev->last_residency = idle_loop_epilog(in_purr);
(int)idle_loop_epilog(in_purr, kt_before);
return index; return index;
} }
......
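
The pseries simplification above relies on the same core behaviour: cpuidle_enter_state() timestamps each ->enter call and fills in dev->last_residency itself, so drivers no longer carry ktime bookkeeping. In rough outline (a sketch, not the literal core code):

/* Approximation of what the core now does around ->enter(); the pseries
 * prolog/epilog above no longer compute last_residency themselves.
 */
int cpuidle_enter_state_sketch(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	ktime_t time_start, time_end;
	int entered_state;

	time_start = ktime_get();
	entered_state = drv->states[index].enter(dev, drv, index);
	time_end = ktime_get();

	local_irq_enable();
	dev->last_residency = (int)ktime_to_us(ktime_sub(time_end, time_start));

	return entered_state;
}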
...@@ -14,9 +14,9 @@ struct swsusp_arch_regs { ...@@ -14,9 +14,9 @@ struct swsusp_arch_regs {
void sh_mobile_call_standby(unsigned long mode); void sh_mobile_call_standby(unsigned long mode);
#ifdef CONFIG_CPU_IDLE #ifdef CONFIG_CPU_IDLE
void sh_mobile_setup_cpuidle(void); int sh_mobile_setup_cpuidle(void);
#else #else
static inline void sh_mobile_setup_cpuidle(void) {} static inline int sh_mobile_setup_cpuidle(void) { return 0; }
#endif #endif
/* notifier chains for pre/post sleep hooks */ /* notifier chains for pre/post sleep hooks */
......
...@@ -51,70 +51,53 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev, ...@@ -51,70 +51,53 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
return k; return k;
} }
static struct cpuidle_device cpuidle_dev;
static struct cpuidle_driver cpuidle_driver = { static struct cpuidle_driver cpuidle_driver = {
.name = "sh_idle", .name = "sh_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1, .states = {
{
.exit_latency = 1,
.target_residency = 1 * 2,
.power_usage = 3,
.flags = CPUIDLE_FLAG_TIME_VALID,
.enter = cpuidle_sleep_enter,
.name = "C1",
.desc = "SuperH Sleep Mode",
},
{
.exit_latency = 100,
.target_residency = 1 * 2,
.power_usage = 1,
.flags = CPUIDLE_FLAG_TIME_VALID,
.enter = cpuidle_sleep_enter,
.name = "C2",
.desc = "SuperH Sleep Mode [SF]",
.disabled = true,
},
{
.exit_latency = 2300,
.target_residency = 1 * 2,
.power_usage = 1,
.flags = CPUIDLE_FLAG_TIME_VALID,
.enter = cpuidle_sleep_enter,
.name = "C3",
.desc = "SuperH Mobile Standby Mode [SF]",
.disabled = true,
},
},
.safe_state_index = 0,
.state_count = 3,
}; };
void sh_mobile_setup_cpuidle(void) int __init sh_mobile_setup_cpuidle(void)
{ {
struct cpuidle_device *dev = &cpuidle_dev; int ret;
struct cpuidle_driver *drv = &cpuidle_driver;
struct cpuidle_state *state;
int i;
if (sh_mobile_sleep_supported & SUSP_SH_SF)
cpuidle_driver.states[1].disabled = false;
for (i = 0; i < CPUIDLE_STATE_MAX; i++) { if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
drv->states[i].name[0] = '\0'; cpuidle_driver.states[2].disabled = false;
drv->states[i].desc[0] = '\0';
}
i = CPUIDLE_DRIVER_STATE_START; return cpuidle_register(&cpuidle_driver);
state = &drv->states[i++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
state->exit_latency = 1;
state->target_residency = 1 * 2;
state->power_usage = 3;
state->flags = 0;
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = cpuidle_sleep_enter;
drv->safe_state_index = i-1;
if (sh_mobile_sleep_supported & SUSP_SH_SF) {
state = &drv->states[i++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
strncpy(state->desc, "SuperH Sleep Mode [SF]",
CPUIDLE_DESC_LEN);
state->exit_latency = 100;
state->target_residency = 1 * 2;
state->power_usage = 1;
state->flags = 0;
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = cpuidle_sleep_enter;
}
if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
state = &drv->states[i++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
CPUIDLE_DESC_LEN);
state->exit_latency = 2300;
state->target_residency = 1 * 2;
state->power_usage = 1;
state->flags = 0;
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = cpuidle_sleep_enter;
}
drv->state_count = i;
dev->state_count = i;
cpuidle_register_driver(&cpuidle_driver);
cpuidle_register_device(dev);
} }
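
The SH conversion above also replaces run-time construction of the state table (snprintf/strncpy into drv->states[]) with a static table whose optional states start out .disabled and are switched on when the hardware supports them; the same pattern in miniature (names and numbers are invented):

static struct cpuidle_driver foo_idle_driver = {
	.name		= "foo_idle",
	.owner		= THIS_MODULE,
	.states		= {
		[0] = {
			.enter			= foo_enter_shallow,
			.exit_latency		= 1,
			.target_residency	= 2,
			.flags			= CPUIDLE_FLAG_TIME_VALID,
			.name			= "C1",
			.desc			= "shallow sleep",
		},
		[1] = {
			.enter			= foo_enter_deep,
			.exit_latency		= 100,
			.target_residency	= 200,
			.flags			= CPUIDLE_FLAG_TIME_VALID,
			.name			= "C2",
			.desc			= "deep sleep",
			.disabled		= true,	/* off unless supported */
		},
	},
	.safe_state_index	= 0,
	.state_count		= 2,
};

static int __init foo_idle_init(void)
{
	if (foo_deep_sleep_supported())		/* assumed platform query */
		foo_idle_driver.states[1].disabled = false;

	return cpuidle_register(&foo_idle_driver, NULL);
}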
...@@ -150,8 +150,7 @@ static const struct platform_suspend_ops sh_pm_ops = { ...@@ -150,8 +150,7 @@ static const struct platform_suspend_ops sh_pm_ops = {
static int __init sh_pm_init(void) static int __init sh_pm_init(void)
{ {
suspend_set_ops(&sh_pm_ops); suspend_set_ops(&sh_pm_ops);
sh_mobile_setup_cpuidle(); return sh_mobile_setup_cpuidle();
return 0;
} }
late_initcall(sh_pm_init); late_initcall(sh_pm_init);
...@@ -373,7 +373,6 @@ static int apm_cpu_idle(struct cpuidle_device *dev, ...@@ -373,7 +373,6 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
static struct cpuidle_driver apm_idle_driver = { static struct cpuidle_driver apm_idle_driver = {
.name = "apm_idle", .name = "apm_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states = { .states = {
{ /* entry 0 is for polling */ }, { /* entry 0 is for polling */ },
{ /* entry 1 is for APM idle */ { /* entry 1 is for APM idle */
......
...@@ -918,7 +918,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, ...@@ -918,7 +918,6 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver acpi_idle_driver = { struct cpuidle_driver acpi_idle_driver = {
.name = "acpi_idle", .name = "acpi_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
}; };
/** /**
......
...@@ -39,10 +39,4 @@ config CPU_IDLE_CALXEDA ...@@ -39,10 +39,4 @@ config CPU_IDLE_CALXEDA
help help
Select this to enable cpuidle on Calxeda processors. Select this to enable cpuidle on Calxeda processors.
config CPU_IDLE_KIRKWOOD
bool "CPU Idle Driver for Kirkwood processors"
depends on ARCH_KIRKWOOD
help
Select this to enable cpuidle on Kirkwood processors.
endif endif
...@@ -6,4 +6,4 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/ ...@@ -6,4 +6,4 @@ obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
obj-$(CONFIG_CPU_IDLE_KIRKWOOD) += cpuidle-kirkwood.o obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
/* /*
* Copyright 2012 Calxeda, Inc. * Copyright 2012 Calxeda, Inc.
* *
* Based on arch/arm/plat-mxc/cpuidle.c: * Based on arch/arm/plat-mxc/cpuidle.c: #v3.7
* Copyright 2012 Freescale Semiconductor, Inc. * Copyright 2012 Freescale Semiconductor, Inc.
* Copyright 2012 Linaro Ltd. * Copyright 2012 Linaro Ltd.
* *
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
* *
* You should have received a copy of the GNU General Public License along with * You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>. * this program. If not, see <http://www.gnu.org/licenses/>.
*
* Maintainer: Rob Herring <rob.herring@calxeda.com>
*/ */
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
...@@ -35,8 +37,6 @@ ...@@ -35,8 +37,6 @@
extern void highbank_set_cpu_jump(int cpu, void *jump_addr); extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
extern void *scu_base_addr; extern void *scu_base_addr;
static struct cpuidle_device __percpu *calxeda_idle_cpuidle_devices;
static inline unsigned int get_auxcr(void) static inline unsigned int get_auxcr(void)
{ {
unsigned int val; unsigned int val;
...@@ -85,22 +85,8 @@ static int calxeda_pwrdown_idle(struct cpuidle_device *dev, ...@@ -85,22 +85,8 @@ static int calxeda_pwrdown_idle(struct cpuidle_device *dev,
return index; return index;
} }
static void calxeda_idle_cpuidle_devices_uninit(void)
{
int i;
struct cpuidle_device *dev;
for_each_possible_cpu(i) {
dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, i);
cpuidle_unregister_device(dev);
}
free_percpu(calxeda_idle_cpuidle_devices);
}
static struct cpuidle_driver calxeda_idle_driver = { static struct cpuidle_driver calxeda_idle_driver = {
.name = "calxeda_idle", .name = "calxeda_idle",
.en_core_tk_irqen = 1,
.states = { .states = {
ARM_CPUIDLE_WFI_STATE, ARM_CPUIDLE_WFI_STATE,
{ {
...@@ -118,44 +104,9 @@ static struct cpuidle_driver calxeda_idle_driver = { ...@@ -118,44 +104,9 @@ static struct cpuidle_driver calxeda_idle_driver = {
static int __init calxeda_cpuidle_init(void) static int __init calxeda_cpuidle_init(void)
{ {
int cpu_id;
int ret;
struct cpuidle_device *dev;
struct cpuidle_driver *drv = &calxeda_idle_driver;
if (!of_machine_is_compatible("calxeda,highbank")) if (!of_machine_is_compatible("calxeda,highbank"))
return -ENODEV; return -ENODEV;
ret = cpuidle_register_driver(drv); return cpuidle_register(&calxeda_idle_driver, NULL);
if (ret)
return ret;
calxeda_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
if (calxeda_idle_cpuidle_devices == NULL) {
ret = -ENOMEM;
goto unregister_drv;
}
/* initialize state data for each cpuidle_device */
for_each_possible_cpu(cpu_id) {
dev = per_cpu_ptr(calxeda_idle_cpuidle_devices, cpu_id);
dev->cpu = cpu_id;
dev->state_count = drv->state_count;
ret = cpuidle_register_device(dev);
if (ret) {
pr_err("Failed to register cpu %u, error: %d\n",
cpu_id, ret);
goto uninit;
}
}
return 0;
uninit:
calxeda_idle_cpuidle_devices_uninit();
unregister_drv:
cpuidle_unregister_driver(drv);
return ret;
} }
module_init(calxeda_cpuidle_init); module_init(calxeda_cpuidle_init);
/* /*
* arch/arm/mach-kirkwood/cpuidle.c
*
* CPU idle Marvell Kirkwood SoCs * CPU idle Marvell Kirkwood SoCs
* *
* This file is licensed under the terms of the GNU General Public * This file is licensed under the terms of the GNU General Public
...@@ -11,6 +9,9 @@ ...@@ -11,6 +9,9 @@
* to implement two idle states - * to implement two idle states -
* #1 wait-for-interrupt * #1 wait-for-interrupt
* #2 wait-for-interrupt and DDR self refresh * #2 wait-for-interrupt and DDR self refresh
*
* Maintainer: Jason Cooper <jason@lakedaemon.net>
* Maintainer: Andrew Lunn <andrew@lunn.ch>
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -41,7 +42,6 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev, ...@@ -41,7 +42,6 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev,
static struct cpuidle_driver kirkwood_idle_driver = { static struct cpuidle_driver kirkwood_idle_driver = {
.name = "kirkwood_idle", .name = "kirkwood_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
.states[0] = ARM_CPUIDLE_WFI_STATE, .states[0] = ARM_CPUIDLE_WFI_STATE,
.states[1] = { .states[1] = {
.enter = kirkwood_enter_idle, .enter = kirkwood_enter_idle,
...@@ -53,9 +53,6 @@ static struct cpuidle_driver kirkwood_idle_driver = { ...@@ -53,9 +53,6 @@ static struct cpuidle_driver kirkwood_idle_driver = {
}, },
.state_count = KIRKWOOD_MAX_STATES, .state_count = KIRKWOOD_MAX_STATES,
}; };
static struct cpuidle_device *device;
static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
/* Initialize CPU idle by registering the idle states */ /* Initialize CPU idle by registering the idle states */
static int kirkwood_cpuidle_probe(struct platform_device *pdev) static int kirkwood_cpuidle_probe(struct platform_device *pdev)
...@@ -66,26 +63,16 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev) ...@@ -66,26 +63,16 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
if (res == NULL) if (res == NULL)
return -EINVAL; return -EINVAL;
ddr_operation_base = devm_request_and_ioremap(&pdev->dev, res); ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
if (!ddr_operation_base) if (IS_ERR(ddr_operation_base))
return -EADDRNOTAVAIL; return PTR_ERR(ddr_operation_base);
device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); return cpuidle_register(&kirkwood_idle_driver, NULL);
device->state_count = KIRKWOOD_MAX_STATES;
cpuidle_register_driver(&kirkwood_idle_driver);
if (cpuidle_register_device(device)) {
pr_err("kirkwood_init_cpuidle: Failed registering\n");
return -EIO;
}
return 0;
} }
int kirkwood_cpuidle_remove(struct platform_device *pdev) int kirkwood_cpuidle_remove(struct platform_device *pdev)
{ {
cpuidle_unregister_device(device); cpuidle_unregister(&kirkwood_idle_driver);
cpuidle_unregister_driver(&kirkwood_idle_driver);
return 0; return 0;
} }
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
* This code is licenced under the GPL. * This code is licenced under the GPL.
*/ */
#include <linux/clockchips.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/sched.h> #include <linux/sched.h>
...@@ -23,6 +24,7 @@ ...@@ -23,6 +24,7 @@
#include "cpuidle.h" #include "cpuidle.h"
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices); DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);
DEFINE_MUTEX(cpuidle_lock); DEFINE_MUTEX(cpuidle_lock);
LIST_HEAD(cpuidle_detected_devices); LIST_HEAD(cpuidle_detected_devices);
...@@ -42,24 +44,6 @@ void disable_cpuidle(void) ...@@ -42,24 +44,6 @@ void disable_cpuidle(void)
static int __cpuidle_register_device(struct cpuidle_device *dev); static int __cpuidle_register_device(struct cpuidle_device *dev);
static inline int cpuidle_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct cpuidle_state *target_state = &drv->states[index];
return target_state->enter(dev, drv, index);
}
static inline int cpuidle_enter_tk(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
return cpuidle_wrap_enter(dev, drv, index, cpuidle_enter);
}
typedef int (*cpuidle_enter_t)(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static cpuidle_enter_t cpuidle_enter_ops;
/** /**
* cpuidle_play_dead - cpu off-lining * cpuidle_play_dead - cpu off-lining
* *
...@@ -89,11 +73,27 @@ int cpuidle_play_dead(void) ...@@ -89,11 +73,27 @@ int cpuidle_play_dead(void)
* @next_state: index into drv->states of the state to enter * @next_state: index into drv->states of the state to enter
*/ */
int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int next_state) int index)
{ {
int entered_state; int entered_state;
entered_state = cpuidle_enter_ops(dev, drv, next_state); struct cpuidle_state *target_state = &drv->states[index];
ktime_t time_start, time_end;
s64 diff;
time_start = ktime_get();
entered_state = target_state->enter(dev, drv, index);
time_end = ktime_get();
local_irq_enable();
diff = ktime_to_us(ktime_sub(time_end, time_start));
if (diff > INT_MAX)
diff = INT_MAX;
dev->last_residency = (int) diff;
if (entered_state >= 0) { if (entered_state >= 0) {
/* Update cpuidle counters */ /* Update cpuidle counters */
...@@ -146,12 +146,20 @@ int cpuidle_idle_call(void) ...@@ -146,12 +146,20 @@ int cpuidle_idle_call(void)
trace_cpu_idle_rcuidle(next_state, dev->cpu); trace_cpu_idle_rcuidle(next_state, dev->cpu);
if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
&dev->cpu);
if (cpuidle_state_is_coupled(dev, drv, next_state)) if (cpuidle_state_is_coupled(dev, drv, next_state))
entered_state = cpuidle_enter_state_coupled(dev, drv, entered_state = cpuidle_enter_state_coupled(dev, drv,
next_state); next_state);
else else
entered_state = cpuidle_enter_state(dev, drv, next_state); entered_state = cpuidle_enter_state(dev, drv, next_state);
if (drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP)
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
&dev->cpu);
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
/* give the governor an opportunity to reflect on the outcome */ /* give the governor an opportunity to reflect on the outcome */
...@@ -222,37 +230,6 @@ void cpuidle_resume(void) ...@@ -222,37 +230,6 @@ void cpuidle_resume(void)
mutex_unlock(&cpuidle_lock); mutex_unlock(&cpuidle_lock);
} }
/**
* cpuidle_wrap_enter - performs timekeeping and irqen around enter function
* @dev: pointer to a valid cpuidle_device object
* @drv: pointer to a valid cpuidle_driver object
* @index: index of the target cpuidle state.
*/
int cpuidle_wrap_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index,
int (*enter)(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index))
{
ktime_t time_start, time_end;
s64 diff;
time_start = ktime_get();
index = enter(dev, drv, index);
time_end = ktime_get();
local_irq_enable();
diff = ktime_to_us(ktime_sub(time_end, time_start));
if (diff > INT_MAX)
diff = INT_MAX;
dev->last_residency = (int) diff;
return index;
}
#ifdef CONFIG_ARCH_HAS_CPU_RELAX #ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev, static int poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) struct cpuidle_driver *drv, int index)
...@@ -324,9 +301,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev) ...@@ -324,9 +301,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return ret; return ret;
} }
cpuidle_enter_ops = drv->en_core_tk_irqen ?
cpuidle_enter_tk : cpuidle_enter;
poll_idle_init(drv); poll_idle_init(drv);
ret = cpuidle_add_device_sysfs(dev); ret = cpuidle_add_device_sysfs(dev);
...@@ -480,6 +454,77 @@ void cpuidle_unregister_device(struct cpuidle_device *dev) ...@@ -480,6 +454,77 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_unregister_device); EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
/**
* cpuidle_unregister: unregister a driver and its associated cpu devices. This
* function can only be used if the driver has previously been registered
* through the cpuidle_register function.
*
* @drv: a valid pointer to a struct cpuidle_driver
*/
void cpuidle_unregister(struct cpuidle_driver *drv)
{
int cpu;
struct cpuidle_device *device;
for_each_possible_cpu(cpu) {
device = &per_cpu(cpuidle_dev, cpu);
cpuidle_unregister_device(device);
}
cpuidle_unregister_driver(drv);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister);
/**
* cpuidle_register: registers the driver and the cpu devices with the
* coupled_cpus passed as parameter. This function covers the common
* initialization pattern found in the arch specific drivers. The
* devices are defined globally in this file.
*
* @drv : a valid pointer to a struct cpuidle_driver
* @coupled_cpus: a cpumask for the coupled states
*
* Returns 0 on success, < 0 otherwise
*/
int cpuidle_register(struct cpuidle_driver *drv,
const struct cpumask *const coupled_cpus)
{
int ret, cpu;
struct cpuidle_device *device;
ret = cpuidle_register_driver(drv);
if (ret) {
pr_err("failed to register cpuidle driver\n");
return ret;
}
for_each_possible_cpu(cpu) {
device = &per_cpu(cpuidle_dev, cpu);
device->cpu = cpu;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
/*
* On ARM multiplatform kernels, support for coupled idle states can
* be enabled even if the cpuidle driver does not use them.
* Note, coupled_cpus is a struct copy.
*/
if (coupled_cpus)
device->coupled_cpus = *coupled_cpus;
#endif
ret = cpuidle_register_device(device);
if (!ret)
continue;
pr_err("Failed to register cpuidle device for cpu%d\n", cpu);
cpuidle_unregister(drv);
break;
}
return ret;
}
EXPORT_SYMBOL_GPL(cpuidle_register);
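For reference, a minimal sketch of how an arch driver is expected to consume this new register/unregister pair; the foo_* names, the latency numbers and the ARM WFI state are illustrative assumptions, not part of this series:

/* hypothetical example driver, assuming an ARM platform */
#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>		/* ARM_CPUIDLE_WFI_STATE */
#include <asm/proc-fns.h>		/* cpu_do_idle() */

static int foo_enter_lowpower(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();			/* stand-in for the platform low-power entry */
	return index;
}

static struct cpuidle_driver foo_idle_driver = {
	.name		= "foo_idle",
	.owner		= THIS_MODULE,
	.states		= {
		ARM_CPUIDLE_WFI_STATE,
		{
			.enter			= foo_enter_lowpower,
			.exit_latency		= 100,		/* us, illustrative */
			.target_residency	= 1000,		/* us, illustrative */
			.flags			= CPUIDLE_FLAG_TIME_VALID,
			.name			= "LP",
			.desc			= "illustrative low power state",
		},
	},
	.state_count = 2,
};

static int __init foo_cpuidle_init(void)
{
	/* registers the driver and one cpuidle_device per possible cpu */
	return cpuidle_register(&foo_idle_driver, NULL);
}

static void __exit foo_cpuidle_exit(void)
{
	/* unregisters the per-cpu devices, then the driver */
	cpuidle_unregister(&foo_idle_driver);
}

module_init(foo_cpuidle_init);
module_exit(foo_cpuidle_exit);

Compared with the open-coded pattern removed from the calxeda and kirkwood drivers above, the per-cpu device allocation and the error unwinding now live in this one place.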
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static void smp_callback(void *v) static void smp_callback(void *v)
......
...@@ -11,6 +11,8 @@ ...@@ -11,6 +11,8 @@
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/clockchips.h>
#include "cpuidle.h" #include "cpuidle.h"
...@@ -19,9 +21,28 @@ DEFINE_SPINLOCK(cpuidle_driver_lock); ...@@ -19,9 +21,28 @@ DEFINE_SPINLOCK(cpuidle_driver_lock);
static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu); static void __cpuidle_set_cpu_driver(struct cpuidle_driver *drv, int cpu);
static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu); static struct cpuidle_driver * __cpuidle_get_cpu_driver(int cpu);
static void __cpuidle_driver_init(struct cpuidle_driver *drv) static void cpuidle_setup_broadcast_timer(void *arg)
{ {
int cpu = smp_processor_id();
clockevents_notify((long)(arg), &cpu);
}
static void __cpuidle_driver_init(struct cpuidle_driver *drv, int cpu)
{
int i;
drv->refcnt = 0; drv->refcnt = 0;
for (i = drv->state_count - 1; i >= 0 ; i--) {
if (!(drv->states[i].flags & CPUIDLE_FLAG_TIMER_STOP))
continue;
drv->bctimer = 1;
on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
(void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);
break;
}
} }
static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu) static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
...@@ -35,7 +56,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu) ...@@ -35,7 +56,7 @@ static int __cpuidle_register_driver(struct cpuidle_driver *drv, int cpu)
if (__cpuidle_get_cpu_driver(cpu)) if (__cpuidle_get_cpu_driver(cpu))
return -EBUSY; return -EBUSY;
__cpuidle_driver_init(drv); __cpuidle_driver_init(drv, cpu);
__cpuidle_set_cpu_driver(drv, cpu); __cpuidle_set_cpu_driver(drv, cpu);
...@@ -49,6 +70,12 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu) ...@@ -49,6 +70,12 @@ static void __cpuidle_unregister_driver(struct cpuidle_driver *drv, int cpu)
if (!WARN_ON(drv->refcnt > 0)) if (!WARN_ON(drv->refcnt > 0))
__cpuidle_set_cpu_driver(NULL, cpu); __cpuidle_set_cpu_driver(NULL, cpu);
if (drv->bctimer) {
drv->bctimer = 0;
on_each_cpu_mask(get_cpu_mask(cpu), cpuidle_setup_broadcast_timer,
(void *)CLOCK_EVT_NOTIFY_BROADCAST_OFF, 1);
}
} }
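As a hedged illustration of when this bctimer path triggers (continuing the hypothetical foo_idle_driver sketch from the cpuidle_register example earlier, with made-up values): a state whose hardware stops the CPU-local timer is declared with the new CPUIDLE_FLAG_TIMER_STOP flag, which makes __cpuidle_driver_init() set drv->bctimer and switch the driver's CPUs to the broadcast clockevent, while cpuidle_idle_call() brackets the state entry with BROADCAST_ENTER/EXIT notifications. Such a state would be added as a further entry in foo_idle_driver.states[], with state_count raised accordingly:

		{
			.enter			= foo_enter_deepidle,	/* hypothetical callback */
			.exit_latency		= 300,			/* us, illustrative */
			.target_residency	= 10000,		/* us, illustrative */
			.flags			= CPUIDLE_FLAG_TIME_VALID |
						  CPUIDLE_FLAG_TIMER_STOP,
			.name			= "DEEP",
			.desc			= "deep idle, local timer stopped",
		},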
#ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS #ifdef CONFIG_CPU_IDLE_MULTIPLE_DRIVERS
......
...@@ -71,7 +71,6 @@ ...@@ -71,7 +71,6 @@
static struct cpuidle_driver intel_idle_driver = { static struct cpuidle_driver intel_idle_driver = {
.name = "intel_idle", .name = "intel_idle",
.owner = THIS_MODULE, .owner = THIS_MODULE,
.en_core_tk_irqen = 1,
}; };
/* intel_idle.max_cstate=0 disables driver */ /* intel_idle.max_cstate=0 disables driver */
static int max_cstate = CPUIDLE_STATE_MAX - 1; static int max_cstate = CPUIDLE_STATE_MAX - 1;
...@@ -339,7 +338,6 @@ static int intel_idle(struct cpuidle_device *dev, ...@@ -339,7 +338,6 @@ static int intel_idle(struct cpuidle_device *dev,
if (!(lapic_timer_reliable_states & (1 << (cstate)))) if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
stop_critical_timings();
if (!need_resched()) { if (!need_resched()) {
__monitor((void *)&current_thread_info()->flags, 0, 0); __monitor((void *)&current_thread_info()->flags, 0, 0);
...@@ -348,8 +346,6 @@ static int intel_idle(struct cpuidle_device *dev, ...@@ -348,8 +346,6 @@ static int intel_idle(struct cpuidle_device *dev,
__mwait(eax, ecx); __mwait(eax, ecx);
} }
start_critical_timings();
if (!(lapic_timer_reliable_states & (1 << (cstate)))) if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
......
...@@ -8,6 +8,20 @@ ...@@ -8,6 +8,20 @@
#ifndef _LINUX_CLOCKCHIPS_H #ifndef _LINUX_CLOCKCHIPS_H
#define _LINUX_CLOCKCHIPS_H #define _LINUX_CLOCKCHIPS_H
/* Clock event notification values */
enum clock_event_nofitiers {
CLOCK_EVT_NOTIFY_ADD,
CLOCK_EVT_NOTIFY_BROADCAST_ON,
CLOCK_EVT_NOTIFY_BROADCAST_OFF,
CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
CLOCK_EVT_NOTIFY_SUSPEND,
CLOCK_EVT_NOTIFY_RESUME,
CLOCK_EVT_NOTIFY_CPU_DYING,
CLOCK_EVT_NOTIFY_CPU_DEAD,
};
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD #ifdef CONFIG_GENERIC_CLOCKEVENTS_BUILD
#include <linux/clocksource.h> #include <linux/clocksource.h>
...@@ -26,20 +40,6 @@ enum clock_event_mode { ...@@ -26,20 +40,6 @@ enum clock_event_mode {
CLOCK_EVT_MODE_RESUME, CLOCK_EVT_MODE_RESUME,
}; };
/* Clock event notification values */
enum clock_event_nofitiers {
CLOCK_EVT_NOTIFY_ADD,
CLOCK_EVT_NOTIFY_BROADCAST_ON,
CLOCK_EVT_NOTIFY_BROADCAST_OFF,
CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
CLOCK_EVT_NOTIFY_SUSPEND,
CLOCK_EVT_NOTIFY_RESUME,
CLOCK_EVT_NOTIFY_CPU_DYING,
CLOCK_EVT_NOTIFY_CPU_DEAD,
};
/* /*
* Clock event features * Clock event features
*/ */
...@@ -173,7 +173,7 @@ extern int tick_receive_broadcast(void); ...@@ -173,7 +173,7 @@ extern int tick_receive_broadcast(void);
#ifdef CONFIG_GENERIC_CLOCKEVENTS #ifdef CONFIG_GENERIC_CLOCKEVENTS
extern void clockevents_notify(unsigned long reason, void *arg); extern void clockevents_notify(unsigned long reason, void *arg);
#else #else
# define clockevents_notify(reason, arg) do { } while (0) static inline void clockevents_notify(unsigned long reason, void *arg) {}
#endif #endif
#else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */ #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
...@@ -181,7 +181,7 @@ extern void clockevents_notify(unsigned long reason, void *arg); ...@@ -181,7 +181,7 @@ extern void clockevents_notify(unsigned long reason, void *arg);
static inline void clockevents_suspend(void) {} static inline void clockevents_suspend(void) {}
static inline void clockevents_resume(void) {} static inline void clockevents_resume(void) {}
#define clockevents_notify(reason, arg) do { } while (0) static inline void clockevents_notify(unsigned long reason, void *arg) {}
#endif #endif
......
...@@ -57,6 +57,7 @@ struct cpuidle_state { ...@@ -57,6 +57,7 @@ struct cpuidle_state {
/* Idle State Flags */ /* Idle State Flags */
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */ #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
...@@ -104,8 +105,8 @@ struct cpuidle_driver { ...@@ -104,8 +105,8 @@ struct cpuidle_driver {
struct module *owner; struct module *owner;
int refcnt; int refcnt;
/* set to 1 to use the core cpuidle time keeping (for all states). */ /* used by the cpuidle framework to setup the broadcast timer */
unsigned int en_core_tk_irqen:1; unsigned int bctimer:1;
/* states array must be ordered in decreasing power consumption */ /* states array must be ordered in decreasing power consumption */
struct cpuidle_state states[CPUIDLE_STATE_MAX]; struct cpuidle_state states[CPUIDLE_STATE_MAX];
int state_count; int state_count;
...@@ -122,17 +123,15 @@ extern void cpuidle_driver_unref(void); ...@@ -122,17 +123,15 @@ extern void cpuidle_driver_unref(void);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv); extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev); extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev); extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern int cpuidle_register(struct cpuidle_driver *drv,
const struct cpumask *const coupled_cpus);
extern void cpuidle_unregister(struct cpuidle_driver *drv);
extern void cpuidle_pause_and_lock(void); extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void); extern void cpuidle_resume_and_unlock(void);
extern void cpuidle_pause(void); extern void cpuidle_pause(void);
extern void cpuidle_resume(void); extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev); extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev); extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index,
int (*enter)(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index));
extern int cpuidle_play_dead(void); extern int cpuidle_play_dead(void);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
...@@ -151,7 +150,10 @@ static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { } ...@@ -151,7 +150,10 @@ static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev) static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; } {return -ENODEV; }
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { } static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline int cpuidle_register(struct cpuidle_driver *drv,
const struct cpumask *const coupled_cpus)
{return -ENODEV; }
static inline void cpuidle_unregister(struct cpuidle_driver *drv) { }
static inline void cpuidle_pause_and_lock(void) { } static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { } static inline void cpuidle_resume_and_unlock(void) { }
static inline void cpuidle_pause(void) { } static inline void cpuidle_pause(void) { }
...@@ -159,11 +161,6 @@ static inline void cpuidle_resume(void) { } ...@@ -159,11 +161,6 @@ static inline void cpuidle_resume(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev) static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; } {return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_wrap_enter(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index,
int (*enter)(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index))
{ return -ENODEV; }
static inline int cpuidle_play_dead(void) {return -ENODEV; } static inline int cpuidle_play_dead(void) {return -ENODEV; }
#endif #endif
......