Commit 7e69f2b1 authored by Thomas Gleixner, committed by Linus Torvalds

[PATCH] clocksource: Remove the update callback

The clocksource code now allows the rating of a given clocksource to be
updated directly.  Change the TSC unstable tracking to use this interface
and remove the update callback.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: john stultz <johnstul@us.ibm.com>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 877fe380
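
The interface the message refers to is clocksource_change_rating(), which the new mark_tsc_unstable() in the diff below uses to demote the TSC once it is known to be unusable. A minimal sketch of that pattern for a generic driver, assuming the clocksource API of this kernel series, is shown here; the example_cs clocksource and the example_read()/example_mark_unstable() helpers are hypothetical names for illustration only, not part of this patch.

/*
 * Minimal sketch of the direct rating-update pattern this patch adopts.
 * All "example_*" identifiers are hypothetical; only the clocksource
 * fields and clocksource_change_rating() come from the kernel API.
 */
#include <linux/clocksource.h>

static int example_unstable;

static cycle_t example_read(void)
{
	return 0;	/* a real driver reads its hardware counter here */
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 300,
	.read	= example_read,
	.mask	= CLOCKSOURCE_MASK(64),
	.shift	= 22,
	/* .mult is computed (e.g. via clocksource_khz2mult()) before registration */
};

static void example_mark_unstable(void)
{
	if (!example_unstable) {
		example_unstable = 1;
		/* may be called before the clocksource is registered */
		if (example_cs.mult)
			clocksource_change_rating(&example_cs, 0);
		else
			example_cs.rating = 0;
	}
}

Dropping the rating to 0 makes the selection code prefer any other registered clocksource, which is exactly what the TSC code below does once instability is detected.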
@@ -60,12 +60,6 @@ static inline int check_tsc_unstable(void)
 	return tsc_unstable;
 }
 
-void mark_tsc_unstable(void)
-{
-	tsc_unstable = 1;
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
 /* Accellerators for sched_clock()
  * convert from cycles(64bits) => nanoseconds (64bits)
  * basic equation:
@@ -295,7 +289,6 @@ core_initcall(cpufreq_tsc);
 /* clock source code */
 
 static unsigned long current_tsc_khz = 0;
-static int tsc_update_callback(void);
 
 static cycle_t read_tsc(void)
 {
@@ -313,37 +306,28 @@ static struct clocksource clocksource_tsc = {
 	.mask			= CLOCKSOURCE_MASK(64),
 	.mult			= 0, /* to be set */
 	.shift			= 22,
-	.update_callback	= tsc_update_callback,
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
 };
 
-static int tsc_update_callback(void)
+void mark_tsc_unstable(void)
 {
-	int change = 0;
-
-	/* check to see if we should switch to the safe clocksource: */
-	if (clocksource_tsc.rating != 0 && check_tsc_unstable()) {
-		clocksource_change_rating(&clocksource_tsc, 0);
-		change = 1;
-	}
-
-	/* only update if tsc_khz has changed: */
-	if (current_tsc_khz != tsc_khz) {
-		current_tsc_khz = tsc_khz;
-		clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
-							clocksource_tsc.shift);
-		change = 1;
+	if (!tsc_unstable) {
+		tsc_unstable = 1;
+		/* Can be called before registration */
+		if (clocksource_tsc.mult)
+			clocksource_change_rating(&clocksource_tsc, 0);
+		else
+			clocksource_tsc.rating = 0;
 	}
-
-	return change;
 }
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
 
 static int __init dmi_mark_tsc_unstable(struct dmi_system_id *d)
 {
 	printk(KERN_NOTICE "%s detected: marking TSC unstable.\n",
 		       d->ident);
-	mark_tsc_unstable();
+	tsc_unstable = 1;
 	return 0;
 }
@@ -415,11 +399,12 @@ __cpuinit int unsynchronized_tsc(void)
 	 * Intel systems are normally all synchronized.
 	 * Exceptions must mark TSC as unstable:
 	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-		return 0;
-
-	/* assume multi socket systems are not synchronized: */
-	return num_possible_cpus() > 1;
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
+		/* assume multi socket systems are not synchronized: */
+		if (num_possible_cpus() > 1)
+			tsc_unstable = 1;
+	}
+	return tsc_unstable;
 }
 
 static int __init init_tsc_clocksource(void)
@@ -429,8 +414,7 @@ static int __init init_tsc_clocksource(void)
 	/* check blacklist */
 	dmi_check_system(bad_tsc_dmi_table);
 
-	if (unsynchronized_tsc()) /* mark unstable if unsynced */
-		mark_tsc_unstable();
+	unsynchronized_tsc();
 	current_tsc_khz = tsc_khz;
 	clocksource_tsc.mult = clocksource_khz2mult(current_tsc_khz,
 						clocksource_tsc.shift);
......
@@ -44,7 +44,6 @@ typedef u64 cycle_t;
  * subtraction of non 64 bit counters
  * @mult:		cycle to nanosecond multiplier
  * @shift:		cycle to nanosecond divisor (power of two)
- * @update_callback:	called when safe to alter clocksource values
  * @flags:		flags describing special properties
  * @cycle_interval:	Used internally by timekeeping core, please ignore.
  * @xtime_interval:	Used internally by timekeeping core, please ignore.
@@ -57,7 +56,6 @@ struct clocksource {
 	cycle_t mask;
 	u32 mult;
 	u32 shift;
-	int (*update_callback)(void);
 	unsigned long flags;
 
 	/* timekeeping specific data, ignore */
......
@@ -848,8 +848,6 @@ static int change_clocksource(void)
 		printk(KERN_INFO "Time: %s clocksource has been installed.\n",
 		       clock->name);
 		return 1;
-	} else if (clock->update_callback) {
-		return clock->update_callback();
 	}
 	return 0;
 }
......