Commit 1489939f authored by John Stultz, committed by Linus Torvalds

[PATCH] time: x86_64: convert x86_64 to use GENERIC_TIME

This patch converts x86_64 to use the GENERIC_TIME infrastructure and adds
clocksource structures for both TSC and HPET (ACPI PM is shared w/ i386).

[akpm@osdl.org: fix printk timestamps]
[akpm@osdl.org: fix printk cleanups]
[akpm@osdl.org: hpet build fix]
Signed-off-by: John Stultz <johnstul@us.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andi Kleen <ak@muc.de>
Cc: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c37e7bb5
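The whole conversion hangs off one abstraction, so a compact sketch may help before the hunks: under GENERIC_TIME the architecture no longer computes gettimeofday offsets itself; it registers a description of a free-running counter, and the generic timekeeping core converts counter deltas to nanoseconds as (cycles * mult) >> shift. The sketch below is distilled from the structures this patch adds; the "example" names and the 1000 kHz figure are placeholders, not code from the commit:

	#include <linux/clocksource.h>

	/* a hypothetical free-running counter wrapped as a clocksource */
	static cycle_t example_read(void)
	{
		return (cycle_t)0;	/* a real driver reads its counter here */
	}

	static struct clocksource clocksource_example = {
		.name	= "example",
		.rating	= 100,			/* selection priority: tsc=300, hpet=250 below */
		.read	= example_read,
		.mask	= CLOCKSOURCE_MASK(32),	/* valid counter bits */
		.mult	= 0,			/* cycles->ns multiplier, filled at init */
		.shift	= 22,			/* ns = (cycles * mult) >> shift */
		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
	};

	static int __init example_clocksource_init(void)
	{
		clocksource_example.mult =
			clocksource_khz2mult(1000, clocksource_example.shift);
		return clocksource_register(&clocksource_example);
	}
	module_init(example_clocksource_init);

The timekeeping core then selects the highest-rated registered clocksource.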
@@ -24,6 +24,10 @@ config X86
 	bool
 	default y
 
+config GENERIC_TIME
+	bool
+	default y
+
 config ZONE_DMA32
 	bool
 	default y
...
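GENERIC_TIME carries no prompt; it is unconditionally y and simply opts x86_64 into the generic timekeeping core, replacing the architecture's private do_gettimeoffset-based gettimeofday path removed in the hunks below.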
@@ -786,7 +786,7 @@ static void setup_APIC_timer(unsigned int clocks)
 	/* Turn off PIT interrupt if we use APIC timer as main timer.
 	   Only works with the PM timer right now
 	   TBD fix it for HPET too. */
-	if (vxtime.mode == VXTIME_PMTMR &&
+	if ((pmtmr_ioport != 0) &&
 	    smp_processor_id() == boot_cpu_id &&
 	    apic_runs_main_timer == 1 &&
 	    !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
...
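(With vxtime and its VXTIME_PMTMR mode removed by this patch, the old test can no longer be asked; checking pmtmr_ioport directly is presumably the intended substitute, since it is non-zero only when the ACPI PM timer was detected, and the "nopmtimer" option clears it, as the pmtimer.c hunk below shows.)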
@@ -21,12 +21,6 @@ unsigned long hpet_tick;	/* HPET clocks / interrupt */
 int hpet_use_timer;	/* Use counter of hpet for time keeping,
 			 * otherwise PIT
 			 */
-unsigned int do_gettimeoffset_hpet(void)
-{
-	/* cap counter read to one tick to avoid inconsistencies */
-	unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
-	return (min(counter,hpet_tick) * vxtime.quot) >> US_SCALE;
-}
 
 #ifdef CONFIG_HPET
 static __init int late_hpet_init(void)

@@ -451,3 +445,61 @@ static int __init nohpet_setup(char *s)
 __setup("nohpet", nohpet_setup);
 
+#define HPET_MASK	0xFFFFFFFF
+#define HPET_SHIFT	22
+
+/* FSEC = 10^-15 NSEC = 10^-9 */
+#define FSEC_PER_NSEC	1000000
+
+static void *hpet_ptr;
+
+static cycle_t read_hpet(void)
+{
+	return (cycle_t)readl(hpet_ptr);
+}
+
+struct clocksource clocksource_hpet = {
+	.name		= "hpet",
+	.rating		= 250,
+	.read		= read_hpet,
+	.mask		= (cycle_t)HPET_MASK,
+	.mult		= 0, /* set below */
+	.shift		= HPET_SHIFT,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init init_hpet_clocksource(void)
+{
+	unsigned long hpet_period;
+	void __iomem *hpet_base;
+	u64 tmp;
+
+	if (!hpet_address)
+		return -ENODEV;
+
+	/* calculate the hpet address: */
+	hpet_base = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
+	hpet_ptr = hpet_base + HPET_COUNTER;
+
+	/* calculate the frequency: */
+	hpet_period = readl(hpet_base + HPET_PERIOD);
+
+	/*
+	 * hpet period is in femtoseconds per cycle,
+	 * so we need to convert this to ns/cyc units
+	 * approximated by mult/2^shift
+	 *
+	 *  fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
+	 *  fsec/cyc * 1ns/1000000fsec * 2^shift = mult
+	 *  fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
+	 *  (fsec/cyc << shift)/1000000 = mult
+	 *  (hpet_period << shift)/FSEC_PER_NSEC = mult
+	 */
+	tmp = (u64)hpet_period << HPET_SHIFT;
+	do_div(tmp, FSEC_PER_NSEC);
+	clocksource_hpet.mult = (u32)tmp;
+
+	return clocksource_register(&clocksource_hpet);
+}
+
+module_init(init_hpet_clocksource);
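To make the mult computation in init_hpet_clocksource() concrete, a worked example (assuming the common 14.31818 MHz HPET, i.e. hpet_period ≈ 69841279 fs per cycle; this value is not in the patch itself):

	mult = (69841279 << 22) / 1000000 = 292935555
	ns   = (cycles * 292935555) >> 22 ≈ cycles * 69.84

i.e. each HPET tick is reported as ~69.84 ns, matching 1 / 14.31818 MHz.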
@@ -24,15 +24,6 @@
 #include <asm/msr.h>
 #include <asm/vsyscall.h>
 
-/* The I/O port the PMTMR resides at.
- * The location is detected during setup_arch(),
- * in arch/i386/kernel/acpi/boot.c */
-u32 pmtmr_ioport __read_mostly;
-
-/* value of the Power timer at last timer interrupt */
-static u32 offset_delay;
-static u32 last_pmtmr_tick;
-
 #define ACPI_PM_MASK 0xFFFFFF /* limit it to 24 bits */
 
 static inline u32 cyc2us(u32 cycles)

@@ -48,38 +39,6 @@ static inline u32 cyc2us(u32 cycles)
 	return (cycles >> 10);
 }
 
-int pmtimer_mark_offset(void)
-{
-	static int first_run = 1;
-	unsigned long tsc;
-	u32 lost;
-
-	u32 tick = inl(pmtmr_ioport);
-	u32 delta;
-
-	delta = cyc2us((tick - last_pmtmr_tick) & ACPI_PM_MASK);
-
-	last_pmtmr_tick = tick;
-	monotonic_base += delta * NSEC_PER_USEC;
-
-	delta += offset_delay;
-
-	lost = delta / (USEC_PER_SEC / HZ);
-	offset_delay = delta % (USEC_PER_SEC / HZ);
-
-	rdtscll(tsc);
-	vxtime.last_tsc = tsc - offset_delay * (u64)cpu_khz / 1000;
-
-	/* don't calculate delay for first run,
-	   or if we've got less than a tick */
-	if (first_run || (lost < 1)) {
-		first_run = 0;
-		offset_delay = 0;
-	}
-
-	return lost - 1;
-}
-
 static unsigned pmtimer_wait_tick(void)
 {
 	u32 a, b;

@@ -101,23 +60,6 @@ void pmtimer_wait(unsigned us)
 	} while (cyc2us(b - a) < us);
 }
 
-void pmtimer_resume(void)
-{
-	last_pmtmr_tick = inl(pmtmr_ioport);
-}
-
-unsigned int do_gettimeoffset_pm(void)
-{
-	u32 now, offset, delta = 0;
-
-	offset = last_pmtmr_tick;
-	now = inl(pmtmr_ioport);
-	delta = (now - offset) & ACPI_PM_MASK;
-
-	return offset_delay + cyc2us(delta);
-}
-
 static int __init nopmtimer_setup(char *s)
 {
 	pmtmr_ioport = 0;
...
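For reference (hardware facts, not lines from this diff): the ACPI PM timer is a 24-bit counter clocked at 3.579545 MHz, hence the 0xFFFFFF mask; it wraps every 0x1000000 / 3579545 ≈ 4.69 s. The cyc2us() helper kept above approximates 1/3.579545 as 286/1024, multiplying by 286 before the final >> 10 visible in the context line. A worked check: one 10 ms tick at HZ=100 is ~35795 PM-timer cycles, and (35795 * 286) >> 10 = 9997 µs ≈ 10 ms.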
@@ -982,7 +982,6 @@ void __init smp_cpus_done(unsigned int max_cpus)
 	smp_cleanup_boot();
 	setup_ioapic_dest();
 	check_nmi_watchdog();
-	time_init_gtod();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
...
[one file's diff is collapsed in this view]
@@ -9,32 +9,11 @@
 
 #include <asm/timex.h>
 
-int notsc __initdata = 0;
+static int notsc __initdata = 0;
 
 unsigned int cpu_khz;	/* TSC clocks / usec, not used here */
 EXPORT_SYMBOL(cpu_khz);
 
-/*
- * do_gettimeoffset() returns microseconds since last timer interrupt was
- * triggered by hardware. A memory read of HPET is slower than a register read
- * of TSC, but much more reliable. It's also synchronized to the timer
- * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
- * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
- * This is not a problem, because jiffies hasn't updated either. They are bound
- * together by xtime_lock.
- */
-unsigned int do_gettimeoffset_tsc(void)
-{
-	unsigned long t;
-	unsigned long x;
-	t = get_cycles_sync();
-	if (t < vxtime.last_tsc)
-		t = vxtime.last_tsc; /* hack */
-	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> US_SCALE;
-	return x;
-}
-
 static unsigned int cyc2ns_scale __read_mostly;

@@ -42,7 +21,7 @@ void set_cyc2ns_scale(unsigned long khz)
 	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
 }
 
-unsigned long long cycles_2_ns(unsigned long long cyc)
+static unsigned long long cycles_2_ns(unsigned long long cyc)
 {
 	return (cyc * cyc2ns_scale) >> NS_SCALE;
 }
@@ -61,6 +40,12 @@ unsigned long long sched_clock(void)
 	return cycles_2_ns(a);
 }
 
+static int tsc_unstable;
+
+static inline int check_tsc_unstable(void)
+{
+	return tsc_unstable;
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency

@@ -89,24 +74,6 @@ static void handle_cpufreq_delayed_get(struct work_struct *v)
 	cpufreq_delayed_issched = 0;
 }
 
-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
- * to verify the CPU frequency the timing core thinks the CPU is running
- * at is still correct.
- */
-void cpufreq_delayed_get(void)
-{
-	static int warned;
-	if (cpufreq_init && !cpufreq_delayed_issched) {
-		cpufreq_delayed_issched = 1;
-		if (!warned) {
-			warned = 1;
-			printk(KERN_DEBUG "Losing some ticks... "
-				"checking if CPU frequency changed.\n");
-		}
-		schedule_work(&cpufreq_delayed_get_work);
-	}
-}
-
 static unsigned int ref_freq = 0;
 static unsigned long loops_per_jiffy_ref = 0;

@@ -142,7 +109,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
 		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
-			vxtime.tsc_quot = (USEC_PER_MSEC << US_SCALE) / cpu_khz;
+			mark_tsc_unstable();
 	}
 
 	set_cyc2ns_scale(cpu_khz_ref);

@@ -169,12 +136,6 @@ core_initcall(cpufreq_tsc);
 
 static int tsc_unstable = 0;
 
-void mark_tsc_unstable(void)
-{
-	tsc_unstable = 1;
-}
-EXPORT_SYMBOL_GPL(mark_tsc_unstable);
-
 /*
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.

@@ -210,3 +171,49 @@ int __init notsc_setup(char *s)
 }
 
 __setup("notsc", notsc_setup);
+
+/* clock source code: */
+
+static cycle_t read_tsc(void)
+{
+	cycle_t ret = (cycle_t)get_cycles_sync();
+	return ret;
+}
+
+static struct clocksource clocksource_tsc = {
+	.name		= "tsc",
+	.rating		= 300,
+	.read		= read_tsc,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.shift		= 22,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS |
+			  CLOCK_SOURCE_MUST_VERIFY,
+};
+
+void mark_tsc_unstable(void)
+{
+	if (!tsc_unstable) {
+		tsc_unstable = 1;
+		/* Change only the rating, when not registered */
+		if (clocksource_tsc.mult)
+			clocksource_change_rating(&clocksource_tsc, 0);
+		else
+			clocksource_tsc.rating = 0;
+	}
+}
+EXPORT_SYMBOL_GPL(mark_tsc_unstable);
+
+static int __init init_tsc_clocksource(void)
+{
+	if (!notsc) {
+		clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
+							clocksource_tsc.shift);
+		if (check_tsc_unstable())
+			clocksource_tsc.rating = 0;
+
+		return clocksource_register(&clocksource_tsc);
+	}
+	return 0;
+}
+
+module_init(init_tsc_clocksource);
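For the same assumed 2 GHz example, clocksource_khz2mult(2000000, 22) yields roughly (1000000 << 22) / 2000000 = 2097152, i.e. 0.5 ns per cycle after the >> 22. Note the division of labor here: the TSC outranks the HPET (rating 300 vs 250), but CLOCK_SOURCE_MUST_VERIFY subjects it to verification by the clocksource watchdog, and mark_tsc_unstable() demotes its rating to 0 so the generic code falls back to hpet or acpi_pm.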
@@ -117,7 +117,7 @@ __setup("hcheck_reboot", hangcheck_parse_reboot);
 __setup("hcheck_dump_tasks", hangcheck_parse_dump_tasks);
 #endif /* not MODULE */
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_S390)
+#if defined(CONFIG_S390)
 # define HAVE_MONOTONIC
 # define TIMER_FREQ 1000000000ULL
 #elif defined(CONFIG_IA64)
...
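This follows from the removals above: hangcheck-timer's HAVE_MONOTONIC path used the kernel's monotonic_clock() interface, whose x86_64 backing (monotonic_base, see the proto.h hunk below) is deleted by this patch, so x86_64 now takes the driver's generic jiffies-based fallback.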
@@ -45,11 +45,9 @@ extern u32 pmtmr_ioport;
 #else
 #define pmtmr_ioport 0
 #endif
-extern unsigned long long monotonic_base;
 extern int sysctl_vsyscall;
 extern int nohpet;
 extern unsigned long vxtime_hz;
-extern void time_init_gtod(void);
 
 extern void early_printk(const char *fmt, ...) __attribute__((format(printf,1,2)));
...
@@ -28,9 +28,6 @@ extern int read_current_timer(unsigned long *timer_value);
 #define US_SCALE	32 /* 2^32, arbitrarily chosen */
 
 extern struct vxtime_data vxtime;
-extern void mark_tsc_unstable(void);
-extern unsigned int do_gettimeoffset_hpet(void);
-extern unsigned int do_gettimeoffset_tsc(void);
 extern void set_cyc2ns_scale(unsigned long khz);
-extern int notsc;
 
 #endif