Commit def0a9b2 authored by Peter Zijlstra, committed by Ingo Molnar

sched_clock: Make it NMI safe

Arjan complained about the suckiness of TSC on modern machines, and
asked if we could do something about that for PERF_SAMPLE_TIME.

Make cpu_clock() NMI safe by removing the spinlock and using
cmpxchg. This also makes it smaller and more robust.

Affects architectures that use HAVE_UNSTABLE_SCHED_CLOCK, i.e. IA64
and x86.
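
For illustration only, here is a minimal user-space sketch of the lockless pattern the patch adopts: read the current clock, compute the updated value, and publish it with a compare-and-swap, retrying if another context (e.g. an NMI handler) got in between. This is not the kernel code; C11 stdatomic stands in for the kernel's cmpxchg(), and the names scd_clock and advance_clock are made up for the example.

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t scd_clock;	/* stands in for scd->clock */

static uint64_t advance_clock(int64_t delta)
{
	uint64_t old, val;

	old = atomic_load(&scd_clock);
	do {
		/* clamp negative deltas, mirroring the backward-motion filter */
		val = old + (delta > 0 ? (uint64_t)delta : 0);
		/*
		 * On failure the CAS reloads 'old' with the current value
		 * and the loop recomputes, like the 'goto again' in the
		 * patch below.
		 */
	} while (!atomic_compare_exchange_weak(&scd_clock, &old, val));

	return val;
}

The win over the spinlocked version: a task interrupted by an NMI that runs the same code cannot deadlock on a lock it already holds; the worst case is an extra trip through the retry loop.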
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent cf450a73
kernel/perf_counter.c:

@@ -2955,10 +2955,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 	}
 
 	if (sample_type & PERF_SAMPLE_TIME) {
-		/*
-		 * Maybe do better on x86 and provide cpu_clock_nmi()
-		 */
-		data->time = sched_clock();
+		data->time = perf_clock();
 
 		header->size += sizeof(data->time);
 	}
@@ -3488,7 +3485,7 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
-		.time = sched_clock(),
+		.time = perf_clock(),
 		.id = primary_counter_id(counter),
 		.stream_id = counter->id,
 	};
@@ -3540,7 +3537,7 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
 	}
 
 	if (counter->attr.freq) {
-		u64 now = sched_clock();
+		u64 now = perf_clock();
 		s64 delta = now - hwc->freq_stamp;
 
 		hwc->freq_stamp = now;
kernel/sched_clock.c:

@@ -48,13 +48,6 @@ static __read_mostly int sched_clock_running;
 __read_mostly int sched_clock_stable;
 
 struct sched_clock_data {
-	/*
-	 * Raw spinlock - this is a special case: this might be called
-	 * from within instrumentation code so we dont want to do any
-	 * instrumentation ourselves.
-	 */
-	raw_spinlock_t lock;
-
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
@@ -80,7 +73,6 @@ void sched_clock_init(void)
 	for_each_possible_cpu(cpu) {
 		struct sched_clock_data *scd = cpu_sdc(cpu);
 
-		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
@@ -109,14 +101,19 @@ static inline u64 wrap_max(u64 x, u64 y)
  *  - filter out backward motion
  *  - use the GTOD tick value to create a window to filter crazy TSC values
  */
-static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
+static u64 sched_clock_local(struct sched_clock_data *scd)
 {
-	s64 delta = now - scd->tick_raw;
-	u64 clock, min_clock, max_clock;
+	u64 now, clock, old_clock, min_clock, max_clock;
+	s64 delta;
 
+again:
+	now = sched_clock();
+	delta = now - scd->tick_raw;
 	if (unlikely(delta < 0))
 		delta = 0;
 
+	old_clock = scd->clock;
+
 	/*
 	 * scd->clock = clamp(scd->tick_gtod + delta,
 	 *		      max(scd->tick_gtod, scd->clock),
@@ -124,84 +121,73 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	 */
 
 	clock = scd->tick_gtod + delta;
-	min_clock = wrap_max(scd->tick_gtod, scd->clock);
-	max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
+	min_clock = wrap_max(scd->tick_gtod, old_clock);
+	max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
 
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
 
-	scd->clock = clock;
+	if (cmpxchg(&scd->clock, old_clock, clock) != old_clock)
+		goto again;
 
-	return scd->clock;
+	return clock;
 }
 
-static void lock_double_clock(struct sched_clock_data *data1,
-			      struct sched_clock_data *data2)
+static u64 sched_clock_remote(struct sched_clock_data *scd)
 {
-	if (data1 < data2) {
-		__raw_spin_lock(&data1->lock);
-		__raw_spin_lock(&data2->lock);
+	struct sched_clock_data *my_scd = this_scd();
+	u64 this_clock, remote_clock;
+	u64 *ptr, old_val, val;
+
+	sched_clock_local(my_scd);
+again:
+	this_clock = my_scd->clock;
+	remote_clock = scd->clock;
+
+	/*
+	 * Use the opportunity that we have both locks
+	 * taken to couple the two clocks: we take the
+	 * larger time as the latest time for both
+	 * runqueues. (this creates monotonic movement)
+	 */
+	if (likely((s64)(remote_clock - this_clock) < 0)) {
+		ptr = &scd->clock;
+		old_val = remote_clock;
+		val = this_clock;
 	} else {
-		__raw_spin_lock(&data2->lock);
-		__raw_spin_lock(&data1->lock);
+		/*
+		 * Should be rare, but possible:
+		 */
+		ptr = &my_scd->clock;
+		old_val = this_clock;
+		val = remote_clock;
 	}
+
+	if (cmpxchg(ptr, old_val, val) != old_val)
+		goto again;
+
+	return val;
 }
 
 u64 sched_clock_cpu(int cpu)
 {
-	u64 now, clock, this_clock, remote_clock;
 	struct sched_clock_data *scd;
+	u64 clock;
+
+	WARN_ON_ONCE(!irqs_disabled());
 
 	if (sched_clock_stable)
 		return sched_clock();
 
-	scd = cpu_sdc(cpu);
-
-	/*
-	 * Normally this is not called in NMI context - but if it is,
-	 * trying to do any locking here is totally lethal.
-	 */
-	if (unlikely(in_nmi()))
-		return scd->clock;
-
 	if (unlikely(!sched_clock_running))
 		return 0ull;
 
-	WARN_ON_ONCE(!irqs_disabled());
-	now = sched_clock();
-
-	if (cpu != raw_smp_processor_id()) {
-		struct sched_clock_data *my_scd = this_scd();
-
-		lock_double_clock(scd, my_scd);
-
-		this_clock = __update_sched_clock(my_scd, now);
-		remote_clock = scd->clock;
-
-		/*
-		 * Use the opportunity that we have both locks
-		 * taken to couple the two clocks: we take the
-		 * larger time as the latest time for both
-		 * runqueues. (this creates monotonic movement)
-		 */
-		if (likely((s64)(remote_clock - this_clock) < 0)) {
-			clock = this_clock;
-			scd->clock = clock;
-		} else {
-			/*
-			 * Should be rare, but possible:
-			 */
-			clock = remote_clock;
-			my_scd->clock = remote_clock;
-		}
-
-		__raw_spin_unlock(&my_scd->lock);
-	} else {
-		__raw_spin_lock(&scd->lock);
-		clock = __update_sched_clock(scd, now);
-	}
-
-	__raw_spin_unlock(&scd->lock);
+	scd = cpu_sdc(cpu);
+
+	if (cpu != smp_processor_id())
+		clock = sched_clock_remote(scd);
+	else
+		clock = sched_clock_local(scd);
 
 	return clock;
 }
@@ -223,11 +209,9 @@ void sched_clock_tick(void)
 	now_gtod = ktime_to_ns(ktime_get());
 	now = sched_clock();
 
-	__raw_spin_lock(&scd->lock);
 	scd->tick_raw = now;
 	scd->tick_gtod = now_gtod;
-	__update_sched_clock(scd, now);
-	__raw_spin_unlock(&scd->lock);
+	sched_clock_local(scd);
 }
 
 /*