Commit 609b07b7 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "A fix for KVM's scheduler clock which (erroneously) was always marked
  unstable, a fix for RT/DL load balancing, plus latency fixes"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/clock, x86/tsc: Rework the x86 'unstable' sched_clock() interface
  sched/core: Fix pick_next_task() for RT,DL
  sched/fair: Make select_idle_cpu() more aggressive
parents c3abcabe f94c8d11
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -556,10 +556,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		if (check_tsc_unstable())
-			clear_sched_clock_stable();
-	} else {
-		clear_sched_clock_stable();
 	}
 
 	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -105,8 +105,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_64
 	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
 #endif
-
-	clear_sched_clock_stable();
 }
 
 static void init_centaur(struct cpuinfo_x86 *c)
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -88,7 +88,6 @@ static void default_init(struct cpuinfo_x86 *c)
 			strcpy(c->x86_model_id, "386");
 	}
 #endif
-	clear_sched_clock_stable();
 }
 
 static const struct cpu_dev default_cpu = {
@@ -1077,8 +1076,6 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (this_cpu->c_init)
 		this_cpu->c_init(c);
-	else
-		clear_sched_clock_stable();
 
 	/* Disable the PN if appropriate */
 	squash_the_stupid_serial_number(c);
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -185,7 +185,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
 		break;
 	}
-	clear_sched_clock_stable();
 }
 
 static void init_cyrix(struct cpuinfo_x86 *c)
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -162,10 +162,6 @@ static void early_init_intel(struct cpuinfo_x86 *c)
 	if (c->x86_power & (1 << 8)) {
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
-		if (check_tsc_unstable())
-			clear_sched_clock_stable();
-	} else {
-		clear_sched_clock_stable();
 	}
 
 	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -16,8 +16,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
 		if (xlvl >= 0x80860001)
 			c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
 	}
-
-	clear_sched_clock_stable();
 }
 
 static void init_transmeta(struct cpuinfo_x86 *c)
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -327,9 +327,16 @@ unsigned long long sched_clock(void)
 {
 	return paravirt_sched_clock();
 }
+
+static inline bool using_native_sched_clock(void)
+{
+	return pv_time_ops.sched_clock == native_sched_clock;
+}
 #else
 unsigned long long
 sched_clock(void) __attribute__((alias("native_sched_clock")));
+
+static inline bool using_native_sched_clock(void) { return true; }
 #endif
 
 int check_tsc_unstable(void)
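An aside on the `alias` attribute in the non-paravirt branch above: it does not create a wrapper, it makes `sched_clock` and `native_sched_clock` the same symbol, with zero call overhead. A standalone sketch of the mechanism, with hypothetical names, buildable with GCC or Clang (the attribute is a GNU extension):

#include <stdio.h>

unsigned long long native_clock(void)
{
	return 42; /* stand-in for a TSC read */
}

/* clock_alias is another name for the same function body: no wrapper,
 * no indirect call, exactly like sched_clock() in the hunk above. */
unsigned long long clock_alias(void) __attribute__((alias("native_clock")));

int main(void)
{
	printf("%llu\n", clock_alias()); /* prints 42 */
	return 0;
}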
@@ -1112,7 +1119,9 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
 {
 	if (tsc_unstable)
 		return;
+
 	tsc_unstable = 1;
-	clear_sched_clock_stable();
+	if (using_native_sched_clock())
+		clear_sched_clock_stable();
 	disable_sched_clock_irqtime();
 	pr_info("Marking TSC unstable due to clocksource watchdog\n");
@@ -1135,19 +1144,21 @@ static struct clocksource clocksource_tsc = {
 
 void mark_tsc_unstable(char *reason)
 {
-	if (!tsc_unstable) {
-		tsc_unstable = 1;
-		clear_sched_clock_stable();
-		disable_sched_clock_irqtime();
-		pr_info("Marking TSC unstable due to %s\n", reason);
-		/* Change only the rating, when not registered */
-		if (clocksource_tsc.mult)
-			clocksource_mark_unstable(&clocksource_tsc);
-		else {
-			clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-			clocksource_tsc.rating = 0;
-		}
-	}
+	if (tsc_unstable)
+		return;
+
+	tsc_unstable = 1;
+	if (using_native_sched_clock())
+		clear_sched_clock_stable();
+	disable_sched_clock_irqtime();
+	pr_info("Marking TSC unstable due to %s\n", reason);
+	/* Change only the rating, when not registered */
+	if (clocksource_tsc.mult) {
+		clocksource_mark_unstable(&clocksource_tsc);
+	} else {
+		clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
+		clocksource_tsc.rating = 0;
+	}
 }
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
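Taken together, the tsc.c hunks make a TSC-unstable event clobber sched_clock stability only when the native TSC clock is actually driving sched_clock(); a paravirt clock such as KVM's tracks its own stability and is left alone, which is the fix the merge message refers to. A minimal user-space model of that gating; the stub bodies and the `pv_sched_clock` pointer are illustrative assumptions, not kernel code:

#include <stdbool.h>
#include <stdio.h>

static unsigned long long native_sched_clock_model(void) { return 0; }

/* Under paravirt this pointer may target a hypervisor clock instead. */
static unsigned long long (*pv_sched_clock)(void) = native_sched_clock_model;

static bool using_native_sched_clock(void)
{
	return pv_sched_clock == native_sched_clock_model;
}

static void mark_tsc_unstable_model(const char *reason)
{
	if (using_native_sched_clock())
		printf("sched_clock marked unstable (%s)\n", reason);
	else
		printf("paravirt clock in use; sched_clock untouched (%s)\n", reason);
}

int main(void)
{
	mark_tsc_unstable_model("clocksource watchdog");
	return 0;
}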
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3287,10 +3287,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	struct task_struct *p;
 
 	/*
-	 * Optimization: we know that if all tasks are in
-	 * the fair class we can call that function directly:
+	 * Optimization: we know that if all tasks are in the fair class we can
+	 * call that function directly, but only if the @prev task wasn't of a
+	 * higher scheduling class, because otherwise those lose the
+	 * opportunity to pull in more work from other CPUs.
 	 */
-	if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
+	if (likely((prev->sched_class == &idle_sched_class ||
+		    prev->sched_class == &fair_sched_class) &&
+		   rq->nr_running == rq->cfs.h_nr_running)) {
+
 		p = fair_sched_class.pick_next_task(rq, prev, rf);
 		if (unlikely(p == RETRY_TASK))
 			goto again;
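The new guard reads as: take the fair-only fast path only when @prev cannot have left higher-class work behind; an RT or DL @prev must go through the full class walk so its balance callbacks can pull work from other CPUs. A toy model of the condition, with simplified stand-in types (not the kernel's):

#include <stdbool.h>
#include <stdio.h>

enum sched_class_id { IDLE_CLASS, FAIR_CLASS, RT_CLASS, DL_CLASS };

struct rq_model {
	unsigned int nr_running;       /* all runnable tasks on this CPU */
	unsigned int cfs_h_nr_running; /* runnable fair-class tasks */
};

/* Mirror of the new condition: fast path only if prev was idle or
 * fair AND every runnable task on the rq is in the fair class. */
static bool can_take_fair_fast_path(enum sched_class_id prev_class,
				    const struct rq_model *rq)
{
	return (prev_class == IDLE_CLASS || prev_class == FAIR_CLASS) &&
	       rq->nr_running == rq->cfs_h_nr_running;
}

int main(void)
{
	struct rq_model rq = { .nr_running = 2, .cfs_h_nr_running = 2 };

	/* An RT prev takes the slow path even on an all-fair rq. */
	printf("prev=RT   fast=%d\n", can_take_fair_fast_path(RT_CLASS, &rq));
	printf("prev=FAIR fast=%d\n", can_take_fair_fast_path(FAIR_CLASS, &rq));
	return 0;
}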
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5799,7 +5799,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
 	 * Due to large variance we need a large fuzz factor; hackbench in
 	 * particular is sensitive here.
 	 */
-	if ((avg_idle / 512) < avg_cost)
+	if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
 		return -1;
 
 	time = local_clock();
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
  */
 SCHED_FEAT(TTWU_QUEUE, true)
 
+/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
+ */
+SCHED_FEAT(SIS_AVG_CPU, false)
+
 #ifdef HAVE_RT_PUSH_IPI
 /*
  * In order to avoid a thundering herd attack of CPUs that are
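With SIS_AVG_CPU defaulting to false, the avg_idle cutoff in select_idle_cpu() becomes opt-in, which is what makes the idle search "more aggressive". For scale (values are nanoseconds): avg_idle = 500 us and avg_cost = 2 us gives 500000/512 ≈ 976 < 2000, so the old code would have skipped the scan, while the new default scans anyway. A hedged sketch of the gate; the function name and the example numbers are illustrative, only the SIS_AVG_CPU=false default comes from the hunk above:

#include <stdbool.h>
#include <stdio.h>

static const bool sis_avg_cpu = false; /* SCHED_FEAT(SIS_AVG_CPU, false) */

static bool skip_idle_scan(unsigned long long avg_idle_ns,
			   unsigned long long avg_cost_ns)
{
	/* Large /512 fuzz factor: only bail out when the expected idle
	 * headroom is far smaller than the cost of scanning. */
	return sis_avg_cpu && (avg_idle_ns / 512) < avg_cost_ns;
}

int main(void)
{
	/* 500us average idle vs a 2us scan cost: the ratio test would
	 * skip, but the feature being false keeps scanning. */
	printf("skip=%d\n", skip_idle_scan(500000, 2000));
	return 0;
}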