Commit d291676c authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  sched debug: dont print kernel address in /proc/sched_debug
  sched: fix typo in the FAIR_GROUP_SCHED branch
  sched: improve rq-clock overflow logic
parents 3dab307e 5167e75f
...@@ -263,6 +263,7 @@ struct rq { ...@@ -263,6 +263,7 @@ struct rq {
unsigned int clock_warps, clock_overflows; unsigned int clock_warps, clock_overflows;
unsigned int clock_unstable_events; unsigned int clock_unstable_events;
u64 tick_timestamp;
atomic_t nr_iowait; atomic_t nr_iowait;
...@@ -341,7 +342,10 @@ static void __update_rq_clock(struct rq *rq) ...@@ -341,7 +342,10 @@ static void __update_rq_clock(struct rq *rq)
/* /*
* Catch too large forward jumps too: * Catch too large forward jumps too:
*/ */
if (unlikely(delta > 2*TICK_NSEC)) { if (unlikely(clock + delta > rq->tick_timestamp + TICK_NSEC)) {
if (clock < rq->tick_timestamp + TICK_NSEC)
clock = rq->tick_timestamp + TICK_NSEC;
else
clock++; clock++;
rq->clock_overflows++; rq->clock_overflows++;
} else { } else {
...@@ -3308,9 +3312,16 @@ void scheduler_tick(void) ...@@ -3308,9 +3312,16 @@ void scheduler_tick(void)
int cpu = smp_processor_id(); int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu); struct rq *rq = cpu_rq(cpu);
struct task_struct *curr = rq->curr; struct task_struct *curr = rq->curr;
u64 next_tick = rq->tick_timestamp + TICK_NSEC;
spin_lock(&rq->lock); spin_lock(&rq->lock);
__update_rq_clock(rq); __update_rq_clock(rq);
/*
* Let rq->clock advance by at least TICK_NSEC:
*/
if (unlikely(rq->clock < next_tick))
rq->clock = next_tick;
rq->tick_timestamp = rq->clock;
update_cpu_load(rq); update_cpu_load(rq);
if (curr != rq->idle) /* FIXME: needed? */ if (curr != rq->idle) /* FIXME: needed? */
curr->sched_class->task_tick(rq, curr); curr->sched_class->task_tick(rq, curr);
......
...@@ -108,7 +108,7 @@ print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) ...@@ -108,7 +108,7 @@ print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{ {
SEQ_printf(m, "\ncfs_rq %p\n", cfs_rq); SEQ_printf(m, "\ncfs_rq\n");
#define P(x) \ #define P(x) \
SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(cfs_rq->x)) SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))
......
...@@ -959,13 +959,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, ...@@ -959,13 +959,12 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
for_each_leaf_cfs_rq(busiest, busy_cfs_rq) { for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_FAIR_GROUP_SCHED
struct cfs_rq *this_cfs_rq; struct cfs_rq *this_cfs_rq;
long imbalances; long imbalance;
unsigned long maxload; unsigned long maxload;
this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu); this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
imbalance = busy_cfs_rq->load.weight - imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
this_cfs_rq->load.weight;
/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */ /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
if (imbalance <= 0) if (imbalance <= 0)
continue; continue;
...@@ -976,7 +975,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, ...@@ -976,7 +975,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
*this_best_prio = cfs_rq_best_prio(this_cfs_rq); *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else #else
#define maxload rem_load_move # define maxload rem_load_move
#endif #endif
/* pass busy_cfs_rq argument into /* pass busy_cfs_rq argument into
* load_balance_[start|next]_fair iterators * load_balance_[start|next]_fair iterators
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment