Commit 9620639b authored by Linus Torvalds

Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  sched: Resched proper CPU on yield_to()
  sched: Allow users with sufficient RLIMIT_NICE to change from SCHED_IDLE policy
  sched: Allow SCHED_BATCH to preempt SCHED_IDLE tasks
  sched: Clean up the IRQ_TIME_ACCOUNTING code
  sched: Add #ifdef around irq time accounting functions
  sched, autogroup: Stop claiming ownership of the root task group
  sched, autogroup: Stop going ahead if autogroup is disabled
  sched, autogroup, sysctl: Use proc_dointvec_minmax() instead
  sched: Fix the group_imb logic
  sched: Clean up some f_b_g() comments
  sched: Clean up remnants of sd_idle
  sched: Wholesale removal of sd_idle logic
  sched: Add yield_to(task, preempt) functionality
  sched: Use a buddy to implement yield_task_fair()
  sched: Limit the scope of clear_buddies
  sched: Check the right ->nr_running in yield_task_fair()
  sched: Avoid expensive initial update_cfs_load(), on UP too
  sched: Fix switch_from_fair()
  sched: Simplify the idle scheduling class
  softirqs: Account ksoftirqd time as cpustat softirq
  ...
parents a926021c 6d1cafd8
@@ -30,6 +30,9 @@ typedef u64 cputime64_t;
 #define cputime64_to_jiffies64(__ct)	(__ct)
 #define jiffies64_to_cputime64(__jif)	(__jif)
 #define cputime_to_cputime64(__ct)	((u64) __ct)
+#define cputime64_gt(__a, __b)		((__a) > (__b))
+
+#define nsecs_to_cputime64(__ct)	nsecs_to_jiffies64(__ct)
 
 /*
...
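[Editor's note] The two macros added above support the IRQ_TIME_ACCOUNTING cleanup elsewhere in this series: raw nanosecond counters are converted to cputime64 values and compared monotonically. A minimal sketch of the intended idiom; the caller and its irq_delta_ns counter are assumptions, not part of this diff:

/* Sketch only: a hypothetical consumer of the new macros. */
static u64 charge_irq_time(u64 irq_delta_ns, u64 already_charged)
{
	u64 total = nsecs_to_cputime64(irq_delta_ns);	/* ns -> cputime64 */

	/* Charge only the not-yet-accounted remainder. */
	if (cputime64_gt(total, already_charged))
		return total - already_charged;
	return 0;
}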
@@ -427,6 +427,13 @@ extern void raise_softirq(unsigned int nr);
  */
 DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+	return this_cpu_read(ksoftirqd);
+}
+
 /* Try to send a softirq to a remote cpu. If this cannot be done, the
  * work will be queued to the local cpu.
  */
...
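[Editor's note] Exposing the per-CPU ksoftirqd pointer plus this accessor is what lets the series retire the PF_KSOFTIRQD process flag (removed from sched.h and softirq.c below): callers now identify the softirq thread by comparing task pointers instead of testing a flag. A sketch of the check, assuming it runs with preemption disabled so the per-CPU read is stable:

/* Sketch: "am I this CPU's softirq daemon?" without PF_KSOFTIRQD. */
static inline bool serving_as_ksoftirqd(void)
{
	return current == this_cpu_ksoftirqd();
}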
@@ -307,6 +307,7 @@ extern clock_t jiffies_to_clock_t(long x);
 extern unsigned long clock_t_to_jiffies(unsigned long x);
 extern u64 jiffies_64_to_clock_t(u64 x);
 extern u64 nsec_to_clock_t(u64 x);
+extern u64 nsecs_to_jiffies64(u64 n);
 extern unsigned long nsecs_to_jiffies(u64 n);
 
 #define TIMESTAMP_SIZE	30
...
@@ -1058,6 +1058,7 @@ struct sched_class {
 	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
 	void (*yield_task) (struct rq *rq);
+	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
 
 	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
@@ -1084,12 +1085,10 @@ struct sched_class {
 	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
 	void (*task_fork) (struct task_struct *p);
 
-	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
-			       int running);
-	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
-			     int running);
-	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
-			      int oldprio, int running);
+	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
+	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
+	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
+			      int oldprio);
 
 	unsigned int (*get_rr_interval) (struct rq *rq,
 					 struct task_struct *task);
@@ -1715,7 +1714,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_KSOFTIRQD	0x00000001	/* I am ksoftirqd */
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
@@ -1945,8 +1943,6 @@ int sched_rt_handler(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp,
 		loff_t *ppos);
 
-extern unsigned int sysctl_sched_compat_yield;
-
 #ifdef CONFIG_SCHED_AUTOGROUP
 extern unsigned int sysctl_sched_autogroup_enabled;
@@ -1977,6 +1973,7 @@ static inline int rt_mutex_getprio(struct task_struct *p)
 # define rt_mutex_adjust_pi(p)		do { } while (0)
 #endif
 
+extern bool yield_to(struct task_struct *p, bool preempt);
 extern void set_user_nice(struct task_struct *p, long nice);
 extern int task_prio(const struct task_struct *p);
 extern int task_nice(const struct task_struct *p);
...
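[Editor's note] yield_to() and the per-class ->yield_to_task hook implement a directed yield: the caller donates its remaining run time to a chosen task and, with preempt=true, requests an immediate reschedule; per the merged commit "sched: Resched proper CPU on yield_to()", that resched is issued on the target's runqueue, not the caller's. A hedged sketch of a caller; how the holder task is found (e.g., a KVM-style vCPU scan) is assumed, not shown here:

/* Sketch: donate our slice to a task we believe holds a lock we need. */
static void yield_to_lock_holder(struct task_struct *holder)
{
	/* true = preempt the target CPU's current task if possible */
	if (holder && yield_to(holder, true))
		return;		/* boost succeeded */

	yield();		/* otherwise fall back to a plain yield */
}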
@@ -12,7 +12,6 @@ static atomic_t autogroup_seq_nr;
 static void __init autogroup_init(struct task_struct *init_task)
 {
 	autogroup_default.tg = &root_task_group;
-	root_task_group.autogroup = &autogroup_default;
 	kref_init(&autogroup_default.kref);
 	init_rwsem(&autogroup_default.lock);
 	init_task->signal->autogroup = &autogroup_default;
@@ -130,7 +129,7 @@ task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 static inline bool task_group_is_autogroup(struct task_group *tg)
 {
-	return tg != &root_task_group && tg->autogroup;
+	return !!tg->autogroup;
 }
 
 static inline struct task_group *
@@ -161,11 +160,15 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	p->signal->autogroup = autogroup_kref_get(ag);
 
+	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+		goto out;
+
 	t = p;
 	do {
 		sched_move_task(t);
 	} while_each_thread(p, t);
 
+out:
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }
@@ -247,10 +250,14 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
 {
 	struct autogroup *ag = autogroup_task_get(p);
 
+	if (!task_group_is_autogroup(ag->tg))
+		goto out;
+
 	down_read(&ag->lock);
 	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
 	up_read(&ag->lock);
 
+out:
 	autogroup_kref_put(ag);
 }
 
 #endif /* CONFIG_PROC_FS */
@@ -258,9 +265,7 @@ void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
 #ifdef CONFIG_SCHED_DEBUG
 static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
-	int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
-
-	if (!enabled || !tg->autogroup)
+	if (!task_group_is_autogroup(tg))
 		return 0;
 
 	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
...
 #ifdef CONFIG_SCHED_AUTOGROUP
 
 struct autogroup {
-	/*
-	 * reference doesn't mean how many thread attach to this
-	 * autogroup now. It just stands for the number of task
-	 * could use this autogroup.
-	 */
 	struct kref		kref;
 	struct task_group	*tg;
 	struct rw_semaphore	lock;
...
@@ -179,7 +179,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	if (cfs_rq->rb_leftmost)
-		MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
+		MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
 	last = __pick_last_entity(cfs_rq);
 	if (last)
 		max_vruntime = last->vruntime;
...
@@ -52,31 +52,15 @@ static void set_curr_task_idle(struct rq *rq)
 {
 }
 
-static void switched_to_idle(struct rq *rq, struct task_struct *p,
-			     int running)
+static void switched_to_idle(struct rq *rq, struct task_struct *p)
 {
-	/* Can this actually happen?? */
-	if (running)
-		resched_task(rq->curr);
-	else
-		check_preempt_curr(rq, p, 0);
+	BUG();
 }
 
-static void prio_changed_idle(struct rq *rq, struct task_struct *p,
-			      int oldprio, int running)
+static void
+prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	/* This can happen for hot plug CPUS */
-
-	/*
-	 * Reschedule if we are currently running on this runqueue and
-	 * our priority decreased, or if we are not currently running on
-	 * this runqueue and our priority is higher than the current's
-	 */
-	if (running) {
-		if (p->prio > oldprio)
-			resched_task(rq->curr);
-	} else
-		check_preempt_curr(rq, p, 0);
+	BUG();
 }
 
 static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
...
@@ -1599,8 +1599,7 @@ static void rq_offline_rt(struct rq *rq)
  * When switch from the rt queue, we bring ourselves to a position
  * that we might want to pull RT tasks from other runqueues.
  */
-static void switched_from_rt(struct rq *rq, struct task_struct *p,
-			   int running)
+static void switched_from_rt(struct rq *rq, struct task_struct *p)
 {
 	/*
 	 * If there are other RT tasks then we will reschedule
@@ -1609,7 +1608,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
 	 * we may need to handle the pulling of RT tasks
 	 * now.
 	 */
-	if (!rq->rt.rt_nr_running)
+	if (p->se.on_rq && !rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
@@ -1628,8 +1627,7 @@ static inline void init_sched_rt_class(void)
  * with RT tasks. In this case we try to push them off to
  * other runqueues.
  */
-static void switched_to_rt(struct rq *rq, struct task_struct *p,
-			   int running)
+static void switched_to_rt(struct rq *rq, struct task_struct *p)
 {
 	int check_resched = 1;
@@ -1640,7 +1638,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
 	 * If that current running task is also an RT task
 	 * then see if we can move to another run queue.
 	 */
-	if (!running) {
+	if (p->se.on_rq && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->rt.overloaded && push_rt_task(rq) &&
 		    /* Don't resched if we changed runqueues */
@@ -1656,10 +1654,13 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p,
  * Priority of the task has changed. This may cause
  * us to initiate a push or pull.
  */
-static void prio_changed_rt(struct rq *rq, struct task_struct *p,
-			    int oldprio, int running)
+static void
+prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
 {
-	if (running) {
+	if (!p->se.on_rq)
+		return;
+
+	if (rq->curr == p) {
 #ifdef CONFIG_SMP
 		/*
 		 * If our priority decreases while running, we
...
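[Editor's note] The signature change visible here is the pattern applied to every scheduling class in this merge: the old running argument is dropped because each callback can derive the same information from scheduler state, with p->se.on_rq saying whether the task is queued and rq->curr == p whether it is the one currently running. Schematically (a sketch, not a function from this diff):

/* Sketch: recovering the old 'running' flag inside a class callback. */
static void prio_changed_example(struct rq *rq, struct task_struct *p,
				 int oldprio)
{
	if (!p->se.on_rq)		/* not queued: nothing to adjust */
		return;

	if (rq->curr == p) {
		/* the old running == 1 case: consider a resched or push */
	} else {
		/* the old running == 0 case */
		check_preempt_curr(rq, p, 0);
	}
}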
@@ -59,14 +59,13 @@ static void set_curr_task_stop(struct rq *rq)
 {
 }
 
-static void switched_to_stop(struct rq *rq, struct task_struct *p,
-			     int running)
+static void switched_to_stop(struct rq *rq, struct task_struct *p)
 {
 	BUG(); /* its impossible to change to this class */
 }
 
-static void prio_changed_stop(struct rq *rq, struct task_struct *p,
-			      int oldprio, int running)
+static void
+prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
 {
 	BUG(); /* how!?, what priority? */
 }
...
@@ -54,7 +54,7 @@ EXPORT_SYMBOL(irq_stat);
 
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
-static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
 char *softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
@@ -721,7 +721,6 @@ static int run_ksoftirqd(void * __bind_cpu)
 {
 	set_current_state(TASK_INTERRUPTIBLE);
 
-	current->flags |= PF_KSOFTIRQD;
 	while (!kthread_should_stop()) {
 		preempt_disable();
 		if (!local_softirq_pending()) {
...
@@ -361,20 +361,13 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= sched_rt_handler,
 	},
-	{
-		.procname	= "sched_compat_yield",
-		.data		= &sysctl_sched_compat_yield,
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
 #ifdef CONFIG_SCHED_AUTOGROUP
 	{
 		.procname	= "sched_autogroup_enabled",
 		.data		= &sysctl_sched_autogroup_enabled,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
...
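[Editor's note] Switching the handler to proc_dointvec_minmax() makes the existing extra1/extra2 bounds effective: writes outside [0, 1] to sched_autogroup_enabled are now rejected with -EINVAL, where plain proc_dointvec() accepted any integer and left those fields unused. The general shape of such an entry, sketched with a hypothetical flag rather than the real one:

/* Sketch: a 0/1-bounded sysctl entry using proc_dointvec_minmax(). */
static unsigned int example_enabled = 1;	/* hypothetical flag */
static int zero;
static int one = 1;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_enabled",
		.data		= &example_enabled,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,	/* reject values < 0 */
		.extra2		= &one,		/* reject values > 1 */
	},
	{ }
};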
@@ -645,7 +645,7 @@ u64 nsec_to_clock_t(u64 x)
 }
 
 /**
- * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
+ * nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
  *
  * @n:	nsecs in u64
  *
@@ -657,7 +657,7 @@ u64 nsec_to_clock_t(u64 x)
  * NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
  * ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
  */
-unsigned long nsecs_to_jiffies(u64 n)
+u64 nsecs_to_jiffies64(u64 n)
 {
 #if (NSEC_PER_SEC % HZ) == 0
 	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
@@ -674,6 +674,25 @@ unsigned long nsecs_to_jiffies(u64 n)
 #endif
 }
 
+/**
+ * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
+ *
+ * @n:	nsecs in u64
+ *
+ * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
+ * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
+ * for scheduler, not for use in device drivers to calculate timeout value.
+ *
+ * note:
+ *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
+ *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
+ */
+unsigned long nsecs_to_jiffies(u64 n)
+{
+	return (unsigned long)nsecs_to_jiffies64(n);
+}
+
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
 {
...
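[Editor's note] In the common case where HZ divides NSEC_PER_SEC evenly, the new 64-bit conversion is a single division by the nanoseconds-per-tick constant, and nsecs_to_jiffies() becomes a truncating wrapper around it. A standalone user-space sketch of that arithmetic, with HZ=1000 assumed:

#include <stdint.h>
#include <stdio.h>

#define HZ		1000
#define NSEC_PER_SEC	1000000000ULL

/* Sketch of nsecs_to_jiffies64()'s common case: one u64 division,
 * valid when NSEC_PER_SEC % HZ == 0 (HZ = 100, 250, 1000, ...). */
static uint64_t nsecs_to_jiffies64_sketch(uint64_t n)
{
	return n / (NSEC_PER_SEC / HZ);		/* one tick = 10^9/HZ ns */
}

int main(void)
{
	/* 2.5 ms at HZ=1000 -> 2 jiffies (division truncates) */
	printf("%llu\n",
	       (unsigned long long)nsecs_to_jiffies64_sketch(2500000));
	return 0;
}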