Commit 7e1fb765 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched

* git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched:
  futex: correctly return -EFAULT not -EINVAL
  lockdep: in_range() fix
  lockdep: fix debug_show_all_locks()
  sched: style cleanups
  futex: fix for futex_wait signal stack corruption
parents ad658cec cde898fa
include/linux/thread_info.h
@@ -7,12 +7,25 @@
 #ifndef _LINUX_THREAD_INFO_H
 #define _LINUX_THREAD_INFO_H
 
+#include <linux/types.h>
+
 /*
  * System call restart block.
  */
 struct restart_block {
 	long (*fn)(struct restart_block *);
-	unsigned long arg0, arg1, arg2, arg3;
+	union {
+		struct {
+			unsigned long arg0, arg1, arg2, arg3;
+		};
+		/* For futex_wait */
+		struct {
+			u32 *uaddr;
+			u32 val;
+			u32 flags;
+			u64 time;
+		} futex;
+	};
 };
 
 extern long do_no_restart_syscall(struct restart_block *parm);
...
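As context for the hunk above (not part of the commit itself): the new futex member sits in an anonymous union alongside the legacy arg0..arg3 words, so code that still uses the generic fields keeps compiling while futex_wait gets typed storage. Below is a minimal user-space model of that layout; uint32_t/uint64_t stand in for the kernel's u32/u64, the restart function pointer is reduced to void *, and all names are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

/* User-space stand-in for the restart_block layout shown above. */
struct restart_block_model {
	long (*fn)(void *);
	union {
		struct {
			unsigned long arg0, arg1, arg2, arg3;
		};
		struct {			/* the new futex view */
			uint32_t *uaddr;
			uint32_t val;
			uint32_t flags;
			uint64_t time;
		} futex;
	};
};

int main(void)
{
	struct restart_block_model rb = { 0 };

	rb.futex.val = 42;	/* typed access for futex_wait ...           */
	rb.arg1 = 42;		/* ... legacy word access still compiles too */

	printf("generic view: %zu bytes, futex view: %zu bytes\n",
	       4 * sizeof(unsigned long), sizeof(rb.futex));
	return 0;
}
```

The union simply sizes itself to the larger of the two views, so nothing outside restart_block has to change.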
kernel/futex.c
@@ -658,7 +658,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 
 	if (curval == -EFAULT)
 		ret = -EFAULT;
-	if (curval != uval)
+	else if (curval != uval)
 		ret = -EINVAL;
 	if (ret) {
 		spin_unlock(&pi_state->pi_mutex.wait_lock);
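For readers skimming the hunk: the futex cmpxchg helper hands back either the old futex value or -EFAULT when the user page cannot be touched. Without the added else, a faulting access whose returned value also happens to differ from uval would fall through to the second test and have its -EFAULT silently replaced by -EINVAL. A stand-alone sketch of the corrected priority, using errno.h constants in place of the kernel's:

```c
#include <errno.h>

/*
 * Sketch only: 'curval' models the value handed back by the futex cmpxchg,
 * which doubles as an error marker when the user page could not be read.
 */
int classify_cmpxchg_result(unsigned long curval, unsigned long uval)
{
	int ret = 0;

	if (curval == (unsigned long)-EFAULT)
		ret = -EFAULT;
	else if (curval != uval)	/* the 'else' keeps -EFAULT from being
					 * overwritten with -EINVAL here     */
		ret = -EINVAL;

	return ret;
}
```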
@@ -1149,9 +1149,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 
 /*
  * In case we must use restart_block to restart a futex_wait,
- * we encode in the 'arg3' shared capability
+ * we encode in the 'flags' shared capability
  */
-#define ARG3_SHARED	1
+#define FLAGS_SHARED	1
 
 static long futex_wait_restart(struct restart_block *restart);
@@ -1290,12 +1290,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 		struct restart_block *restart;
 		restart = &current_thread_info()->restart_block;
 		restart->fn = futex_wait_restart;
-		restart->arg0 = (unsigned long)uaddr;
-		restart->arg1 = (unsigned long)val;
-		restart->arg2 = (unsigned long)abs_time;
-		restart->arg3 = 0;
+		restart->futex.uaddr = (u32 *)uaddr;
+		restart->futex.val = val;
+		restart->futex.time = abs_time->tv64;
+		restart->futex.flags = 0;
 		if (fshared)
-			restart->arg3 |= ARG3_SHARED;
+			restart->futex.flags |= FLAGS_SHARED;
 		return -ERESTART_RESTARTBLOCK;
 	}
@@ -1310,15 +1311,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
 static long futex_wait_restart(struct restart_block *restart)
 {
-	u32 __user *uaddr = (u32 __user *)restart->arg0;
-	u32 val = (u32)restart->arg1;
-	ktime_t *abs_time = (ktime_t *)restart->arg2;
+	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
 	struct rw_semaphore *fshared = NULL;
+	ktime_t t;
 
+	t.tv64 = restart->futex.time;
 	restart->fn = do_no_restart_syscall;
-	if (restart->arg3 & ARG3_SHARED)
+	if (restart->futex.flags & FLAGS_SHARED)
 		fshared = &current->mm->mmap_sem;
-	return (long)futex_wait(uaddr, fshared, val, abs_time);
+	return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
 }
...
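The substance of these two futex_wait hunks, for context: the old code parked a pointer to abs_time (a ktime_t living on the caller's kernel stack) in arg2, and the restart path dereferenced it after that stack frame was gone, which is the signal-stack corruption being fixed. The new code stores the 64-bit timeout value itself and rebuilds a ktime_t on the current stack. Below is a small user-space model of that save-by-value/rebuild pattern; ktime_t is reduced to a 64-bit field and every name is illustrative rather than a kernel definition.

```c
#include <stdint.h>

typedef struct { int64_t tv64; } ktime_model;	/* stand-in for ktime_t */

struct futex_restart_model {			/* mirrors restart->futex */
	uint32_t *uaddr;
	uint32_t  val;
	uint32_t  flags;
	uint64_t  time;		/* the timeout's value, not a stack pointer */
};

/* Before asking for a restart: capture everything the retry will need. */
void save_for_restart(struct futex_restart_model *r, uint32_t *uaddr,
		      uint32_t val, const ktime_model *abs_time, int shared)
{
	r->uaddr = uaddr;
	r->val = val;
	r->time = (uint64_t)abs_time->tv64;
	r->flags = shared ? 1u : 0u;		/* FLAGS_SHARED */
}

/* On restart: rebuild the timeout on the *current* stack and reuse it. */
ktime_model rebuild_timeout(const struct futex_restart_model *r)
{
	ktime_model t;

	t.tv64 = (int64_t)r->time;
	return t;
}
```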
kernel/lockdep.c
@@ -3054,11 +3054,6 @@ void __init lockdep_info(void)
 #endif
 }
 
-static inline int in_range(const void *start, const void *addr, const void *end)
-{
-	return addr >= start && addr <= end;
-}
-
 static void
 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 			const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3075,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	dump_stack();
 }
 
+static inline int not_in_range(const void* mem_from, unsigned long mem_len,
+				const void* lock_from, unsigned long lock_len)
+{
+	return lock_from + lock_len <= mem_from ||
+		mem_from + mem_len <= lock_from;
+}
+
 /*
  * Called when kernel memory is freed (or unmapped), or if a lock
  * is destroyed or reinitialized - this code checks whether there is
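A short note on why not_in_range() replaces in_range(): the old test only asked whether the lock object's first or last byte landed inside the freed region, so it missed a freed range that sat entirely inside a larger object, and its inclusive `addr <= end` bound also mishandled the one-past-the-end address. The new helper simply checks two half-open ranges for disjointness. A tiny user-space check of that predicate (illustrative names only):

```c
#include <assert.h>

/* Same disjointness test as not_in_range() above, on plain char pointers. */
int ranges_disjoint(const char *mem_from, unsigned long mem_len,
		    const char *lock_from, unsigned long lock_len)
{
	return lock_from + lock_len <= mem_from ||
	       mem_from + mem_len <= lock_from;
}

int main(void)
{
	static char buf[64];

	/* lock object entirely inside the freed region: overlap */
	assert(!ranges_disjoint(buf, 64, buf + 8, 16));
	/* freed region entirely inside the lock object: still overlap */
	assert(!ranges_disjoint(buf + 8, 4, buf, 32));
	/* adjacent but non-overlapping regions are fine */
	assert(ranges_disjoint(buf, 8, buf + 8, 8));
	return 0;
}
```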
@@ -3087,7 +3089,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
  */
 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 {
-	const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
 	struct task_struct *curr = current;
 	struct held_lock *hlock;
 	unsigned long flags;
@@ -3100,14 +3101,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		lock_from = (void *)hlock->instance;
-		lock_to = (void *)(hlock->instance + 1);
-
-		if (!in_range(mem_from, lock_from, mem_to) &&
-		    !in_range(mem_from, lock_to, mem_to))
+		if (not_in_range(mem_from, mem_len, hlock->instance,
+					sizeof(*hlock->instance)))
 			continue;
 
-		print_freed_lock_bug(curr, mem_from, mem_to, hlock);
+		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
 	local_irq_restore(flags);
@@ -3173,6 +3171,13 @@ void debug_show_all_locks(void)
 	printk(" locked it.\n");
 
 	do_each_thread(g, p) {
+		/*
+		 * It's not reliable to print a task's held locks
+		 * if it's not sleeping (or if it's not the current
+		 * task):
+		 */
+		if (p->state == TASK_RUNNING && p != current)
+			continue;
 		if (p->lockdep_depth)
 			lockdep_print_held_locks(p);
 		if (!unlock)
...
kernel/sched.c
@@ -211,7 +211,6 @@ static inline struct task_group *task_group(struct task_struct *p)
 #else
 	tg = &init_task_group;
 #endif
-
 	return tg;
 }
 
@@ -249,14 +248,15 @@ struct cfs_rq {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	struct rq *rq;	/* cpu runqueue to which this cfs_rq is attached */
 
-	/* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+	/*
+	 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 	 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
 	 * (like users, containers etc.)
 	 *
 	 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
 	 * list is used during load balance.
 	 */
-	struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
+	struct list_head leaf_cfs_rq_list;
 
 	struct task_group *tg;	/* group that "owns" this runqueue */
 #endif
 };
@@ -4390,8 +4390,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
-				       struct sched_param __user *param)
+asmlinkage long
+sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
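Several of the remaining sched.c hunks rewrap function signatures the same way as this one: the storage class and return type move onto their own line so the full parameter list fits on the next line, with the function name starting in column one. A hypothetical example of the shape (not taken from this commit):

```c
struct example_param;	/* hypothetical type, for illustration only */

static long
example_set_policy(int pid, int policy, const struct example_param *param);
```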
@@ -5245,11 +5245,12 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 		 * kernel threads (both mm NULL), since they never
 		 * leave kernel.
 		 */
-		if (p->mm && printk_ratelimit())
+		if (p->mm && printk_ratelimit()) {
 			printk(KERN_INFO "process %d (%s) no "
 				"longer affine to cpu%d\n",
 				task_pid_nr(p), p->comm, dead_cpu);
+		}
 		}
 	} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
 }
@@ -5612,9 +5613,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
 
-		/* No need to migrate the tasks: it was best-effort if
+		/*
+		 * No need to migrate the tasks: it was best-effort if
 		 * they didn't take sched_hotcpu_mutex. Just wake up
-		 * the requestors. */
+		 * the requestors.
+		 */
 		spin_lock_irq(&rq->lock);
 		while (!list_empty(&rq->migration_queue)) {
 			struct migration_req *req;
@@ -5999,8 +6002,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
 
-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
-			    struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6017,8 +6020,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
 #endif
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6029,8 +6032,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 	return group;
 }
 #elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	if (sg)
 		*sg = &per_cpu(sched_group_core, cpu);
@@ -6041,8 +6044,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 static DEFINE_PER_CPU(struct sched_domain, phys_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
 
-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
-			     struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
 {
 	int group;
 #ifdef CONFIG_SCHED_MC
@@ -7193,16 +7196,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
-			       struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
 	sched_destroy_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
-				 struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk)
 {
 	/* We don't support RT-tasks being in separate groups */
 	if (tsk->sched_class != &fair_sched_class)
@@ -7308,8 +7312,8 @@ static struct cgroup_subsys_state *cpuacct_create(
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup_subsys *ss,
-			    struct cgroup *cont)
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
 {
 	struct cpuacct *ca = cgroup_ca(cont);
 
...