Commit aa16cd8d authored by Linus Torvalds

Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  futex: Handle futex value corruption gracefully
  futex: Handle user space corruption gracefully
  futex_lock_pi() key refcnt fix
  softlockup: Add sched_clock_tick() to avoid kernel warning on kgdb resume
parents a3a71ca9 59647b6a
@@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
                                     void __user *buffer,
@@ -323,6 +324,9 @@ static inline void softlockup_tick(void)
 static inline void touch_softlockup_watchdog(void)
 {
 }
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
...
@@ -530,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                                 return -EINVAL;

                         WARN_ON(!atomic_read(&pi_state->refcount));
-                        WARN_ON(pid && pi_state->owner &&
-                                pi_state->owner->pid != pid);
+
+                        /*
+                         * When pi_state->owner is NULL then the owner died
+                         * and another waiter is on the fly. pi_state->owner
+                         * is fixed up by the task which acquires
+                         * pi_state->rt_mutex.
+                         *
+                         * We do not check for pid == 0 which can happen when
+                         * the owner died and robust_list_exit() cleared the
+                         * TID.
+                         */
+                        if (pid && pi_state->owner) {
+                                /*
+                                 * Bail out if user space manipulated the
+                                 * futex value.
+                                 */
+                                if (pid != task_pid_vnr(pi_state->owner))
+                                        return -EINVAL;
+                        }

                         atomic_inc(&pi_state->refcount);
                         *ps = pi_state;

@@ -758,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
         if (!pi_state)
                 return -EINVAL;

+        /*
+         * If current does not own the pi_state then the futex is
+         * inconsistent and user space fiddled with the futex value.
+         */
+        if (pi_state->owner != current)
+                return -EINVAL;
+
         raw_spin_lock(&pi_state->pi_mutex.wait_lock);
         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

@@ -1971,7 +1995,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
         /* Unqueue and drop the lock */
         unqueue_me_pi(&q);

-        goto out;
+        goto out_put_key;

 out_unlock_put_key:
         queue_unlock(&q, hb);
...
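The two new checks in the kernel/futex.c hunks above guard against user space scribbling over a PI futex word. For orientation, here is a minimal user-space sketch of the protocol they assume; it is written for this note, not taken from the tree (pi_lock(), pi_unlock() and sys_futex() are hypothetical helpers), while the constants and the futex(2) usage come from <linux/futex.h>.

/* Illustrative only: the PI futex word and its fast paths as seen from user space. */
#define _GNU_SOURCE
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>        /* FUTEX_LOCK_PI, FUTEX_UNLOCK_PI, FUTEX_TID_MASK, ... */

static long sys_futex(uint32_t *uaddr, int op)
{
        return syscall(SYS_futex, uaddr, op, 0, NULL, NULL, 0);
}

/*
 * The futex word holds the owner's TID in FUTEX_TID_MASK (0x3fffffff);
 * FUTEX_WAITERS (0x80000000) and FUTEX_OWNER_DIED (0x40000000) are the
 * only other defined bits.  Everything else is kernel PI state.
 */
static void pi_lock(uint32_t *lock)
{
        uint32_t tid = syscall(SYS_gettid);

        /* Fast path: 0 -> TID in user space, no kernel involvement. */
        if (!__sync_bool_compare_and_swap(lock, 0, tid))
                sys_futex(lock, FUTEX_LOCK_PI);   /* contended: kernel builds pi_state */
}

static void pi_unlock(uint32_t *lock)
{
        uint32_t tid = syscall(SYS_gettid);

        /* Fast path only works while no waiter/owner-died bits are set. */
        if (!__sync_bool_compare_and_swap(lock, tid, 0))
                sys_futex(lock, FUTEX_UNLOCK_PI);
}

If a buggy program overwrites the word while waiters exist, the TID stored in it no longer matches pi_state->owner; with the hunks above the kernel rejects such FUTEX_LOCK_PI/FUTEX_UNLOCK_PI operations with -EINVAL instead of warning and continuing on inconsistent state, while a TID of 0 left behind by robust-list exit is still tolerated.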
@@ -599,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs)

         /* Signal the primary CPU that we are done: */
         atomic_set(&cpu_in_kgdb[cpu], 0);
-        touch_softlockup_watchdog();
+        touch_softlockup_watchdog_sync();
         clocksource_touch_watchdog();
         local_irq_restore(flags);
 }

@@ -1453,7 +1453,7 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
             (kgdb_info[cpu].task &&
              kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
                 atomic_set(&kgdb_active, -1);
-                touch_softlockup_watchdog();
+                touch_softlockup_watchdog_sync();
                 clocksource_touch_watchdog();
                 local_irq_restore(flags);

@@ -1553,7 +1553,7 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
         }
         /* Free kgdb_active */
         atomic_set(&kgdb_active, -1);
-        touch_softlockup_watchdog();
+        touch_softlockup_watchdog_sync();
         clocksource_touch_watchdog();
         local_irq_restore(flags);
...
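These kernel/kgdb.c hunks switch every resume path to the new _sync variant because the debugger holds CPUs with interrupts off for arbitrarily long. A minimal sketch of the caller pattern, assuming a made-up debug_stall() helper (only the two watchdog calls mirror the kgdb code above):

#include <linux/sched.h>        /* touch_softlockup_watchdog_sync() */
#include <linux/clocksource.h>  /* clocksource_touch_watchdog() */
#include <linux/irqflags.h>
#include <linux/delay.h>

/* Hypothetical example: stall this CPU the way a debugger would. */
static void debug_stall(unsigned int msecs)
{
        unsigned long flags;

        local_irq_save(flags);
        mdelay(msecs);          /* no ticks; per-CPU sched_clock data goes stale */

        /*
         * Plain touch_softlockup_watchdog() only zeroes the per-CPU touch
         * timestamp; the stale sched clock could still produce a bogus
         * "BUG: soft lockup" on the next tick.  The _sync variant also asks
         * softlockup_tick() to run sched_clock_tick() before it takes a
         * fresh timestamp.
         */
        touch_softlockup_watchdog_sync();
        clocksource_touch_watchdog();
        local_irq_restore(flags);
}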
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
 static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
 static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);

 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;

@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);

+void touch_softlockup_watchdog_sync(void)
+{
+        __raw_get_cpu_var(softlock_touch_sync) = true;
+        __raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
 void touch_all_softlockup_watchdogs(void)
 {
         int cpu;

@@ -118,6 +125,14 @@ void softlockup_tick(void)
         }

         if (touch_ts == 0) {
+                if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+                        /*
+                         * If the time stamp was touched atomically
+                         * make sure the scheduler tick is up to date.
+                         */
+                        per_cpu(softlock_touch_sync, this_cpu) = false;
+                        sched_clock_tick();
+                }
                 __touch_softlockup_watchdog();
                 return;
         }
...
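A note on why zeroing the timestamp alone was not enough (this paraphrases the patch rationale rather than quoting it): on configurations with an unstable sched clock, the per-CPU sched_clock data is only refreshed from the timer tick. After kgdb has held a CPU with interrupts off, that data is stale, so the first watchdog check after resume can compute a bogus delta and print a spurious soft-lockup warning. The softlock_touch_sync flag lets softlockup_tick() call sched_clock_tick() to resynchronize the clock before it takes a fresh timestamp, which is what the kgdb hunks above rely on.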