Commit 95d248d1 authored by Linus Torvalds

Merge tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:

 - Prevent the leaking of a debug timer in futex_waitv()

 - A preempt-RT mutex locking fix, adding the proper acquire semantics

* tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Fix futex_waitv() hrtimer debug object leak on kcalloc error
  rtmutex: Add acquire semantics for rtmutex lock acquisition slow path
parents 8b419482 94cd8fa0
kernel/futex/syscalls.c
@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
 	}
 
 	futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
-	if (!futexv)
-		return -ENOMEM;
+	if (!futexv) {
+		ret = -ENOMEM;
+		goto destroy_timer;
+	}
 
 	ret = futex_parse_waitv(futexv, waiters, nr_futexes);
 	if (!ret)
 		ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
 
+	kfree(futexv);
+
+destroy_timer:
 	if (timeout) {
 		hrtimer_cancel(&to.timer);
 		destroy_hrtimer_on_stack(&to.timer);
 	}
 
-	kfree(futexv);
 	return ret;
 }
...
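The leak fixed above is an ordering problem: futex_waitv() arms an on-stack hrtimer before the waiter array is allocated, so the old early return on kcalloc() failure skipped hrtimer_cancel()/destroy_hrtimer_on_stack() and left the timer's debug object behind. A rough userspace sketch of the same goto-cleanup pattern follows; setup_timer(), teardown_timer() and waitv_like() are made-up illustrative names, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for futex_setup_timer() and
 * destroy_hrtimer_on_stack(); these are not kernel APIs. */
struct timer { int armed; };

static void setup_timer(struct timer *t)    { t->armed = 1; }
static void teardown_timer(struct timer *t) { t->armed = 0; }

/* Mirrors the fixed control flow: every exit taken after the timer is
 * armed, including the allocation-failure path, funnels through the
 * destroy_timer label. */
static int waitv_like(size_t nr, int use_timeout)
{
	struct timer to = { 0 };
	int *slots;
	int ret = 0;

	if (use_timeout)
		setup_timer(&to);

	slots = calloc(nr, sizeof(*slots));
	if (!slots) {
		ret = -1;	/* was "return -1;", which leaked the timer */
		goto destroy_timer;
	}

	/* ... the actual waiting would happen here ... */

	free(slots);

destroy_timer:
	if (use_timeout)
		teardown_timer(&to);
	return ret;
}

int main(void)
{
	printf("waitv_like: %d\n", waitv_like(8, 1));
	return 0;
}

The point of the single label is that there is exactly one teardown site, so adding a new early exit later cannot reintroduce the leak.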
kernel/locking/rtmutex.c
@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
  * set this bit before looking at the lock.
  */
 
-static __always_inline void
-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+static __always_inline struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
 {
 	unsigned long val = (unsigned long)owner;
 
 	if (rt_mutex_has_waiters(lock))
 		val |= RT_MUTEX_HAS_WAITERS;
 
-	WRITE_ONCE(lock->owner, (struct task_struct *)val);
+	return (struct task_struct *)val;
+}
+
+static __always_inline void
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+{
+	/*
+	 * lock->wait_lock is held but explicit acquire semantics are needed
+	 * for a new lock owner so WRITE_ONCE is insufficient.
+	 */
+	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+{
+	/* lock->wait_lock is held so the unlock provides release semantics. */
+	WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
 }
 
 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
 		((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+static __always_inline void
+fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
 {
 	unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
 	 * still set.
 	 */
 	owner = READ_ONCE(*p);
-	if (owner & RT_MUTEX_HAS_WAITERS)
-		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	if (owner & RT_MUTEX_HAS_WAITERS) {
+		/*
+		 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+		 * why xchg_acquire() is used for updating owner for
+		 * locking and WRITE_ONCE() for unlocking.
+		 *
+		 * WRITE_ONCE() would work for the acquire case too, but
+		 * in case that the lock acquisition failed it might
+		 * force other lockers into the slow path unnecessarily.
+		 */
+		if (acquire_lock)
+			xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+		else
+			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	}
 }
 
 /*
@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 		owner = *p;
 	} while (cmpxchg_relaxed(p, owner,
 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+	/*
+	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+	 * operations in the event of contention. Ensure the successful
+	 * cmpxchg is visible.
+	 */
+	smp_mb__after_atomic();
 }
 
 /*
@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the lock waiters bit
 	 * unconditionally. Clean this up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	return ret;
 }
@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit
 	 * unconditionally. We might have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	trace_contention_end(lock, ret);
@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
 	 * We might have to fix that up:
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	debug_rt_mutex_free_waiter(&waiter);
 	trace_contention_end(lock, 0);
...
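The rtmutex side is purely about memory ordering. A plain store such as WRITE_ONCE() can never carry ACQUIRE semantics; only a read-modify-write operation can, which is why the owner update on the lock path becomes xchg_acquire() while the unlock path keeps WRITE_ONCE() and relies on the release ordering of the wait_lock unlock. A loose userspace analogy in C11 atomics is sketched below; try_lock_fast(), set_owner_slowpath() and unlock() are invented names, and the owner word is a toy model, not the kernel layout.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy owner word: 0 means unlocked, otherwise a thread id. Loosely
 * modelled on rt_mutex_base::owner, but not the kernel layout. */
static _Atomic uintptr_t owner;

/* Fast path: an ACQUIRE cmpxchg, as the kernel fast path already had. */
static int try_lock_fast(uintptr_t me)
{
	uintptr_t expected = 0;

	return atomic_compare_exchange_strong_explicit(&owner, &expected, me,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/* Slow-path hand-off: a relaxed store would let the new owner's
 * critical-section loads be reordered before it observes the previous
 * owner's writes. An ACQUIRE exchange (the analogue of xchg_acquire())
 * closes that hole; note that C11, like the kernel, has no "acquire
 * store" at all, only read-modify-write operations can be ACQUIRE. */
static void set_owner_slowpath(uintptr_t me)
{
	(void)atomic_exchange_explicit(&owner, me, memory_order_acquire);
}

/* Unlock: a RELEASE store publishes the critical section, analogous to
 * rt_mutex_clear_owner() relying on release ordering at unlock time. */
static void unlock(void)
{
	atomic_store_explicit(&owner, 0, memory_order_release);
}

int main(void)
{
	if (!try_lock_fast(1))
		set_owner_slowpath(1);	/* pretend we won the slow path */
	unlock();
	puts("ok");
	return 0;
}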
kernel/locking/rtmutex_api.c
@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
 void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
 {
 	debug_rt_mutex_proxy_unlock(lock);
-	rt_mutex_set_owner(lock, NULL);
+	rt_mutex_clear_owner(lock);
 }
 
 /**
@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, false);
 
 	raw_spin_unlock_irq(&lock->wait_lock);
...
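Note how the new acquire_lock argument is chosen per call site: the paths where the caller may now own the lock (__rt_mutex_slowtrylock(), __rt_mutex_slowlock(), rtlock_slowlock_locked(), rt_mutex_wait_proxy_lock()) pass true so that clearing the waiters bit is an ACQUIRE operation, while rt_mutex_cleanup_proxy_lock() passes false, since that path only runs when the lock was not acquired and a plain WRITE_ONCE() suffices there.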