Commit 358c331f authored by Thomas Gleixner

rtmutex: Simplify and document try_to_take_rtmutex()

The current implementation of try_to_take_rtmutex() is correct, but
requires more than a single brain twist to understand the cleverly
encoded conditionals.

Untangle it and document the cases properly.

Looks less efficient at first glance, but actually reduces the
binary code size on x86_64 by 80 bytes.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
parent 88f2b4c1
@@ -533,76 +533,119 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  *
  * Must be called with lock->wait_lock held.
  *
- * @lock:   the lock to be acquired.
- * @task:   the task which wants to acquire the lock
- * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ * @lock:   The lock to be acquired.
+ * @task:   The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait list if the
+ *          callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                                 struct rt_mutex_waiter *waiter)
 {
+        unsigned long flags;
+
         /*
-         * We have to be careful here if the atomic speedups are
-         * enabled, such that, when
-         *  - no other waiter is on the lock
-         *  - the lock has been released since we did the cmpxchg
-         * the lock can be released or taken while we are doing the
-         * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+         * Before testing whether we can acquire @lock, we set the
+         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+         * other tasks which try to modify @lock into the slow path
+         * and they serialize on @lock->wait_lock.
          *
-         * The atomic acquire/release aware variant of
-         * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
-         * the WAITERS bit, the atomic release / acquire can not
-         * happen anymore and lock->wait_lock protects us from the
-         * non-atomic case.
+         * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
+         * as explained at the top of this file if and only if:
          *
-         * Note, that this might set lock->owner =
-         * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
-         * any more. This is fixed up when we take the ownership.
-         * This is the transitional state explained at the top of this file.
+         * - There is a lock owner. The caller must fixup the
+         *   transient state if it does a trylock or leaves the lock
+         *   function due to a signal or timeout.
+         *
+         * - @task acquires the lock and there are no other
+         *   waiters. This is undone in rt_mutex_set_owner(@task) at
+         *   the end of this function.
          */
         mark_rt_mutex_waiters(lock);
 
+        /*
+         * If @lock has an owner, give up.
+         */
         if (rt_mutex_owner(lock))
                 return 0;
 
         /*
-         * It will get the lock because of one of these conditions:
-         * 1) there is no waiter
-         * 2) higher priority than waiters
-         * 3) it is top waiter
+         * If @waiter != NULL, @task has already enqueued the waiter
+         * into @lock waiter list. If @waiter == NULL then this is a
+         * trylock attempt.
          */
-        if (rt_mutex_has_waiters(lock)) {
-                if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
-                        if (!waiter || waiter != rt_mutex_top_waiter(lock))
-                                return 0;
-                }
-        }
-
-        if (waiter || rt_mutex_has_waiters(lock)) {
-                unsigned long flags;
-                struct rt_mutex_waiter *top;
-
-                raw_spin_lock_irqsave(&task->pi_lock, flags);
+        if (waiter) {
+                /*
+                 * If waiter is not the highest priority waiter of
+                 * @lock, give up.
+                 */
+                if (waiter != rt_mutex_top_waiter(lock))
+                        return 0;
 
-                /* remove the queued waiter. */
-                if (waiter) {
-                        rt_mutex_dequeue(lock, waiter);
-                        task->pi_blocked_on = NULL;
-                }
+                /*
+                 * We can acquire the lock. Remove the waiter from the
+                 * lock waiters list.
+                 */
+                rt_mutex_dequeue(lock, waiter);
 
+        } else {
                 /*
-                 * We have to enqueue the top waiter(if it exists) into
-                 * task->pi_waiters list.
+                 * If the lock has waiters already we check whether @task is
+                 * eligible to take over the lock.
+                 *
+                 * If there are no other waiters, @task can acquire
+                 * the lock. @task->pi_blocked_on is NULL, so it does
+                 * not need to be dequeued.
                  */
                 if (rt_mutex_has_waiters(lock)) {
-                        top = rt_mutex_top_waiter(lock);
-                        rt_mutex_enqueue_pi(task, top);
+                        /*
+                         * If @task->prio is greater than or equal to
+                         * the top waiter priority (kernel view),
+                         * @task lost.
+                         */
+                        if (task->prio >= rt_mutex_top_waiter(lock)->prio)
+                                return 0;
+
+                        /*
+                         * The current top waiter stays enqueued. We
+                         * don't have to change anything in the lock
+                         * waiters order.
+                         */
+                } else {
+                        /*
+                         * No waiters. Take the lock without the
+                         * pi_lock dance. @task->pi_blocked_on is NULL
+                         * and we have no waiters to enqueue in @task
+                         * pi waiters list.
+                         */
+                        goto takeit;
                 }
-                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
         }
 
+        /*
+         * Clear @task->pi_blocked_on. Requires protection by
+         * @task->pi_lock. Redundant operation for the @waiter == NULL
+         * case, but conditionals are more expensive than a redundant
+         * store.
+         */
+        raw_spin_lock_irqsave(&task->pi_lock, flags);
+        task->pi_blocked_on = NULL;
+
+        /*
+         * Finish the lock acquisition. @task is the new owner. If
+         * other waiters exist we have to insert the highest priority
+         * waiter into @task->pi_waiters list.
+         */
+        if (rt_mutex_has_waiters(lock))
+                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
+
+        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+takeit:
         /* We got the lock. */
         debug_rt_mutex_lock(lock);
 
+        /*
+         * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
+         * are still waiters or clears it.
+         */
         rt_mutex_set_owner(lock, task);
 
         rt_mutex_deadlock_account_lock(lock, task);
...
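
For reference, the transitional state that the new comments keep pointing at comes from rtmutex's owner-field encoding: because task_struct pointers are word aligned, bit 0 of lock->owner doubles as the RT_MUTEX_HAS_WAITERS flag. The sketch below is an illustration of that encoding only, not the kernel's exact code; the struct name and simplified helper bodies are assumptions modeled on the real rt_mutex_owner() and mark_rt_mutex_waiters() helpers in the kernel tree.

/* Illustrative sketch only; the real helpers live in kernel/rtmutex*.c. */

struct task_struct;                     /* opaque here */

#define RT_MUTEX_HAS_WAITERS    1UL     /* bit 0 of the owner field */

struct rt_mutex_sketch {
        struct task_struct *owner;      /* owner pointer | waiters bit */
};

/* Mask off the waiters bit to recover the owner task (NULL if unowned). */
static struct task_struct *rt_mutex_owner_sketch(struct rt_mutex_sketch *lock)
{
        return (struct task_struct *)
                ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * Set the waiters bit. If the lock is unowned this yields the
 * transitional state lock->owner == (void *)RT_MUTEX_HAS_WAITERS,
 * which rt_mutex_set_owner() cleans up at the end of
 * try_to_take_rt_mutex(), as the comments in the diff describe.
 */
static void mark_rt_mutex_waiters_sketch(struct rt_mutex_sketch *lock)
{
        lock->owner = (struct task_struct *)
                ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

Once the bit is set, a fast-path cmpxchg of lock->owner from NULL to the acquiring task can no longer succeed, so every other task is forced into the slow path and serializes on lock->wait_lock; that is what makes the checks after mark_rt_mutex_waiters() in the function above safe.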