Commit 61270708 authored by Peter Zijlstra, committed by Ingo Molnar

futex: reduce mmap_sem usage

Now that we rely on get_user_pages() for shared-key handling,
move all the mmap_sem locking to tightly surround the slow paths.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 38d47c1b
...@@ -122,24 +122,6 @@ struct futex_hash_bucket { ...@@ -122,24 +122,6 @@ struct futex_hash_bucket {
static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS]; static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
/*
* Take mm->mmap_sem, when futex is shared
*/
/*
 * Acquire mm->mmap_sem (for reading) when the futex is shared;
 * a NULL @fshared means the futex is process-private and no
 * locking is required, so this is a no-op.
 */
static inline void futex_lock_mm(struct rw_semaphore *fshared)
{
	if (!fshared)
		return;

	down_read(fshared);
}
/*
* Release mm->mmap_sem, when the futex is shared
*/
/*
 * Release mm->mmap_sem (read side) when the futex is shared;
 * a NULL @fshared means the futex is process-private and there
 * is nothing to release, so this is a no-op.
 */
static inline void futex_unlock_mm(struct rw_semaphore *fshared)
{
	if (!fshared)
		return;

	up_read(fshared);
}
/* /*
* We hash on the keys returned from get_futex_key (see below). * We hash on the keys returned from get_futex_key (see below).
*/ */
...@@ -250,7 +232,9 @@ static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -250,7 +232,9 @@ static int get_futex_key(u32 __user *uaddr, struct rw_semaphore *fshared,
} }
again: again:
down_read(&mm->mmap_sem);
err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL); err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
up_read(&mm->mmap_sem);
if (err < 0) if (err < 0)
return err; return err;
...@@ -327,7 +311,6 @@ static int futex_handle_fault(unsigned long address, ...@@ -327,7 +311,6 @@ static int futex_handle_fault(unsigned long address,
if (attempt > 2) if (attempt > 2)
return ret; return ret;
if (!fshared)
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
vma = find_vma(mm, address); vma = find_vma(mm, address);
if (vma && address >= vma->vm_start && if (vma && address >= vma->vm_start &&
...@@ -348,7 +331,6 @@ static int futex_handle_fault(unsigned long address, ...@@ -348,7 +331,6 @@ static int futex_handle_fault(unsigned long address,
current->min_flt++; current->min_flt++;
} }
} }
if (!fshared)
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return ret; return ret;
} }
...@@ -719,8 +701,6 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -719,8 +701,6 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
if (!bitset) if (!bitset)
return -EINVAL; return -EINVAL;
futex_lock_mm(fshared);
ret = get_futex_key(uaddr, fshared, &key); ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out; goto out;
...@@ -749,7 +729,6 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -749,7 +729,6 @@ static int futex_wake(u32 __user *uaddr, struct rw_semaphore *fshared,
spin_unlock(&hb->lock); spin_unlock(&hb->lock);
out: out:
put_futex_key(fshared, &key); put_futex_key(fshared, &key);
futex_unlock_mm(fshared);
return ret; return ret;
} }
...@@ -769,8 +748,6 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared, ...@@ -769,8 +748,6 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
int ret, op_ret, attempt = 0; int ret, op_ret, attempt = 0;
retryfull: retryfull:
futex_lock_mm(fshared);
ret = get_futex_key(uaddr1, fshared, &key1); ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out; goto out;
...@@ -821,12 +798,6 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared, ...@@ -821,12 +798,6 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
goto retry; goto retry;
} }
/*
* If we would have faulted, release mmap_sem,
* fault it in and start all over again.
*/
futex_unlock_mm(fshared);
ret = get_user(dummy, uaddr2); ret = get_user(dummy, uaddr2);
if (ret) if (ret)
return ret; return ret;
...@@ -864,7 +835,6 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared, ...@@ -864,7 +835,6 @@ futex_wake_op(u32 __user *uaddr1, struct rw_semaphore *fshared,
out: out:
put_futex_key(fshared, &key2); put_futex_key(fshared, &key2);
put_futex_key(fshared, &key1); put_futex_key(fshared, &key1);
futex_unlock_mm(fshared);
return ret; return ret;
} }
...@@ -884,8 +854,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared, ...@@ -884,8 +854,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
int ret, drop_count = 0; int ret, drop_count = 0;
retry: retry:
futex_lock_mm(fshared);
ret = get_futex_key(uaddr1, fshared, &key1); ret = get_futex_key(uaddr1, fshared, &key1);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out; goto out;
...@@ -908,12 +876,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared, ...@@ -908,12 +876,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
if (hb1 != hb2) if (hb1 != hb2)
spin_unlock(&hb2->lock); spin_unlock(&hb2->lock);
/*
* If we would have faulted, release mmap_sem, fault
* it in and start all over again.
*/
futex_unlock_mm(fshared);
ret = get_user(curval, uaddr1); ret = get_user(curval, uaddr1);
if (!ret) if (!ret)
...@@ -967,7 +929,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared, ...@@ -967,7 +929,6 @@ static int futex_requeue(u32 __user *uaddr1, struct rw_semaphore *fshared,
out: out:
put_futex_key(fshared, &key2); put_futex_key(fshared, &key2);
put_futex_key(fshared, &key1); put_futex_key(fshared, &key1);
futex_unlock_mm(fshared);
return ret; return ret;
} }
...@@ -1211,8 +1172,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1211,8 +1172,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
q.pi_state = NULL; q.pi_state = NULL;
q.bitset = bitset; q.bitset = bitset;
retry: retry:
futex_lock_mm(fshared);
q.key = FUTEX_KEY_INIT; q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key); ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
...@@ -1245,12 +1204,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1245,12 +1204,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
if (unlikely(ret)) { if (unlikely(ret)) {
queue_unlock(&q, hb); queue_unlock(&q, hb);
/*
* If we would have faulted, release mmap_sem, fault it in and
* start all over again.
*/
futex_unlock_mm(fshared);
ret = get_user(uval, uaddr); ret = get_user(uval, uaddr);
if (!ret) if (!ret)
...@@ -1264,12 +1217,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1264,12 +1217,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
/* Only actually queue if *uaddr contained val. */ /* Only actually queue if *uaddr contained val. */
queue_me(&q, hb); queue_me(&q, hb);
/*
* Now the futex is queued and we have checked the data, we
* don't want to hold mmap_sem while we sleep.
*/
futex_unlock_mm(fshared);
/* /*
* There might have been scheduling since the queue_me(), as we * There might have been scheduling since the queue_me(), as we
* cannot hold a spinlock across the get_user() in case it * cannot hold a spinlock across the get_user() in case it
...@@ -1355,7 +1302,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1355,7 +1302,6 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
out_release_sem: out_release_sem:
put_futex_key(fshared, &q.key); put_futex_key(fshared, &q.key);
futex_unlock_mm(fshared);
return ret; return ret;
} }
...@@ -1404,8 +1350,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1404,8 +1350,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
q.pi_state = NULL; q.pi_state = NULL;
retry: retry:
futex_lock_mm(fshared);
q.key = FUTEX_KEY_INIT; q.key = FUTEX_KEY_INIT;
ret = get_futex_key(uaddr, fshared, &q.key); ret = get_futex_key(uaddr, fshared, &q.key);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
...@@ -1495,7 +1439,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1495,7 +1439,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* exit to complete. * exit to complete.
*/ */
queue_unlock(&q, hb); queue_unlock(&q, hb);
futex_unlock_mm(fshared);
cond_resched(); cond_resched();
goto retry; goto retry;
...@@ -1527,12 +1470,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1527,12 +1470,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
*/ */
queue_me(&q, hb); queue_me(&q, hb);
/*
* Now the futex is queued and we have checked the data, we
* don't want to hold mmap_sem while we sleep.
*/
futex_unlock_mm(fshared);
WARN_ON(!q.pi_state); WARN_ON(!q.pi_state);
/* /*
* Block on the PI mutex: * Block on the PI mutex:
...@@ -1545,7 +1482,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1545,7 +1482,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
ret = ret ? 0 : -EWOULDBLOCK; ret = ret ? 0 : -EWOULDBLOCK;
} }
futex_lock_mm(fshared);
spin_lock(q.lock_ptr); spin_lock(q.lock_ptr);
if (!ret) { if (!ret) {
...@@ -1611,7 +1547,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1611,7 +1547,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
/* Unqueue and drop the lock */ /* Unqueue and drop the lock */
unqueue_me_pi(&q); unqueue_me_pi(&q);
futex_unlock_mm(fshared);
if (to) if (to)
destroy_hrtimer_on_stack(&to->timer); destroy_hrtimer_on_stack(&to->timer);
...@@ -1622,7 +1557,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1622,7 +1557,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
out_release_sem: out_release_sem:
put_futex_key(fshared, &q.key); put_futex_key(fshared, &q.key);
futex_unlock_mm(fshared);
if (to) if (to)
destroy_hrtimer_on_stack(&to->timer); destroy_hrtimer_on_stack(&to->timer);
return ret; return ret;
...@@ -1646,8 +1580,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared, ...@@ -1646,8 +1580,6 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
goto retry_unlocked; goto retry_unlocked;
} }
futex_unlock_mm(fshared);
ret = get_user(uval, uaddr); ret = get_user(uval, uaddr);
if (!ret && (uval != -EFAULT)) if (!ret && (uval != -EFAULT))
goto retry; goto retry;
...@@ -1679,10 +1611,6 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared) ...@@ -1679,10 +1611,6 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
*/ */
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
return -EPERM; return -EPERM;
/*
* First take all the futex related locks:
*/
futex_lock_mm(fshared);
ret = get_futex_key(uaddr, fshared, &key); ret = get_futex_key(uaddr, fshared, &key);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
...@@ -1742,7 +1670,6 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared) ...@@ -1742,7 +1670,6 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
spin_unlock(&hb->lock); spin_unlock(&hb->lock);
out: out:
put_futex_key(fshared, &key); put_futex_key(fshared, &key);
futex_unlock_mm(fshared);
return ret; return ret;
...@@ -1766,8 +1693,6 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared) ...@@ -1766,8 +1693,6 @@ static int futex_unlock_pi(u32 __user *uaddr, struct rw_semaphore *fshared)
goto retry_unlocked; goto retry_unlocked;
} }
futex_unlock_mm(fshared);
ret = get_user(uval, uaddr); ret = get_user(uval, uaddr);
if (!ret && (uval != -EFAULT)) if (!ret && (uval != -EFAULT))
goto retry; goto retry;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment