Commit b80cd62b authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'core-futexes-for-linus' of...

Merge branch 'core-futexes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-futexes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  arm: Remove bogus comment in futex_atomic_cmpxchg_inatomic()
  futex: Deobfuscate handle_futex_death()
  plist: Add priority list test
  plist: Shrink struct plist_head
  futex,plist: Remove debug lock assignment from plist_node
  futex,plist: Pass the real head of the priority list to plist_del()
  futex: Sanitize futex ops argument types
  futex: Sanitize cmpxchg_futex_value_locked API
  futex: Remove redundant pagefault_disable in futex_atomic_cmpxchg_inatomic()
  futex: Avoid redudant evaluation of task_pid_vnr()
  futex: Update futex_wait_setup comments about locking
parents c345f60a 07d5ecae
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
: "r" (uaddr), "r"(oparg) \ : "r" (uaddr), "r"(oparg) \
: "memory") : "memory")
static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -81,21 +81,23 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -81,21 +81,23 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int prev, cmp; int ret = 0, cmp;
u32 prev;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
__asm__ __volatile__ ( __asm__ __volatile__ (
__ASM_SMP_MB __ASM_SMP_MB
"1: ldl_l %0,0(%2)\n" "1: ldl_l %1,0(%3)\n"
" cmpeq %0,%3,%1\n" " cmpeq %1,%4,%2\n"
" beq %1,3f\n" " beq %2,3f\n"
" mov %4,%1\n" " mov %5,%2\n"
"2: stl_c %1,0(%2)\n" "2: stl_c %2,0(%3)\n"
" beq %1,4f\n" " beq %2,4f\n"
"3: .subsection 2\n" "3: .subsection 2\n"
"4: br 1b\n" "4: br 1b\n"
" .previous\n" " .previous\n"
...@@ -105,11 +107,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) ...@@ -105,11 +107,12 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
" .long 2b-.\n" " .long 2b-.\n"
" lda $31,3b-2b(%0)\n" " lda $31,3b-2b(%0)\n"
" .previous\n" " .previous\n"
: "=&r"(prev), "=&r"(cmp) : "+r"(ret), "=&r"(prev), "=&r"(cmp)
: "r"(uaddr), "r"((long)oldval), "r"(newval) : "r"(uaddr), "r"((long)oldval), "r"(newval)
: "memory"); : "memory");
return prev; *uval = prev;
return ret;
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -35,7 +35,7 @@ ...@@ -35,7 +35,7 @@
: "cc", "memory") : "cc", "memory")
static inline int static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr) futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); /* implies preempt_disable() */ pagefault_disable(); /* implies preempt_disable() */
...@@ -88,36 +88,35 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -88,36 +88,35 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int val; int ret = 0;
u32 val;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); /* implies preempt_disable() */
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " T(ldr) " %0, [%3]\n" "1: " T(ldr) " %1, [%4]\n"
" teq %0, %1\n" " teq %1, %2\n"
" it eq @ explicit IT needed for the 2b label\n" " it eq @ explicit IT needed for the 2b label\n"
"2: " T(streq) " %2, [%3]\n" "2: " T(streq) " %3, [%4]\n"
"3:\n" "3:\n"
" .pushsection __ex_table,\"a\"\n" " .pushsection __ex_table,\"a\"\n"
" .align 3\n" " .align 3\n"
" .long 1b, 4f, 2b, 4f\n" " .long 1b, 4f, 2b, 4f\n"
" .popsection\n" " .popsection\n"
" .pushsection .fixup,\"ax\"\n" " .pushsection .fixup,\"ax\"\n"
"4: mov %0, %4\n" "4: mov %0, %5\n"
" b 3b\n" " b 3b\n"
" .popsection" " .popsection"
: "=&r" (val) : "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory"); : "cc", "memory");
pagefault_enable(); /* subsumes preempt_enable() */ *uval = val;
return ret;
return val;
} }
#endif /* !SMP */ #endif /* !SMP */
......
...@@ -7,10 +7,11 @@ ...@@ -7,10 +7,11 @@
#include <asm/errno.h> #include <asm/errno.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
return -ENOSYS; return -ENOSYS;
} }
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
* the various futex operations; MMU fault checking is ignored under no-MMU * the various futex operations; MMU fault checking is ignored under no-MMU
* conditions * conditions
*/ */
static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
{ {
int oldval, ret; int oldval, ret;
...@@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_o ...@@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_o
return ret; return ret;
} }
static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
{ {
int oldval, ret; int oldval, ret;
...@@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_o ...@@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_o
return ret; return ret;
} }
static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
{ {
int oldval, ret; int oldval, ret;
...@@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_ol ...@@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_ol
return ret; return ret;
} }
static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
{ {
int oldval, ret; int oldval, ret;
...@@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_o ...@@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_o
return ret; return ret;
} }
static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval) static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
{ {
int oldval, ret; int oldval, ret;
...@@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_o ...@@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_o
/* /*
* do the futex operations * do the futex operations
*/ */
int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
......
...@@ -46,7 +46,7 @@ do { \ ...@@ -46,7 +46,7 @@ do { \
} while (0) } while (0)
static inline int static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr) futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -100,23 +100,26 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
{ {
register unsigned long r8 __asm ("r8"); register unsigned long r8 __asm ("r8") = 0;
unsigned long prev;
__asm__ __volatile__( __asm__ __volatile__(
" mf;; \n" " mf;; \n"
" mov ar.ccv=%3;; \n" " mov ar.ccv=%3;; \n"
"[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n"
" .xdata4 \"__ex_table\", 1b-., 2f-. \n" " .xdata4 \"__ex_table\", 1b-., 2f-. \n"
"[2:]" "[2:]"
: "=r" (r8) : "=r" (prev)
: "r" (uaddr), "r" (newval), : "r" (uaddr), "r" (newval),
"rO" ((long) (unsigned) oldval) "rO" ((long) (unsigned) oldval)
: "memory"); : "memory");
*uval = prev;
return r8; return r8;
} }
} }
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
}) })
static inline int static inline int
futex_atomic_op_inuser(int encoded_op, int __user *uaddr) futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -94,31 +94,34 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -94,31 +94,34 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int prev, cmp; int ret = 0, cmp;
u32 prev;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
__asm__ __volatile__ ("1: lwx %0, %2, r0; \ __asm__ __volatile__ ("1: lwx %1, %3, r0; \
cmp %1, %0, %3; \ cmp %2, %1, %4; \
beqi %1, 3f; \ beqi %2, 3f; \
2: swx %4, %2, r0; \ 2: swx %5, %3, r0; \
addic %1, r0, 0; \ addic %2, r0, 0; \
bnei %1, 1b; \ bnei %2, 1b; \
3: \ 3: \
.section .fixup,\"ax\"; \ .section .fixup,\"ax\"; \
4: brid 3b; \ 4: brid 3b; \
addik %0, r0, %5; \ addik %0, r0, %6; \
.previous; \ .previous; \
.section __ex_table,\"a\"; \ .section __ex_table,\"a\"; \
.word 1b,4b,2b,4b; \ .word 1b,4b,2b,4b; \
.previous;" \ .previous;" \
: "=&r" (prev), "=&r"(cmp) \ : "+r" (ret), "=&r" (prev), "=&r"(cmp) \
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)); : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT));
return prev; *uval = prev;
return ret;
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -75,7 +75,7 @@ ...@@ -75,7 +75,7 @@
} }
static inline int static inline int
futex_atomic_op_inuser(int encoded_op, int __user *uaddr) futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -132,11 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -132,11 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int retval; int ret = 0;
u32 val;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
if (cpu_has_llsc && R10000_LLSC_WAR) { if (cpu_has_llsc && R10000_LLSC_WAR) {
...@@ -145,25 +147,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) ...@@ -145,25 +147,25 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
" .set push \n" " .set push \n"
" .set noat \n" " .set noat \n"
" .set mips3 \n" " .set mips3 \n"
"1: ll %0, %2 \n" "1: ll %1, %3 \n"
" bne %0, %z3, 3f \n" " bne %1, %z4, 3f \n"
" .set mips0 \n" " .set mips0 \n"
" move $1, %z4 \n" " move $1, %z5 \n"
" .set mips3 \n" " .set mips3 \n"
"2: sc $1, %1 \n" "2: sc $1, %2 \n"
" beqzl $1, 1b \n" " beqzl $1, 1b \n"
__WEAK_LLSC_MB __WEAK_LLSC_MB
"3: \n" "3: \n"
" .set pop \n" " .set pop \n"
" .section .fixup,\"ax\" \n" " .section .fixup,\"ax\" \n"
"4: li %0, %5 \n" "4: li %0, %6 \n"
" j 3b \n" " j 3b \n"
" .previous \n" " .previous \n"
" .section __ex_table,\"a\" \n" " .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n" " "__UA_ADDR "\t2b, 4b \n"
" .previous \n" " .previous \n"
: "=&r" (retval), "=R" (*uaddr) : "+r" (ret), "=&r" (val), "=R" (*uaddr)
: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
: "memory"); : "memory");
} else if (cpu_has_llsc) { } else if (cpu_has_llsc) {
...@@ -172,31 +174,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) ...@@ -172,31 +174,32 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
" .set push \n" " .set push \n"
" .set noat \n" " .set noat \n"
" .set mips3 \n" " .set mips3 \n"
"1: ll %0, %2 \n" "1: ll %1, %3 \n"
" bne %0, %z3, 3f \n" " bne %1, %z4, 3f \n"
" .set mips0 \n" " .set mips0 \n"
" move $1, %z4 \n" " move $1, %z5 \n"
" .set mips3 \n" " .set mips3 \n"
"2: sc $1, %1 \n" "2: sc $1, %2 \n"
" beqz $1, 1b \n" " beqz $1, 1b \n"
__WEAK_LLSC_MB __WEAK_LLSC_MB
"3: \n" "3: \n"
" .set pop \n" " .set pop \n"
" .section .fixup,\"ax\" \n" " .section .fixup,\"ax\" \n"
"4: li %0, %5 \n" "4: li %0, %6 \n"
" j 3b \n" " j 3b \n"
" .previous \n" " .previous \n"
" .section __ex_table,\"a\" \n" " .section __ex_table,\"a\" \n"
" "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t1b, 4b \n"
" "__UA_ADDR "\t2b, 4b \n" " "__UA_ADDR "\t2b, 4b \n"
" .previous \n" " .previous \n"
: "=&r" (retval), "=R" (*uaddr) : "+r" (ret), "=&r" (val), "=R" (*uaddr)
: "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
: "memory"); : "memory");
} else } else
return -ENOSYS; return -ENOSYS;
return retval; *uval = val;
return ret;
} }
#endif #endif
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include <asm/errno.h> #include <asm/errno.h>
static inline int static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr) futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
/* Non-atomic version */ /* Non-atomic version */
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int err = 0; u32 val;
int uval;
/* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
* our gateway page, and causes no end of trouble... * our gateway page, and causes no end of trouble...
...@@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) ...@@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
return -EFAULT; return -EFAULT;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
err = get_user(uval, uaddr); if (get_user(val, uaddr))
if (err) return -EFAULT; return -EFAULT;
if (uval == oldval) if (val == oldval && put_user(newval, uaddr))
err = put_user(newval, uaddr); return -EFAULT;
if (err) return -EFAULT; *uval = val;
return uval; return 0;
} }
#endif /*__KERNEL__*/ #endif /*__KERNEL__*/
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
: "cr0", "memory") : "cr0", "memory")
static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int prev; int ret = 0;
u32 prev;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
__asm__ __volatile__ ( __asm__ __volatile__ (
PPC_RELEASE_BARRIER PPC_RELEASE_BARRIER
"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
cmpw 0,%0,%3\n\ cmpw 0,%1,%4\n\
bne- 3f\n" bne- 3f\n"
PPC405_ERR77(0,%2) PPC405_ERR77(0,%3)
"2: stwcx. %4,0,%2\n\ "2: stwcx. %5,0,%3\n\
bne- 1b\n" bne- 1b\n"
PPC_ACQUIRE_BARRIER PPC_ACQUIRE_BARRIER
"3: .section .fixup,\"ax\"\n\ "3: .section .fixup,\"ax\"\n\
4: li %0,%5\n\ 4: li %0,%6\n\
b 3b\n\ b 3b\n\
.previous\n\ .previous\n\
.section __ex_table,\"a\"\n\ .section __ex_table,\"a\"\n\
.align 3\n\ .align 3\n\
" PPC_LONG "1b,4b,2b,4b\n\ " PPC_LONG "1b,4b,2b,4b\n\
.previous" \ .previous" \
: "=&r" (prev), "+m" (*uaddr) : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
: "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
: "cc", "memory"); : "cc", "memory");
return prev; *uval = prev;
return ret;
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <asm/errno.h> #include <asm/errno.h>
static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
return ret; return ret;
} }
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int oldval, int newval) u32 oldval, u32 newval)
{ {
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval); return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -83,8 +83,8 @@ struct uaccess_ops { ...@@ -83,8 +83,8 @@ struct uaccess_ops {
size_t (*clear_user)(size_t, void __user *); size_t (*clear_user)(size_t, void __user *);
size_t (*strnlen_user)(size_t, const char __user *); size_t (*strnlen_user)(size_t, const char __user *);
size_t (*strncpy_from_user)(size_t, const char __user *, char *); size_t (*strncpy_from_user)(size_t, const char __user *, char *);
int (*futex_atomic_op)(int op, int __user *, int oparg, int *old); int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
int (*futex_atomic_cmpxchg)(int __user *, int old, int new); int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
}; };
extern struct uaccess_ops uaccess; extern struct uaccess_ops uaccess;
......
...@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *); ...@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *);
extern size_t copy_to_user_std(size_t, void __user *, const void *); extern size_t copy_to_user_std(size_t, void __user *, const void *);
extern size_t strnlen_user_std(size_t, const char __user *); extern size_t strnlen_user_std(size_t, const char __user *);
extern size_t strncpy_from_user_std(size_t, const char __user *, char *); extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
extern int futex_atomic_cmpxchg_std(int __user *, int, int); extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32);
extern int futex_atomic_op_std(int, int __user *, int, int *); extern int futex_atomic_op_std(int, u32 __user *, int, int *);
extern size_t copy_from_user_pt(size_t, const void __user *, void *); extern size_t copy_from_user_pt(size_t, const void __user *, void *);
extern size_t copy_to_user_pt(size_t, void __user *, const void *); extern size_t copy_to_user_pt(size_t, void __user *, const void *);
extern int futex_atomic_op_pt(int, int __user *, int, int *); extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
extern int futex_atomic_cmpxchg_pt(int __user *, int, int); extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
#endif /* __ARCH_S390_LIB_UACCESS_H */ #endif /* __ARCH_S390_LIB_UACCESS_H */
...@@ -302,7 +302,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to, ...@@ -302,7 +302,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc" ); "m" (*uaddr) : "cc" );
static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{ {
int oldval = 0, newval, ret; int oldval = 0, newval, ret;
...@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) ...@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
return ret; return ret;
} }
int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{ {
int ret; int ret;
...@@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) ...@@ -354,26 +354,29 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
return ret; return ret;
} }
static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int ret; int ret;
asm volatile("0: cs %1,%4,0(%5)\n" asm volatile("0: cs %1,%4,0(%5)\n"
"1: lr %0,%1\n" "1: la %0,0\n"
"2:\n" "2:\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b) EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory" ); : "cc", "memory" );
*uval = oldval;
return ret; return ret;
} }
int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int ret; int ret;
if (segment_eq(get_fs(), KERNEL_DS)) if (segment_eq(get_fs(), KERNEL_DS))
return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock); spin_lock(&current->mm->page_table_lock);
uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr);
if (!uaddr) { if (!uaddr) {
...@@ -382,7 +385,7 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) ...@@ -382,7 +385,7 @@ int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval)
} }
get_page(virt_to_page(uaddr)); get_page(virt_to_page(uaddr));
spin_unlock(&current->mm->page_table_lock); spin_unlock(&current->mm->page_table_lock);
ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
put_page(virt_to_page(uaddr)); put_page(virt_to_page(uaddr));
return ret; return ret;
} }
......
...@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst) ...@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc"); "m" (*uaddr) : "cc");
int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
{ {
int oldval = 0, newval, ret; int oldval = 0, newval, ret;
...@@ -287,19 +287,21 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) ...@@ -287,19 +287,21 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
return ret; return ret;
} }
int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval) int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int ret; int ret;
asm volatile( asm volatile(
" sacf 256\n" " sacf 256\n"
"0: cs %1,%4,0(%5)\n" "0: cs %1,%4,0(%5)\n"
"1: lr %0,%1\n" "1: la %0,0\n"
"2: sacf 0\n" "2: sacf 0\n"
EX_TABLE(0b,2b) EX_TABLE(1b,2b) EX_TABLE(0b,2b) EX_TABLE(1b,2b)
: "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
: "cc", "memory" ); : "cc", "memory" );
*uval = oldval;
return ret; return ret;
} }
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
#include <asm/system.h> #include <asm/system.h>
static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
int *oldval) int *oldval)
{ {
unsigned long flags; unsigned long flags;
...@@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, ...@@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
return ret; return ret;
} }
static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
int *oldval) int *oldval)
{ {
unsigned long flags; unsigned long flags;
...@@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, ...@@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
return ret; return ret;
} }
static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
int *oldval) int *oldval)
{ {
unsigned long flags; unsigned long flags;
...@@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, ...@@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
return ret; return ret;
} }
static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
int *oldval) int *oldval)
{ {
unsigned long flags; unsigned long flags;
...@@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, ...@@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
return ret; return ret;
} }
static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
int *oldval) int *oldval)
{ {
unsigned long flags; unsigned long flags;
...@@ -88,11 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, ...@@ -88,11 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
return ret; return ret;
} }
static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
int oldval, int newval) u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
unsigned long flags; unsigned long flags;
int ret, prev = 0; int ret;
u32 prev = 0;
local_irq_save(flags); local_irq_save(flags);
...@@ -102,10 +104,8 @@ static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, ...@@ -102,10 +104,8 @@ static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr,
local_irq_restore(flags); local_irq_restore(flags);
if (ret) *uval = prev;
return ret; return ret;
return prev;
} }
#endif /* __ASM_SH_FUTEX_IRQ_H */ #endif /* __ASM_SH_FUTEX_IRQ_H */
...@@ -10,7 +10,7 @@ ...@@ -10,7 +10,7 @@
/* XXX: UP variants, fix for SH-4A and SMP.. */ /* XXX: UP variants, fix for SH-4A and SMP.. */
#include <asm/futex-irq.h> #include <asm/futex-irq.h>
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -65,12 +65,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -65,12 +65,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval); return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "memory") : "memory")
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
int cmparg = (encoded_op << 20) >> 20; int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem; int oldval = 0, ret, tem;
if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))) if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
return -EFAULT; return -EFAULT;
if (unlikely((((unsigned long) uaddr) & 0x3UL))) if (unlikely((((unsigned long) uaddr) & 0x3UL)))
return -EINVAL; return -EINVAL;
...@@ -85,26 +85,30 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -85,26 +85,30 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
int ret = 0;
__asm__ __volatile__( __asm__ __volatile__(
"\n1: casa [%3] %%asi, %2, %0\n" "\n1: casa [%4] %%asi, %3, %1\n"
"2:\n" "2:\n"
" .section .fixup,#alloc,#execinstr\n" " .section .fixup,#alloc,#execinstr\n"
" .align 4\n" " .align 4\n"
"3: sethi %%hi(2b), %0\n" "3: sethi %%hi(2b), %0\n"
" jmpl %0 + %%lo(2b), %%g0\n" " jmpl %0 + %%lo(2b), %%g0\n"
" mov %4, %0\n" " mov %5, %0\n"
" .previous\n" " .previous\n"
" .section __ex_table,\"a\"\n" " .section __ex_table,\"a\"\n"
" .align 4\n" " .align 4\n"
" .word 1b, 3b\n" " .word 1b, 3b\n"
" .previous\n" " .previous\n"
: "=r" (newval) : "+r" (ret), "=r" (newval)
: "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT)
: "memory"); : "memory");
return newval; *uval = newval;
return ret;
} }
#endif /* !(_SPARC64_FUTEX_H) */ #endif /* !(_SPARC64_FUTEX_H) */
...@@ -29,16 +29,16 @@ ...@@ -29,16 +29,16 @@
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/errno.h> #include <linux/errno.h>
extern struct __get_user futex_set(int __user *v, int i); extern struct __get_user futex_set(u32 __user *v, int i);
extern struct __get_user futex_add(int __user *v, int n); extern struct __get_user futex_add(u32 __user *v, int n);
extern struct __get_user futex_or(int __user *v, int n); extern struct __get_user futex_or(u32 __user *v, int n);
extern struct __get_user futex_andn(int __user *v, int n); extern struct __get_user futex_andn(u32 __user *v, int n);
extern struct __get_user futex_cmpxchg(int __user *v, int o, int n); extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
#ifndef __tilegx__ #ifndef __tilegx__
extern struct __get_user futex_xor(int __user *v, int n); extern struct __get_user futex_xor(u32 __user *v, int n);
#else #else
static inline struct __get_user futex_xor(int __user *uaddr, int n) static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
{ {
struct __get_user asm_ret = __get_user_4(uaddr); struct __get_user asm_ret = __get_user_4(uaddr);
if (!asm_ret.err) { if (!asm_ret.err) {
...@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n) ...@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n)
} }
#endif #endif
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -119,16 +119,17 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
return ret; return ret;
} }
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int newval) u32 oldval, u32 newval)
{ {
struct __get_user asm_ret; struct __get_user asm_ret;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
asm_ret = futex_cmpxchg(uaddr, oldval, newval); asm_ret = futex_cmpxchg(uaddr, oldval, newval);
return asm_ret.err ? asm_ret.err : asm_ret.val; *uval = asm_ret.val;
return asm_ret.err;
} }
#ifndef __tilegx__ #ifndef __tilegx__
......
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
"+m" (*uaddr), "=&r" (tem) \ "+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0)) : "r" (oparg), "i" (-EFAULT), "1" (0))
static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
...@@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ...@@ -109,9 +109,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
return ret; return ret;
} }
static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
int newval) u32 oldval, u32 newval)
{ {
int ret = 0;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
/* Real i386 machines have no cmpxchg instruction */ /* Real i386 machines have no cmpxchg instruction */
...@@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, ...@@ -119,21 +120,22 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
return -ENOSYS; return -ENOSYS;
#endif #endif
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
"2:\t.section .fixup, \"ax\"\n" "2:\t.section .fixup, \"ax\"\n"
"3:\tmov %2, %0\n" "3:\tmov %3, %0\n"
"\tjmp 2b\n" "\tjmp 2b\n"
"\t.previous\n" "\t.previous\n"
_ASM_EXTABLE(1b, 3b) _ASM_EXTABLE(1b, 3b)
: "=a" (oldval), "+m" (*uaddr) : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
: "i" (-EFAULT), "r" (newval), "0" (oldval) : "i" (-EFAULT), "r" (newval), "1" (oldval)
: "memory" : "memory"
); );
return oldval; *uval = oldval;
return ret;
} }
#endif #endif
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
#include <asm/errno.h> #include <asm/errno.h>
static inline int static inline int
futex_atomic_op_inuser (int encoded_op, int __user *uaddr) futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
{ {
int op = (encoded_op >> 28) & 7; int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15; int cmp = (encoded_op >> 24) & 15;
...@@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg; oparg = 1 << oparg;
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT; return -EFAULT;
pagefault_disable(); pagefault_disable();
...@@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ...@@ -48,7 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
} }
static inline int static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{ {
return -ENOSYS; return -ENOSYS;
} }
......
...@@ -31,15 +31,17 @@ ...@@ -31,15 +31,17 @@
* *
* Simple ASCII art explanation: * Simple ASCII art explanation:
* *
* |HEAD | * pl:prio_list (only for plist_node)
* | | * nl:node_list
* |prio_list.prev|<------------------------------------| * HEAD| NODE(S)
* |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-| * |
* |10 | |10| |21| |21| |21| |40| (prio) * ||------------------------------------|
* | | | | | | | | | | | | * ||->|pl|<->|pl|<--------------->|pl|<-|
* | | | | | | | | | | | | * | |10| |21| |21| |21| |40| (prio)
* |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| * | | | | | | | | | | |
* |node_list.prev|<------------------------------------| * | | | | | | | | | | |
* |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
* |-------------------------------------------|
* *
* The nodes on the prio_list list are sorted by priority to simplify * The nodes on the prio_list list are sorted by priority to simplify
* the insertion of new nodes. There are no nodes with duplicate * the insertion of new nodes. There are no nodes with duplicate
...@@ -78,7 +80,6 @@ ...@@ -78,7 +80,6 @@
#include <linux/spinlock_types.h> #include <linux/spinlock_types.h>
struct plist_head { struct plist_head {
struct list_head prio_list;
struct list_head node_list; struct list_head node_list;
#ifdef CONFIG_DEBUG_PI_LIST #ifdef CONFIG_DEBUG_PI_LIST
raw_spinlock_t *rawlock; raw_spinlock_t *rawlock;
...@@ -88,7 +89,8 @@ struct plist_head { ...@@ -88,7 +89,8 @@ struct plist_head {
struct plist_node { struct plist_node {
int prio; int prio;
struct plist_head plist; struct list_head prio_list;
struct list_head node_list;
}; };
#ifdef CONFIG_DEBUG_PI_LIST #ifdef CONFIG_DEBUG_PI_LIST
...@@ -100,7 +102,6 @@ struct plist_node { ...@@ -100,7 +102,6 @@ struct plist_node {
#endif #endif
#define _PLIST_HEAD_INIT(head) \ #define _PLIST_HEAD_INIT(head) \
.prio_list = LIST_HEAD_INIT((head).prio_list), \
.node_list = LIST_HEAD_INIT((head).node_list) .node_list = LIST_HEAD_INIT((head).node_list)
/** /**
...@@ -133,7 +134,8 @@ struct plist_node { ...@@ -133,7 +134,8 @@ struct plist_node {
#define PLIST_NODE_INIT(node, __prio) \ #define PLIST_NODE_INIT(node, __prio) \
{ \ { \
.prio = (__prio), \ .prio = (__prio), \
.plist = { _PLIST_HEAD_INIT((node).plist) }, \ .prio_list = LIST_HEAD_INIT((node).prio_list), \
.node_list = LIST_HEAD_INIT((node).node_list), \
} }
/** /**
...@@ -144,7 +146,6 @@ struct plist_node { ...@@ -144,7 +146,6 @@ struct plist_node {
static inline void static inline void
plist_head_init(struct plist_head *head, spinlock_t *lock) plist_head_init(struct plist_head *head, spinlock_t *lock)
{ {
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list); INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST #ifdef CONFIG_DEBUG_PI_LIST
head->spinlock = lock; head->spinlock = lock;
...@@ -160,7 +161,6 @@ plist_head_init(struct plist_head *head, spinlock_t *lock) ...@@ -160,7 +161,6 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
static inline void static inline void
plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
{ {
INIT_LIST_HEAD(&head->prio_list);
INIT_LIST_HEAD(&head->node_list); INIT_LIST_HEAD(&head->node_list);
#ifdef CONFIG_DEBUG_PI_LIST #ifdef CONFIG_DEBUG_PI_LIST
head->rawlock = lock; head->rawlock = lock;
...@@ -176,7 +176,8 @@ plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) ...@@ -176,7 +176,8 @@ plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
static inline void plist_node_init(struct plist_node *node, int prio) static inline void plist_node_init(struct plist_node *node, int prio)
{ {
node->prio = prio; node->prio = prio;
plist_head_init(&node->plist, NULL); INIT_LIST_HEAD(&node->prio_list);
INIT_LIST_HEAD(&node->node_list);
} }
extern void plist_add(struct plist_node *node, struct plist_head *head); extern void plist_add(struct plist_node *node, struct plist_head *head);
...@@ -188,7 +189,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); ...@@ -188,7 +189,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
* @head: the head for your list * @head: the head for your list
*/ */
#define plist_for_each(pos, head) \ #define plist_for_each(pos, head) \
list_for_each_entry(pos, &(head)->node_list, plist.node_list) list_for_each_entry(pos, &(head)->node_list, node_list)
/** /**
* plist_for_each_safe - iterate safely over a plist of given type * plist_for_each_safe - iterate safely over a plist of given type
...@@ -199,7 +200,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); ...@@ -199,7 +200,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
* Iterate over a plist of given type, safe against removal of list entry. * Iterate over a plist of given type, safe against removal of list entry.
*/ */
#define plist_for_each_safe(pos, n, head) \ #define plist_for_each_safe(pos, n, head) \
list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list) list_for_each_entry_safe(pos, n, &(head)->node_list, node_list)
/** /**
* plist_for_each_entry - iterate over list of given type * plist_for_each_entry - iterate over list of given type
...@@ -208,7 +209,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); ...@@ -208,7 +209,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
* @mem: the name of the list_struct within the struct * @mem: the name of the list_struct within the struct
*/ */
#define plist_for_each_entry(pos, head, mem) \ #define plist_for_each_entry(pos, head, mem) \
list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list) list_for_each_entry(pos, &(head)->node_list, mem.node_list)
/** /**
* plist_for_each_entry_safe - iterate safely over list of given type * plist_for_each_entry_safe - iterate safely over list of given type
...@@ -220,7 +221,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head); ...@@ -220,7 +221,7 @@ extern void plist_del(struct plist_node *node, struct plist_head *head);
* Iterate over list of given type, safe against removal of list entry. * Iterate over list of given type, safe against removal of list entry.
*/ */
#define plist_for_each_entry_safe(pos, n, head, m) \ #define plist_for_each_entry_safe(pos, n, head, m) \
list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list) list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list)
/** /**
* plist_head_empty - return !0 if a plist_head is empty * plist_head_empty - return !0 if a plist_head is empty
...@@ -237,7 +238,7 @@ static inline int plist_head_empty(const struct plist_head *head) ...@@ -237,7 +238,7 @@ static inline int plist_head_empty(const struct plist_head *head)
*/ */
static inline int plist_node_empty(const struct plist_node *node) static inline int plist_node_empty(const struct plist_node *node)
{ {
return plist_head_empty(&node->plist); return list_empty(&node->node_list);
} }
/* All functions below assume the plist_head is not empty. */ /* All functions below assume the plist_head is not empty. */
...@@ -285,7 +286,7 @@ static inline int plist_node_empty(const struct plist_node *node) ...@@ -285,7 +286,7 @@ static inline int plist_node_empty(const struct plist_node *node)
static inline struct plist_node *plist_first(const struct plist_head *head) static inline struct plist_node *plist_first(const struct plist_head *head)
{ {
return list_entry(head->node_list.next, return list_entry(head->node_list.next,
struct plist_node, plist.node_list); struct plist_node, node_list);
} }
/** /**
...@@ -297,7 +298,7 @@ static inline struct plist_node *plist_first(const struct plist_head *head) ...@@ -297,7 +298,7 @@ static inline struct plist_node *plist_first(const struct plist_head *head)
static inline struct plist_node *plist_last(const struct plist_head *head) static inline struct plist_node *plist_last(const struct plist_head *head)
{ {
return list_entry(head->node_list.prev, return list_entry(head->node_list.prev,
struct plist_node, plist.node_list); struct plist_node, node_list);
} }
#endif #endif
...@@ -381,15 +381,16 @@ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, ...@@ -381,15 +381,16 @@ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
return NULL; return NULL;
} }
static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval) static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
u32 uval, u32 newval)
{ {
u32 curval; int ret;
pagefault_disable(); pagefault_disable();
curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
pagefault_enable(); pagefault_enable();
return curval; return ret;
} }
static int get_futex_value_locked(u32 *dest, u32 __user *from) static int get_futex_value_locked(u32 *dest, u32 __user *from)
...@@ -674,7 +675,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, ...@@ -674,7 +675,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
struct task_struct *task, int set_waiters) struct task_struct *task, int set_waiters)
{ {
int lock_taken, ret, ownerdied = 0; int lock_taken, ret, ownerdied = 0;
u32 uval, newval, curval; u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry: retry:
ret = lock_taken = 0; ret = lock_taken = 0;
...@@ -684,19 +685,17 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, ...@@ -684,19 +685,17 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
* (by doing a 0 -> TID atomic cmpxchg), while holding all * (by doing a 0 -> TID atomic cmpxchg), while holding all
* the locks. It will most likely not succeed. * the locks. It will most likely not succeed.
*/ */
newval = task_pid_vnr(task); newval = vpid;
if (set_waiters) if (set_waiters)
newval |= FUTEX_WAITERS; newval |= FUTEX_WAITERS;
curval = cmpxchg_futex_value_locked(uaddr, 0, newval); if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
if (unlikely(curval == -EFAULT))
return -EFAULT; return -EFAULT;
/* /*
* Detect deadlocks. * Detect deadlocks.
*/ */
if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))) if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
return -EDEADLK; return -EDEADLK;
/* /*
...@@ -723,14 +722,12 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, ...@@ -723,14 +722,12 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
*/ */
if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
/* Keep the OWNER_DIED bit */ /* Keep the OWNER_DIED bit */
newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task); newval = (curval & ~FUTEX_TID_MASK) | vpid;
ownerdied = 0; ownerdied = 0;
lock_taken = 1; lock_taken = 1;
} }
curval = cmpxchg_futex_value_locked(uaddr, uval, newval); if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
if (unlikely(curval == -EFAULT))
return -EFAULT; return -EFAULT;
if (unlikely(curval != uval)) if (unlikely(curval != uval))
goto retry; goto retry;
...@@ -775,6 +772,24 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, ...@@ -775,6 +772,24 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
return ret; return ret;
} }
/**
* __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
* @q: The futex_q to unqueue
*
* The q->lock_ptr must not be NULL and must be held by the caller.
*/
static void __unqueue_futex(struct futex_q *q)
{
struct futex_hash_bucket *hb;
if (WARN_ON(!q->lock_ptr || !spin_is_locked(q->lock_ptr)
|| plist_node_empty(&q->list)))
return;
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
}
/* /*
* The hash bucket lock must be held when this is called. * The hash bucket lock must be held when this is called.
* Afterwards, the futex_q must not be accessed. * Afterwards, the futex_q must not be accessed.
...@@ -792,7 +807,7 @@ static void wake_futex(struct futex_q *q) ...@@ -792,7 +807,7 @@ static void wake_futex(struct futex_q *q)
*/ */
get_task_struct(p); get_task_struct(p);
plist_del(&q->list, &q->list.plist); __unqueue_futex(q);
/* /*
* The waiting task can free the futex_q as soon as * The waiting task can free the futex_q as soon as
* q->lock_ptr = NULL is written, without taking any locks. A * q->lock_ptr = NULL is written, without taking any locks. A
...@@ -843,9 +858,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) ...@@ -843,9 +858,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
newval = FUTEX_WAITERS | task_pid_vnr(new_owner); newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
curval = cmpxchg_futex_value_locked(uaddr, uval, newval); if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
if (curval == -EFAULT)
ret = -EFAULT; ret = -EFAULT;
else if (curval != uval) else if (curval != uval)
ret = -EINVAL; ret = -EINVAL;
...@@ -880,10 +893,8 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval) ...@@ -880,10 +893,8 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
* There is no waiter, so we unlock the futex. The owner died * There is no waiter, so we unlock the futex. The owner died
* bit has not to be preserved here. We are the owner: * bit has not to be preserved here. We are the owner:
*/ */
oldval = cmpxchg_futex_value_locked(uaddr, uval, 0); if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
return -EFAULT;
if (oldval == -EFAULT)
return oldval;
if (oldval != uval) if (oldval != uval)
return -EAGAIN; return -EAGAIN;
...@@ -1071,9 +1082,6 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, ...@@ -1071,9 +1082,6 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
plist_del(&q->list, &hb1->chain); plist_del(&q->list, &hb1->chain);
plist_add(&q->list, &hb2->chain); plist_add(&q->list, &hb2->chain);
q->lock_ptr = &hb2->lock; q->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
q->list.plist.spinlock = &hb2->lock;
#endif
} }
get_futex_key_refs(key2); get_futex_key_refs(key2);
q->key = *key2; q->key = *key2;
...@@ -1100,16 +1108,12 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, ...@@ -1100,16 +1108,12 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
get_futex_key_refs(key); get_futex_key_refs(key);
q->key = *key; q->key = *key;
WARN_ON(plist_node_empty(&q->list)); __unqueue_futex(q);
plist_del(&q->list, &q->list.plist);
WARN_ON(!q->rt_waiter); WARN_ON(!q->rt_waiter);
q->rt_waiter = NULL; q->rt_waiter = NULL;
q->lock_ptr = &hb->lock; q->lock_ptr = &hb->lock;
#ifdef CONFIG_DEBUG_PI_LIST
q->list.plist.spinlock = &hb->lock;
#endif
wake_up_state(q->task, TASK_NORMAL); wake_up_state(q->task, TASK_NORMAL);
} }
...@@ -1457,9 +1461,6 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) ...@@ -1457,9 +1461,6 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
prio = min(current->normal_prio, MAX_RT_PRIO); prio = min(current->normal_prio, MAX_RT_PRIO);
plist_node_init(&q->list, prio); plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
q->list.plist.spinlock = &hb->lock;
#endif
plist_add(&q->list, &hb->chain); plist_add(&q->list, &hb->chain);
q->task = current; q->task = current;
spin_unlock(&hb->lock); spin_unlock(&hb->lock);
...@@ -1504,8 +1505,7 @@ static int unqueue_me(struct futex_q *q) ...@@ -1504,8 +1505,7 @@ static int unqueue_me(struct futex_q *q)
spin_unlock(lock_ptr); spin_unlock(lock_ptr);
goto retry; goto retry;
} }
WARN_ON(plist_node_empty(&q->list)); __unqueue_futex(q);
plist_del(&q->list, &q->list.plist);
BUG_ON(q->pi_state); BUG_ON(q->pi_state);
...@@ -1525,8 +1525,7 @@ static int unqueue_me(struct futex_q *q) ...@@ -1525,8 +1525,7 @@ static int unqueue_me(struct futex_q *q)
static void unqueue_me_pi(struct futex_q *q) static void unqueue_me_pi(struct futex_q *q)
__releases(q->lock_ptr) __releases(q->lock_ptr)
{ {
WARN_ON(plist_node_empty(&q->list)); __unqueue_futex(q);
plist_del(&q->list, &q->list.plist);
BUG_ON(!q->pi_state); BUG_ON(!q->pi_state);
free_pi_state(q->pi_state); free_pi_state(q->pi_state);
...@@ -1578,9 +1577,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, ...@@ -1578,9 +1577,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
while (1) { while (1) {
newval = (uval & FUTEX_OWNER_DIED) | newtid; newval = (uval & FUTEX_OWNER_DIED) | newtid;
curval = cmpxchg_futex_value_locked(uaddr, uval, newval); if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
if (curval == -EFAULT)
goto handle_fault; goto handle_fault;
if (curval == uval) if (curval == uval)
break; break;
...@@ -1781,13 +1778,14 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, ...@@ -1781,13 +1778,14 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
* *
* The basic logical guarantee of a futex is that it blocks ONLY * The basic logical guarantee of a futex is that it blocks ONLY
* if cond(var) is known to be true at the time of blocking, for * if cond(var) is known to be true at the time of blocking, for
* any cond. If we queued after testing *uaddr, that would open * any cond. If we locked the hash-bucket after testing *uaddr, that
* a race condition where we could block indefinitely with * would open a race condition where we could block indefinitely with
* cond(var) false, which would violate the guarantee. * cond(var) false, which would violate the guarantee.
* *
* A consequence is that futex_wait() can return zero and absorb * On the other hand, we insert q and release the hash-bucket only
* a wakeup when *uaddr != val on entry to the syscall. This is * after testing *uaddr. This guarantees that futex_wait() will NOT
* rare, but normal. * absorb a wakeup if *uaddr does not match the desired values
* while the syscall executes.
*/ */
retry: retry:
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key); ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key);
...@@ -2046,9 +2044,9 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) ...@@ -2046,9 +2044,9 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
{ {
struct futex_hash_bucket *hb; struct futex_hash_bucket *hb;
struct futex_q *this, *next; struct futex_q *this, *next;
u32 uval;
struct plist_head *head; struct plist_head *head;
union futex_key key = FUTEX_KEY_INIT; union futex_key key = FUTEX_KEY_INIT;
u32 uval, vpid = task_pid_vnr(current);
int ret; int ret;
retry: retry:
...@@ -2057,7 +2055,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) ...@@ -2057,7 +2055,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
/* /*
* We release only a lock we actually own: * We release only a lock we actually own:
*/ */
if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) if ((uval & FUTEX_TID_MASK) != vpid)
return -EPERM; return -EPERM;
ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key);
...@@ -2072,17 +2070,14 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) ...@@ -2072,17 +2070,14 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* again. If it succeeds then we can return without waking * again. If it succeeds then we can return without waking
* anyone else up: * anyone else up:
*/ */
if (!(uval & FUTEX_OWNER_DIED)) if (!(uval & FUTEX_OWNER_DIED) &&
uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0); cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
if (unlikely(uval == -EFAULT))
goto pi_faulted; goto pi_faulted;
/* /*
* Rare case: we managed to release the lock atomically, * Rare case: we managed to release the lock atomically,
* no need to wake anyone else up: * no need to wake anyone else up:
*/ */
if (unlikely(uval == task_pid_vnr(current))) if (unlikely(uval == vpid))
goto out_unlock; goto out_unlock;
/* /*
...@@ -2167,7 +2162,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, ...@@ -2167,7 +2162,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* We were woken prior to requeue by a timeout or a signal. * We were woken prior to requeue by a timeout or a signal.
* Unqueue the futex_q and determine which it was. * Unqueue the futex_q and determine which it was.
*/ */
plist_del(&q->list, &q->list.plist); plist_del(&q->list, &hb->chain);
/* Handle spurious wakeups gracefully */ /* Handle spurious wakeups gracefully */
ret = -EWOULDBLOCK; ret = -EWOULDBLOCK;
...@@ -2463,11 +2458,20 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) ...@@ -2463,11 +2458,20 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
* userspace. * userspace.
*/ */
mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); /*
* We are not holding a lock here, but we want to have
if (nval == -EFAULT) * the pagefault_disable/enable() protection because
return -1; * we want to handle the fault gracefully. If the
* access fails we try to fault in the futex with R/W
* verification via get_user_pages. get_user() above
* does not guarantee R/W access. If that fails we
* give up and leave the futex locked.
*/
if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
if (fault_in_user_writeable(uaddr))
return -1;
goto retry;
}
if (nval != uval) if (nval != uval)
goto retry; goto retry;
...@@ -2678,8 +2682,7 @@ static int __init futex_init(void) ...@@ -2678,8 +2682,7 @@ static int __init futex_init(void)
* implementation, the non-functional ones will return * implementation, the non-functional ones will return
* -ENOSYS. * -ENOSYS.
*/ */
curval = cmpxchg_futex_value_locked(NULL, 0, 0); if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
if (curval == -EFAULT)
futex_cmpxchg_enabled = 1; futex_cmpxchg_enabled = 1;
for (i = 0; i < ARRAY_SIZE(futex_queues); i++) { for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
#ifdef CONFIG_DEBUG_PI_LIST #ifdef CONFIG_DEBUG_PI_LIST
static struct plist_head test_head;
static void plist_check_prev_next(struct list_head *t, struct list_head *p, static void plist_check_prev_next(struct list_head *t, struct list_head *p,
struct list_head *n) struct list_head *n)
{ {
...@@ -54,12 +56,13 @@ static void plist_check_list(struct list_head *top) ...@@ -54,12 +56,13 @@ static void plist_check_list(struct list_head *top)
static void plist_check_head(struct plist_head *head) static void plist_check_head(struct plist_head *head)
{ {
WARN_ON(!head->rawlock && !head->spinlock); WARN_ON(head != &test_head && !head->rawlock && !head->spinlock);
if (head->rawlock) if (head->rawlock)
WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); WARN_ON_SMP(!raw_spin_is_locked(head->rawlock));
if (head->spinlock) if (head->spinlock)
WARN_ON_SMP(!spin_is_locked(head->spinlock)); WARN_ON_SMP(!spin_is_locked(head->spinlock));
plist_check_list(&head->prio_list); if (!plist_head_empty(head))
plist_check_list(&plist_first(head)->prio_list);
plist_check_list(&head->node_list); plist_check_list(&head->node_list);
} }
...@@ -75,25 +78,33 @@ static void plist_check_head(struct plist_head *head) ...@@ -75,25 +78,33 @@ static void plist_check_head(struct plist_head *head)
*/ */
void plist_add(struct plist_node *node, struct plist_head *head) void plist_add(struct plist_node *node, struct plist_head *head)
{ {
struct plist_node *iter; struct plist_node *first, *iter, *prev = NULL;
struct list_head *node_next = &head->node_list;
plist_check_head(head); plist_check_head(head);
WARN_ON(!plist_node_empty(node)); WARN_ON(!plist_node_empty(node));
WARN_ON(!list_empty(&node->prio_list));
if (plist_head_empty(head))
goto ins_node;
list_for_each_entry(iter, &head->prio_list, plist.prio_list) { first = iter = plist_first(head);
if (node->prio < iter->prio)
goto lt_prio; do {
else if (node->prio == iter->prio) { if (node->prio < iter->prio) {
iter = list_entry(iter->plist.prio_list.next, node_next = &iter->node_list;
struct plist_node, plist.prio_list); break;
goto eq_prio;
} }
}
lt_prio: prev = iter;
list_add_tail(&node->plist.prio_list, &iter->plist.prio_list); iter = list_entry(iter->prio_list.next,
eq_prio: struct plist_node, prio_list);
list_add_tail(&node->plist.node_list, &iter->plist.node_list); } while (iter != first);
if (!prev || prev->prio != node->prio)
list_add_tail(&node->prio_list, &iter->prio_list);
ins_node:
list_add_tail(&node->node_list, node_next);
plist_check_head(head); plist_check_head(head);
} }
...@@ -108,14 +119,98 @@ void plist_del(struct plist_node *node, struct plist_head *head) ...@@ -108,14 +119,98 @@ void plist_del(struct plist_node *node, struct plist_head *head)
{ {
plist_check_head(head); plist_check_head(head);
if (!list_empty(&node->plist.prio_list)) { if (!list_empty(&node->prio_list)) {
struct plist_node *next = plist_first(&node->plist); if (node->node_list.next != &head->node_list) {
struct plist_node *next;
next = list_entry(node->node_list.next,
struct plist_node, node_list);
list_move_tail(&next->plist.prio_list, &node->plist.prio_list); /* add the next plist_node into prio_list */
list_del_init(&node->plist.prio_list); if (list_empty(&next->prio_list))
list_add(&next->prio_list, &node->prio_list);
}
list_del_init(&node->prio_list);
} }
list_del_init(&node->plist.node_list); list_del_init(&node->node_list);
plist_check_head(head); plist_check_head(head);
} }
#ifdef CONFIG_DEBUG_PI_LIST
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/init.h>
static struct plist_node __initdata test_node[241];
/*
 * plist_test_check - verify the structural invariants of test_head.
 * @nr_expect: number of nodes expected to be on the list right now.
 *
 * Walks the whole node_list and BUG()s if any plist invariant is
 * violated: every prio group except the first must have exactly one
 * representative on prio_list, prio_list must be sorted ascending,
 * each prio_list entry must link to the head of the next prio group,
 * and the last prio group must link back to the first.
 */
static void __init plist_test_check(int nr_expect)
{
	struct plist_node *first, *prio_pos, *node_pos;
	/* Empty list: only valid if the caller expects zero nodes. */
	if (plist_head_empty(&test_head)) {
		BUG_ON(nr_expect != 0);
		return;
	}
	prio_pos = first = plist_first(&test_head);
	plist_for_each(node_pos, &test_head) {
		/* nr_expect is decremented per node; going negative means
		 * the list holds more nodes than expected. */
		if (nr_expect-- < 0)
			break;
		if (node_pos == first)
			continue;
		/* Same-prio followers must NOT be on prio_list; only the
		 * first node of each prio group is. */
		if (node_pos->prio == prio_pos->prio) {
			BUG_ON(!list_empty(&node_pos->prio_list));
			continue;
		}
		/* prio groups must appear in ascending priority order. */
		BUG_ON(prio_pos->prio > node_pos->prio);
		/* prio_list must link group head to the next group head. */
		BUG_ON(prio_pos->prio_list.next != &node_pos->prio_list);
		prio_pos = node_pos;
	}
	/* nr_expect reaches exactly 0 iff the node count matched. */
	BUG_ON(nr_expect != 0);
	/* prio_list is circular: last group links back to the first. */
	BUG_ON(prio_pos->prio_list.next != &first->prio_list);
}
/*
 * plist_test - boot-time self test for the priority list implementation.
 *
 * Pseudo-randomly adds nodes to / removes nodes from a test list and
 * re-validates every plist invariant after each operation via
 * plist_test_check(), then drains the list the same way.
 *
 * Returns 0 on success (invariant violations BUG() instead).
 */
static int __init plist_test(void)
{
	unsigned int seed = local_clock();
	int count = 0, idx, round;

	printk(KERN_INFO "start plist test\n");
	plist_head_init(&test_head, NULL);
	for (idx = 0; idx < ARRAY_SIZE(test_node); idx++)
		plist_node_init(test_node + idx, 0);

	/*
	 * Randomly toggle nodes in and out of the list, checking the
	 * full invariant set after every single mutation.
	 */
	for (round = 0; round < 1000; round++) {
		seed = seed * 193939 % 47629;
		idx = seed % ARRAY_SIZE(test_node);
		if (!plist_node_empty(test_node + idx)) {
			plist_del(test_node + idx, &test_head);
			count--;
		} else {
			/* Draw a second random number for the priority. */
			seed = seed * 193939 % 47629;
			test_node[idx].prio = seed % 99;
			plist_add(test_node + idx, &test_head);
			count++;
		}
		plist_test_check(count);
	}

	/* Drain the remaining nodes, re-validating after each removal. */
	for (idx = 0; idx < ARRAY_SIZE(test_node); idx++) {
		if (plist_node_empty(test_node + idx))
			continue;
		plist_del(test_node + idx, &test_head);
		count--;
		plist_test_check(count);
	}

	printk(KERN_INFO "end plist test\n");
	return 0;
}
module_init(plist_test);
#endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment