Commit 2f2239d1 authored by Thomas Gleixner, committed by Ingo Molnar

x86: prepare merging futex_32/64.h

Replace .quad/.long with a define and use the same asm syntax
for i386 and x86_64.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 0e078e2f
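
The define in question is _ASM_PTR from the newly included <asm/asm.h>: it
expands to the pointer-sized assembler directive as a string literal, so a
single __ex_table template can emit .long entries on i386 and .quad entries
on x86_64. A minimal sketch of such a header, assuming the CONFIG_X86_32
selector; the real asm/asm.h of this series may differ in detail:

/* Sketch only: pick the pointer-sized directive as a string literal,
 * so C string pasting can splice it into inline-asm templates. */
#ifdef CONFIG_X86_32
# define _ASM_PTR	" .long "	/* 4-byte __ex_table entries on i386 */
#else
# define _ASM_PTR	" .quad "	/* 8-byte __ex_table entries on x86_64 */
#endif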
include/asm-x86/futex_32.h
@@ -4,6 +4,8 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+
+#include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/system.h>
 #include <asm/processor.h>
@@ -17,13 +19,13 @@
 	jmp	2b\n\
 	.previous\n\
 	.section __ex_table,\"a\"\n\
-	.align	8\n\
-	.long	1b,3b\n\
+	.align	8\n"	\
+	_ASM_PTR "1b,3b\n	\
 	.previous"	\
 	: "=r" (oldval), "=r" (ret), "+m" (*uaddr)	\
 	: "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile (	\
 "1:	movl	%2, %0\n\
 	movl	%0, %3\n"	\
@@ -35,8 +37,8 @@
 	jmp	3b\n\
 	.previous\n\
 	.section __ex_table,\"a\"\n\
-	.align	8\n\
-	.long	1b,4b,2b,4b\n\
+	.align	8\n"	\
+	_ASM_PTR "1b,4b,2b,4b\n	\
 	.previous"	\
 	: "=&a" (oldval), "=&r" (ret), "+m" (*uaddr),	\
 	  "=&r" (tem)	\
@@ -56,36 +58,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
+#ifndef CONFIG_X86_BSWAP
+	if (op == FUTEX_OP_SET && boot_cpu_data.x86 == 3)
+		return -ENOSYS;
+#endif
+
 	pagefault_disable();
 
-	if (op == FUTEX_OP_SET)
+	switch (op) {
+	case FUTEX_OP_SET:
 		__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
-	else {
-#ifndef CONFIG_X86_BSWAP
-		if (boot_cpu_data.x86 == 3)
-			ret = -ENOSYS;
-		else
-#endif
-		switch (op) {
-		case FUTEX_OP_ADD:
-			__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret,
-					   oldval, uaddr, oparg);
-			break;
-		case FUTEX_OP_OR:
-			__futex_atomic_op2("orl %4, %3", ret, oldval, uaddr,
-					   oparg);
-			break;
-		case FUTEX_OP_ANDN:
-			__futex_atomic_op2("andl %4, %3", ret, oldval, uaddr,
-					   ~oparg);
-			break;
-		case FUTEX_OP_XOR:
-			__futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr,
-					   oparg);
-			break;
-		default:
-			ret = -ENOSYS;
-		}
-	}
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
+				   uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op2("andl %4, %3", ret, oldval, uaddr, ~oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op2("xorl %4, %3", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
 
 	pagefault_enable();
@@ -120,7 +118,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	.section __ex_table, \"a\"	\n"
 	"	.align	8			\n"
-	"	.long	1b,3b			\n"
+	_ASM_PTR "	1b,3b			\n"
 	"	.previous			\n"
 	: "=a" (oldval), "+m" (*uaddr)
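
The quoting change inside the hunks above follows from _ASM_PTR being a
string literal: ".align 8\n" now closes the template string so the macro can
be pasted in, and "1b,3b\n" reopens it. A small user-space sketch of the same
pasting trick (MY_ASM_PTR and my_table are hypothetical stand-ins, not kernel
code):

#include <stdio.h>

/* Hypothetical stand-in for the kernel's _ASM_PTR string macro. */
#ifdef __x86_64__
# define MY_ASM_PTR " .quad "
#else
# define MY_ASM_PTR " .long "
#endif

int main(void)
{
	/* Adjacent string literals are concatenated by the compiler, so a
	 * single template serves both 32-bit and 64-bit builds. */
	const char *entry =
		".section my_table,\"a\"\n"
		MY_ASM_PTR "1b,3b\n"
		".previous\n";

	fputs(entry, stdout);	/* the text the assembler would see */
	return 0;
}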
include/asm-x86/futex_64.h
@@ -4,6 +4,8 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+
+#include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
@@ -16,13 +18,13 @@
 	jmp	2b\n\
 	.previous\n\
 	.section __ex_table,\"a\"\n\
-	.align	8\n\
-	.quad	1b,3b\n\
+	.align	8\n"	\
+	_ASM_PTR "1b,3b\n	\
 	.previous"	\
-	: "=r" (oldval), "=r" (ret), "=m" (*uaddr)	\
-	: "i" (-EFAULT), "m" (*uaddr), "0" (oparg), "1" (0))
+	: "=r" (oldval), "=r" (ret), "+m" (*uaddr)	\
+	: "i" (-EFAULT), "0" (oparg), "1" (0))
 
 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile (	\
 "1:	movl	%2, %0\n\
 	movl	%0, %3\n"	\
@@ -34,12 +36,12 @@
 	jmp	3b\n\
 	.previous\n\
 	.section __ex_table,\"a\"\n\
-	.align	8\n\
-	.quad	1b,4b,2b,4b\n\
+	.align	8\n"	\
+	_ASM_PTR "1b,4b,2b,4b\n	\
 	.previous"	\
-	: "=&a" (oldval), "=&r" (ret), "=m" (*uaddr),	\
+	: "=&a" (oldval), "=&r" (ret), "+m" (*uaddr),	\
 	  "=&r" (tem)	\
-	: "r" (oparg), "i" (-EFAULT), "m" (*uaddr), "1" (0))
+	: "r" (oparg), "i" (-EFAULT), "1" (0))
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -110,10 +112,10 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 	"	.section __ex_table, \"a\"	\n"
 	"	.align	8			\n"
-	"	.quad	1b,3b			\n"
+	_ASM_PTR "	1b,3b			\n"
 	"	.previous			\n"
-	: "=a" (oldval), "=m" (*uaddr)
+	: "=a" (oldval), "+m" (*uaddr)
 	: "i" (-EFAULT), "r" (newval), "0" (oldval)
 	: "memory"
 	);
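
Beyond the _ASM_PTR conversion, the 64-bit file also switches *uaddr from a
"=m" output plus a duplicate "m" input to a single "+m" read-write operand,
matching what the 32-bit file already does. A minimal user-space illustration
of that constraint, buildable on x86 hosts (hypothetical example, not kernel
code):

#include <stdio.h>

static int value = 5;

static void add_in_memory(int v)
{
	/* "+m" declares the memory operand as read AND written, which is
	 * what a read-modify-write instruction like addl really does; the
	 * old style spelled the same fact as "=m" output + "m" input. */
	__asm__ volatile("addl %1, %0"
			 : "+m" (value)
			 : "ir" (v));
}

int main(void)
{
	add_in_memory(3);
	printf("value = %d\n", value);	/* prints: value = 8 */
	return 0;
}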