Commit ad38dab0 authored by Harvey Harrison, committed by Ingo Molnar

x86: use the new byteorder headers

Impact: cleanup, no functionality changed
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c63dfefd
...@@ -4,26 +4,33 @@ ...@@ -4,26 +4,33 @@
#include <asm/types.h> #include <asm/types.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#ifdef __GNUC__ #define __LITTLE_ENDIAN
#ifdef __i386__ static inline __attribute_const__ __u32 __arch_swab32(__u32 val)
static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{ {
#ifdef CONFIG_X86_BSWAP #ifdef __i386__
asm("bswap %0" : "=r" (x) : "0" (x)); # ifdef CONFIG_X86_BSWAP
#else asm("bswap %0" : "=r" (val) : "0" (val));
# else
asm("xchgb %b0,%h0\n\t" /* swap lower bytes */ asm("xchgb %b0,%h0\n\t" /* swap lower bytes */
"rorl $16,%0\n\t" /* swap words */ "rorl $16,%0\n\t" /* swap words */
"xchgb %b0,%h0" /* swap higher bytes */ "xchgb %b0,%h0" /* swap higher bytes */
: "=q" (x) : "=q" (val)
: "0" (x)); : "0" (val));
# endif
#else /* __i386__ */
asm("bswapl %0"
: "=r" (val)
: "0" (val));
#endif #endif
return x; return val;
} }
#define __arch_swab32 __arch_swab32
static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) static inline __attribute_const__ __u64 __arch_swab64(__u64 val)
{ {
#ifdef __i386__
union { union {
struct { struct {
__u32 a; __u32 a;
...@@ -32,50 +39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val) ...@@ -32,50 +39,27 @@ static inline __attribute_const__ __u64 ___arch__swab64(__u64 val)
__u64 u; __u64 u;
} v; } v;
v.u = val; v.u = val;
#ifdef CONFIG_X86_BSWAP # ifdef CONFIG_X86_BSWAP
asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1" asm("bswapl %0 ; bswapl %1 ; xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b) : "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b)); : "0" (v.s.a), "1" (v.s.b));
#else # else
v.s.a = ___arch__swab32(v.s.a); v.s.a = __arch_swab32(v.s.a);
v.s.b = ___arch__swab32(v.s.b); v.s.b = __arch_swab32(v.s.b);
asm("xchgl %0,%1" asm("xchgl %0,%1"
: "=r" (v.s.a), "=r" (v.s.b) : "=r" (v.s.a), "=r" (v.s.b)
: "0" (v.s.a), "1" (v.s.b)); : "0" (v.s.a), "1" (v.s.b));
#endif # endif
return v.u; return v.u;
}
#else /* __i386__ */ #else /* __i386__ */
static inline __attribute_const__ __u64 ___arch__swab64(__u64 x)
{
asm("bswapq %0" asm("bswapq %0"
: "=r" (x) : "=r" (val)
: "0" (x)); : "0" (val));
return x; return val;
}
static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
{
asm("bswapl %0"
: "=r" (x)
: "0" (x));
return x;
}
#endif #endif
}
#define __arch_swab64 __arch_swab64
/* Do not define swab16. Gcc is smart enough to recognize "C" version and #include <linux/byteorder.h>
convert it into rotation or exhange. */
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab32(x) ___arch__swab32(x)
#define __BYTEORDER_HAS_U64__
#endif /* __GNUC__ */
#include <linux/byteorder/little_endian.h>
#endif /* _ASM_X86_BYTEORDER_H */ #endif /* _ASM_X86_BYTEORDER_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment