Commit 64253acb authored by Harvey Harrison's avatar Harvey Harrison Committed by Martin Schwidefsky

[S390] s390: use the new byteorder headers

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent e37f50e1
...@@ -11,32 +11,39 @@ ...@@ -11,32 +11,39 @@
#include <asm/types.h> #include <asm/types.h>
#ifdef __GNUC__ #define __BIG_ENDIAN
#ifndef __s390x__
# define __SWAB_64_THRU_32__
#endif
#ifdef __s390x__ #ifdef __s390x__
static inline __u64 ___arch__swab64p(const __u64 *x) static inline __u64 __arch_swab64p(const __u64 *x)
{ {
__u64 result; __u64 result;
asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x)); asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
return result; return result;
} }
#define __arch_swab64p __arch_swab64p
static inline __u64 ___arch__swab64(__u64 x) static inline __u64 __arch_swab64(__u64 x)
{ {
__u64 result; __u64 result;
asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x)); asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
return result; return result;
} }
#define __arch_swab64 __arch_swab64
static inline void ___arch__swab64s(__u64 *x) static inline void __arch_swab64s(__u64 *x)
{ {
*x = ___arch__swab64p(x); *x = __arch_swab64p(x);
} }
#define __arch_swab64s __arch_swab64s
#endif /* __s390x__ */ #endif /* __s390x__ */
static inline __u32 ___arch__swab32p(const __u32 *x) static inline __u32 __arch_swab32p(const __u32 *x)
{ {
__u32 result; __u32 result;
...@@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x) ...@@ -53,25 +60,20 @@ static inline __u32 ___arch__swab32p(const __u32 *x)
#endif /* __s390x__ */ #endif /* __s390x__ */
return result; return result;
} }
#define __arch_swab32p __arch_swab32p
static inline __u32 ___arch__swab32(__u32 x) #ifdef __s390x__
static inline __u32 __arch_swab32(__u32 x)
{ {
#ifndef __s390x__
return ___arch__swab32p(&x);
#else /* __s390x__ */
__u32 result; __u32 result;
asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x)); asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
return result; return result;
#endif /* __s390x__ */
}
static __inline__ void ___arch__swab32s(__u32 *x)
{
*x = ___arch__swab32p(x);
} }
#define __arch_swab32 __arch_swab32
#endif /* __s390x__ */
static __inline__ __u16 ___arch__swab16p(const __u16 *x) static inline __u16 __arch_swab16p(const __u16 *x)
{ {
__u16 result; __u16 result;
...@@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x) ...@@ -86,40 +88,8 @@ static __inline__ __u16 ___arch__swab16p(const __u16 *x)
#endif /* __s390x__ */ #endif /* __s390x__ */
return result; return result;
} }
#define __arch_swab16p __arch_swab16p
static __inline__ __u16 ___arch__swab16(__u16 x) #include <linux/byteorder.h>
{
return ___arch__swab16p(&x);
}
static __inline__ void ___arch__swab16s(__u16 *x)
{
*x = ___arch__swab16p(x);
}
#ifdef __s390x__
#define __arch__swab64(x) ___arch__swab64(x)
#define __arch__swab64p(x) ___arch__swab64p(x)
#define __arch__swab64s(x) ___arch__swab64s(x)
#endif /* __s390x__ */
#define __arch__swab32(x) ___arch__swab32(x)
#define __arch__swab16(x) ___arch__swab16(x)
#define __arch__swab32p(x) ___arch__swab32p(x)
#define __arch__swab16p(x) ___arch__swab16p(x)
#define __arch__swab32s(x) ___arch__swab32s(x)
#define __arch__swab16s(x) ___arch__swab16s(x)
#ifndef __s390x__
#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
# define __BYTEORDER_HAS_U64__
# define __SWAB_64_THRU_32__
#endif
#else /* __s390x__ */
#define __BYTEORDER_HAS_U64__
#endif /* __s390x__ */
#endif /* __GNUC__ */
#include <linux/byteorder/big_endian.h>
#endif /* _S390_BYTEORDER_H */ #endif /* _S390_BYTEORDER_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment