Commit 219b1e4c authored by Max Filippov's avatar Max Filippov Committed by Chris Zankel

xtensa: add s32c1i-based atomic ops implementations

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
parent 00273125
...@@ -66,19 +66,35 @@ ...@@ -66,19 +66,35 @@
*/ */
/*
 * atomic_add - atomically add an integer to an atomic variable.
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * On cores with the S32C1I (compare-and-swap) instruction, retry a
 * load / add / s32c1i sequence until the store succeeds.  Otherwise
 * fall back to disabling interrupts (rsil to LOCKLEVEL) around a plain
 * read-modify-write, restoring PS afterwards.
 */
static inline void atomic_add(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"	/* tmp = *v */
			"       wsr     %1, scompare1\n"	/* expected old value */
			"       add     %0, %1, %2\n"	/* result = tmp + i */
			"       s32c1i  %0, %3, 0\n"	/* store iff *v == scompare1 */
			"       bne     %0, %1, 1b\n"	/* retry if someone raced us */
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"	/* mask irqs */
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"	/* restore saved PS */
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}
/** /**
...@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v) ...@@ -90,19 +106,35 @@ static inline void atomic_add(int i, atomic_t * v)
*/ */
/*
 * atomic_sub - atomically subtract an integer from an atomic variable.
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * S32C1I variant: compare-and-swap retry loop.  Fallback variant:
 * interrupt-disabled read-modify-write via rsil/wsr ps.
 */
static inline void atomic_sub(int i, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"	/* tmp = *v */
			"       wsr     %1, scompare1\n"	/* expected old value */
			"       sub     %0, %1, %2\n"	/* result = tmp - i */
			"       s32c1i  %0, %3, 0\n"	/* store iff unchanged */
			"       bne     %0, %1, 1b\n"	/* lost the race: retry */
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15, "__stringify(LOCKLEVEL)"\n"	/* mask irqs */
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"	/* restore PS */
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);
#endif
}
/* /*
...@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -111,40 +143,78 @@ static inline void atomic_sub(int i, atomic_t *v)
/*
 * atomic_add_return - atomically add and return the new value.
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Returns @v's value after the addition.  In the S32C1I variant the
 * loop leaves the *old* value in %0 on success (s32c1i writes back the
 * memory contents), so a trailing add recomputes old + i for the
 * return value.
 */
static inline int atomic_add_return(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"	/* tmp = *v */
			"       wsr     %1, scompare1\n"
			"       add     %0, %1, %2\n"	/* candidate new value */
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"	/* retry until swap succeeds */
			"       add     %0, %0, %2\n"	/* result = old + i */
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"	/* mask irqs */
			"       l32i    %0, %2, 0\n"
			"       add     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"	/* restore PS */
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}
/*
 * atomic_sub_return - atomically subtract and return the new value.
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Returns @v's value after the subtraction.  Mirrors
 * atomic_add_return(): the trailing sub recomputes old - i because
 * s32c1i leaves the old memory value in %0 on success.
 */
static inline int atomic_sub_return(int i, atomic_t * v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"	/* tmp = *v */
			"       wsr     %1, scompare1\n"
			"       sub     %0, %1, %2\n"	/* candidate new value */
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"	/* retry on contention */
			"       sub     %0, %0, %2\n"	/* result = old - i */
			: "=&a" (result), "=&a" (tmp)
			: "a" (i), "a" (v)
			: "memory"
			);

	return result;
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"	/* mask irqs */
			"       l32i    %0, %2, 0\n"
			"       sub     %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"	/* restore PS */
			"       rsync\n"
			: "=&a" (vval)
			: "a" (i), "a" (v)
			: "a15", "memory"
			);

	return vval;
#endif
}
/** /**
...@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) ...@@ -251,38 +321,70 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
/*
 * atomic_clear_mask - atomically clear bits in an atomic variable.
 * @mask: bits to clear
 * @v: pointer of type atomic_t
 *
 * S32C1I variant: AND with the complement of @mask (the complement is
 * computed by the compiler via the "a" (~mask) input) in a
 * compare-and-swap loop.  Fallback variant: XOR against all-ones to
 * invert the mask, then AND, all under disabled interrupts.
 */
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"	/* tmp = *v */
			"       wsr     %1, scompare1\n"
			"       and     %0, %1, %2\n"	/* result = tmp & ~mask */
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"	/* retry on contention */
			: "=&a" (result), "=&a" (tmp)
			: "a" (~mask), "a" (v)
			: "memory"
			);
#else
	unsigned int all_f = -1;
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"	/* mask irqs */
			"       l32i    %0, %2, 0\n"
			"       xor     %1, %4, %3\n"	/* %1 = mask ^ ~0 = ~mask */
			"       and     %0, %0, %4\n"	/* note: uses inverted mask via "1" tie */
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"	/* restore PS */
			"       rsync\n"
			: "=&a" (vval), "=a" (mask)
			: "a" (v), "a" (all_f), "1" (mask)
			: "a15", "memory"
			);
#endif
}
/*
 * atomic_set_mask - atomically set bits in an atomic variable.
 * @mask: bits to set
 * @v: pointer of type atomic_t
 *
 * S32C1I variant: OR in a compare-and-swap retry loop.  Fallback
 * variant: OR under disabled interrupts.
 */
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
#if XCHAL_HAVE_S32C1I
	unsigned long tmp;
	int result;

	__asm__ __volatile__(
			"1:     l32i    %1, %3, 0\n"	/* tmp = *v */
			"       wsr     %1, scompare1\n"
			"       or      %0, %1, %2\n"	/* result = tmp | mask */
			"       s32c1i  %0, %3, 0\n"
			"       bne     %0, %1, 1b\n"	/* retry on contention */
			: "=&a" (result), "=&a" (tmp)
			: "a" (mask), "a" (v)
			: "memory"
			);
#else
	unsigned int vval;

	__asm__ __volatile__(
			"       rsil    a15,"__stringify(LOCKLEVEL)"\n"	/* mask irqs */
			"       l32i    %0, %2, 0\n"
			"       or      %0, %0, %1\n"
			"       s32i    %0, %2, 0\n"
			"       wsr     a15, ps\n"	/* restore PS */
			"       rsync\n"
			: "=&a" (vval)
			: "a" (mask), "a" (v)
			: "a15", "memory"
			);
#endif
}
/* Atomic operations are already serializing */ /* Atomic operations are already serializing */
...@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v) ...@@ -294,4 +396,3 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _XTENSA_ATOMIC_H */ #endif /* _XTENSA_ATOMIC_H */
...@@ -22,17 +22,30 @@ ...@@ -22,17 +22,30 @@
static inline unsigned long static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new) __cmpxchg_u32(volatile int *p, int old, int new)
{ {
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" #if XCHAL_HAVE_S32C1I
"l32i %0, %1, 0 \n\t" __asm__ __volatile__(
"bne %0, %2, 1f \n\t" " wsr %2, scompare1\n"
"s32i %3, %1, 0 \n\t" " s32c1i %0, %1, 0\n"
"1: \n\t" : "+a" (new)
"wsr a15, ps \n\t" : "a" (p), "a" (old)
"rsync \n\t" : "memory"
: "=&a" (old) );
: "a" (p), "a" (old), "r" (new)
: "a15", "memory"); return new;
return old; #else
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %1, 0\n"
" bne %0, %2, 1f\n"
" s32i %3, %1, 0\n"
"1:\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (old)
: "a" (p), "a" (old), "r" (new)
: "a15", "memory");
return old;
#endif
} }
/* This function doesn't exist, so you'll get a linker error /* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg(). */ * if something tries to do an invalid cmpxchg(). */
...@@ -93,16 +106,32 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ...@@ -93,16 +106,32 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
static inline unsigned long xchg_u32(volatile int * m, unsigned long val) static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
{ {
unsigned long tmp; #if XCHAL_HAVE_S32C1I
__asm__ __volatile__("rsil a15, "__stringify(LOCKLEVEL)"\n\t" unsigned long tmp, result;
"l32i %0, %1, 0 \n\t" __asm__ __volatile__(
"s32i %2, %1, 0 \n\t" "1: l32i %1, %2, 0\n"
"wsr a15, ps \n\t" " mov %0, %3\n"
"rsync \n\t" " wsr %1, scompare1\n"
: "=&a" (tmp) " s32c1i %0, %2, 0\n"
: "a" (m), "a" (val) " bne %0, %1, 1b\n"
: "a15", "memory"); : "=&a" (result), "=&a" (tmp)
return tmp; : "a" (m), "a" (val)
: "memory"
);
return result;
#else
unsigned long tmp;
__asm__ __volatile__(
" rsil a15, "__stringify(LOCKLEVEL)"\n"
" l32i %0, %1, 0\n"
" s32i %2, %1, 0\n"
" wsr a15, ps\n"
" rsync\n"
: "=&a" (tmp)
: "a" (m), "a" (val)
: "a15", "memory");
return tmp;
#endif
} }
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment