Commit 26b7fcc4 authored by Joe Perches's avatar Joe Perches Committed by Ingo Molnar

include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent a4c2d7d9
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
* bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
*/ */
#define ADDR (*(volatile long *) addr) #define ADDR (*(volatile long *)addr)
/** /**
* sync_set_bit - Atomically set a bit in memory * sync_set_bit - Atomically set a bit in memory
...@@ -26,11 +26,11 @@ ...@@ -26,11 +26,11 @@
* Note that @nr may be almost arbitrarily large; this function is not * Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity. * restricted to acting on a single-word quantity.
*/ */
static inline void sync_set_bit(int nr, volatile unsigned long * addr) static inline void sync_set_bit(int nr, volatile unsigned long *addr)
{ {
__asm__ __volatile__("lock; btsl %1,%0" asm volatile("lock; btsl %1,%0"
:"+m" (ADDR) : "+m" (ADDR)
:"Ir" (nr) : "Ir" (nr)
: "memory"); : "memory");
} }
...@@ -44,11 +44,11 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr) ...@@ -44,11 +44,11 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors. * in order to ensure changes are visible on other processors.
*/ */
static inline void sync_clear_bit(int nr, volatile unsigned long * addr) static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
{ {
__asm__ __volatile__("lock; btrl %1,%0" asm volatile("lock; btrl %1,%0"
:"+m" (ADDR) : "+m" (ADDR)
:"Ir" (nr) : "Ir" (nr)
: "memory"); : "memory");
} }
...@@ -61,11 +61,11 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr) ...@@ -61,11 +61,11 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
* Note that @nr may be almost arbitrarily large; this function is not * Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity. * restricted to acting on a single-word quantity.
*/ */
static inline void sync_change_bit(int nr, volatile unsigned long * addr) static inline void sync_change_bit(int nr, volatile unsigned long *addr)
{ {
__asm__ __volatile__("lock; btcl %1,%0" asm volatile("lock; btcl %1,%0"
:"+m" (ADDR) : "+m" (ADDR)
:"Ir" (nr) : "Ir" (nr)
: "memory"); : "memory");
} }
...@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr) ...@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
* This operation is atomic and cannot be reordered. * This operation is atomic and cannot be reordered.
* It also implies a memory barrier. * It also implies a memory barrier.
*/ */
static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
{ {
int oldbit; int oldbit;
__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0" asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR) : "=r" (oldbit), "+m" (ADDR)
:"Ir" (nr) : "memory"); : "Ir" (nr) : "memory");
return oldbit; return oldbit;
} }
...@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr) ...@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
* This operation is atomic and cannot be reordered. * This operation is atomic and cannot be reordered.
* It also implies a memory barrier. * It also implies a memory barrier.
*/ */
static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
{ {
int oldbit; int oldbit;
__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0" asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR) : "=r" (oldbit), "+m" (ADDR)
:"Ir" (nr) : "memory"); : "Ir" (nr) : "memory");
return oldbit; return oldbit;
} }
...@@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr) ...@@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
* This operation is atomic and cannot be reordered. * This operation is atomic and cannot be reordered.
* It also implies a memory barrier. * It also implies a memory barrier.
*/ */
static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr) static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
{ {
int oldbit; int oldbit;
__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0" asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
:"=r" (oldbit),"+m" (ADDR) : "=r" (oldbit), "+m" (ADDR)
:"Ir" (nr) : "memory"); : "Ir" (nr) : "memory");
return oldbit; return oldbit;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment