Commit 3462bd2a authored by Hagen Paul Pfeifer, committed by Ingo Molnar

x86/asm: Always inline atomics

During some code analysis I realized that atomic_add(), atomic_sub()
and friends are not necessarily inlined AND that each function
is defined multiple times:

	atomic_inc:          544 duplicates
	atomic_dec:          215 duplicates
	atomic_dec_and_test: 107 duplicates
	atomic64_inc:         38 duplicates
	[...]

Each definition is exactly identical, e.g.:

	ffffffff813171b8 <atomic_add>:
	55         push   %rbp
	48 89 e5   mov    %rsp,%rbp
	f0 01 3e   lock add %edi,(%rsi)
	5d         pop    %rbp
	c3         retq

In turn each definition has one or more callsites (sure):

	ffffffff81317c78: e8 3b f5 ff ff  callq  ffffffff813171b8 <atomic_add> [...]
	ffffffff8131a062: e8 51 d1 ff ff  callq  ffffffff813171b8 <atomic_add> [...]
	ffffffff8131a190: e8 23 d0 ff ff  callq  ffffffff813171b8 <atomic_add> [...]

The other way around would be to remove the static linkage - but
I prefer an enforced inlining here.

	Before:
	  text     data	  bss      dec       hex     filename
	  81467393 19874720 20168704 121510817 73e1ba1 vmlinux.orig

	After:
	  text     data     bss      dec       hex     filename
	  81461323 19874720 20168704 121504747 73e03eb vmlinux.inlined

Yes, the inlining here makes the kernel even smaller! ;)

Linus further observed:

	"I have this memory of having seen that before - the size
	 heuristics for gcc getting confused by inlining.
	 [...]

	 It might be a good idea to mark things that are basically just
	 wrappers around a single (or a couple of) asm instruction to be
	 always_inline."
Signed-off-by: Hagen Paul Pfeifer <hagen@jauu.net>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1429565231-4609-1-git-send-email-hagen@jauu.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent aac82d31
...@@ -46,7 +46,7 @@ static inline void atomic_set(atomic_t *v, int i) ...@@ -46,7 +46,7 @@ static inline void atomic_set(atomic_t *v, int i)
* *
* Atomically adds @i to @v. * Atomically adds @i to @v.
*/ */
static inline void atomic_add(int i, atomic_t *v) static __always_inline void atomic_add(int i, atomic_t *v)
{ {
asm volatile(LOCK_PREFIX "addl %1,%0" asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter) : "+m" (v->counter)
...@@ -60,7 +60,7 @@ static inline void atomic_add(int i, atomic_t *v) ...@@ -60,7 +60,7 @@ static inline void atomic_add(int i, atomic_t *v)
* *
* Atomically subtracts @i from @v. * Atomically subtracts @i from @v.
*/ */
static inline void atomic_sub(int i, atomic_t *v) static __always_inline void atomic_sub(int i, atomic_t *v)
{ {
asm volatile(LOCK_PREFIX "subl %1,%0" asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter) : "+m" (v->counter)
...@@ -76,7 +76,7 @@ static inline void atomic_sub(int i, atomic_t *v) ...@@ -76,7 +76,7 @@ static inline void atomic_sub(int i, atomic_t *v)
* true if the result is zero, or false for all * true if the result is zero, or false for all
* other cases. * other cases.
*/ */
static inline int atomic_sub_and_test(int i, atomic_t *v) static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
{ {
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e"); GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
} }
...@@ -87,7 +87,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v) ...@@ -87,7 +87,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
* *
* Atomically increments @v by 1. * Atomically increments @v by 1.
*/ */
static inline void atomic_inc(atomic_t *v) static __always_inline void atomic_inc(atomic_t *v)
{ {
asm volatile(LOCK_PREFIX "incl %0" asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter)); : "+m" (v->counter));
...@@ -99,7 +99,7 @@ static inline void atomic_inc(atomic_t *v) ...@@ -99,7 +99,7 @@ static inline void atomic_inc(atomic_t *v)
* *
* Atomically decrements @v by 1. * Atomically decrements @v by 1.
*/ */
static inline void atomic_dec(atomic_t *v) static __always_inline void atomic_dec(atomic_t *v)
{ {
asm volatile(LOCK_PREFIX "decl %0" asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter)); : "+m" (v->counter));
...@@ -113,7 +113,7 @@ static inline void atomic_dec(atomic_t *v) ...@@ -113,7 +113,7 @@ static inline void atomic_dec(atomic_t *v)
* returns true if the result is 0, or false for all other * returns true if the result is 0, or false for all other
* cases. * cases.
*/ */
static inline int atomic_dec_and_test(atomic_t *v) static __always_inline int atomic_dec_and_test(atomic_t *v)
{ {
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e"); GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
} }
...@@ -152,7 +152,7 @@ static inline int atomic_add_negative(int i, atomic_t *v) ...@@ -152,7 +152,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
* *
* Atomically adds @i to @v and returns @i + @v * Atomically adds @i to @v and returns @i + @v
*/ */
static inline int atomic_add_return(int i, atomic_t *v) static __always_inline int atomic_add_return(int i, atomic_t *v)
{ {
return i + xadd(&v->counter, i); return i + xadd(&v->counter, i);
} }
...@@ -191,7 +191,7 @@ static inline int atomic_xchg(atomic_t *v, int new) ...@@ -191,7 +191,7 @@ static inline int atomic_xchg(atomic_t *v, int new)
* Atomically adds @a to @v, so long as @v was not already @u. * Atomically adds @a to @v, so long as @v was not already @u.
* Returns the old value of @v. * Returns the old value of @v.
*/ */
static inline int __atomic_add_unless(atomic_t *v, int a, int u) static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
{ {
int c, old; int c, old;
c = atomic_read(v); c = atomic_read(v);
......
...@@ -40,7 +40,7 @@ static inline void atomic64_set(atomic64_t *v, long i) ...@@ -40,7 +40,7 @@ static inline void atomic64_set(atomic64_t *v, long i)
* *
* Atomically adds @i to @v. * Atomically adds @i to @v.
*/ */
static inline void atomic64_add(long i, atomic64_t *v) static __always_inline void atomic64_add(long i, atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "addq %1,%0" asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter) : "=m" (v->counter)
...@@ -81,7 +81,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) ...@@ -81,7 +81,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
* *
* Atomically increments @v by 1. * Atomically increments @v by 1.
*/ */
static inline void atomic64_inc(atomic64_t *v) static __always_inline void atomic64_inc(atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "incq %0" asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter) : "=m" (v->counter)
...@@ -94,7 +94,7 @@ static inline void atomic64_inc(atomic64_t *v) ...@@ -94,7 +94,7 @@ static inline void atomic64_inc(atomic64_t *v)
* *
* Atomically decrements @v by 1. * Atomically decrements @v by 1.
*/ */
static inline void atomic64_dec(atomic64_t *v) static __always_inline void atomic64_dec(atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "decq %0" asm volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter) : "=m" (v->counter)
...@@ -148,7 +148,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) ...@@ -148,7 +148,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
* *
* Atomically adds @i to @v and returns @i + @v * Atomically adds @i to @v and returns @i + @v
*/ */
static inline long atomic64_add_return(long i, atomic64_t *v) static __always_inline long atomic64_add_return(long i, atomic64_t *v)
{ {
return i + xadd(&v->counter, i); return i + xadd(&v->counter, i);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment