Commit 9e1725b4 authored by Nadav Amit, committed by Ingo Molnar

x86/refcount: Work around GCC inlining bug

As described in:

  77b0bf55: ("kbuild/Makefile: Prepare for using macros in inline assembly code to work around asm() related GCC inlining bugs")

GCC's inlining heuristics are broken with common asm() patterns used in
kernel code, resulting in the effective disabling of inlining.

The workaround is to define an assembly macro and call it from the inline
assembly block. As a result, GCC considers the inline assembly block to be
a single instruction. (Which it isn't, but that's the best we can get.)
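
As an illustration of the pattern, here is a minimal standalone sketch (not
kernel code; the macro, function and file names are made up): the macro body
is emitted once at file scope, so the asm() inside the inline function shrinks
to a single macro invocation that GCC's inlining cost estimate counts as
roughly one instruction, while the assembler still expands the full check:

  /* Build sketch: gcc -O2 demo.c (x86-64, GNU assembler syntax assumed). */
  #include <stdio.h>

  /* Emit the macro body once at file scope; all names here are hypothetical. */
  asm(".macro CHECK_NOT_NEGATIVE\n\t"
      "jns 777f\n\t"
      "ud2\n"          /* trap if the preceding result went negative */
      "777:\n"
      ".endm\n");

  static inline int dec_checked(int *counter)
  {
          /* GCC sees a two-line asm(); the assembler expands the macro. */
          asm volatile("decl %0\n\t"
                       "CHECK_NOT_NEGATIVE"
                       : "+m" (*counter));
          return *counter;
  }

  int main(void)
  {
          int refs = 2;

          printf("%d\n", dec_checked(&refs));   /* prints 1 */
          return 0;
  }

This sketch relies on GCC emitting the file-scope asm() ahead of the function
that uses the macro, which is fine for a simple single-file build; the kernel
instead makes the macros visible to every translation unit via the kbuild
change referenced above in commit 77b0bf55.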

This patch allows GCC to inline simple functions such as __get_seccomp_filter().

To no-one's surprise, the result is that GCC performs more aggressive (read: correct)
inlining decisions in these scenarios, which reduces the kernel size and presumably
also speeds it up:

      text     data     bss      dec     hex  filename
  18140970 10225412 2957312 31323694 1ddf62e  ./vmlinux before
  18140140 10225284 2957312 31322736 1ddf270  ./vmlinux after (-958)

16 fewer static text symbols:

   Before: 40302
    After: 40286 (-16)

These symbols got inlined instead.

Functions such as kref_get(), free_user(), fuse_file_get() now get inlined. Hurray!

[ mingo: Rewrote the changelog. ]
Tested-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20181003213100.189959-5-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent c06c4d80
--- a/arch/x86/include/asm/refcount.h
+++ b/arch/x86/include/asm/refcount.h
@@ -4,6 +4,41 @@
  * x86-specific implementation of refcount_t. Based on PAX_REFCOUNT from
  * PaX/grsecurity.
  */
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm.h>
+#include <asm/bug.h>
+
+.macro REFCOUNT_EXCEPTION counter:req
+	.pushsection .text..refcount
+111:	lea \counter, %_ASM_CX
+112:	ud2
+	ASM_UNREACHABLE
+	.popsection
+113:	_ASM_EXTABLE_REFCOUNT(112b, 113b)
+.endm
+
+/* Trigger refcount exception if refcount result is negative. */
+.macro REFCOUNT_CHECK_LT_ZERO counter:req
+	js 111f
+	REFCOUNT_EXCEPTION counter="\counter"
+.endm
+
+/* Trigger refcount exception if refcount result is zero or negative. */
+.macro REFCOUNT_CHECK_LE_ZERO counter:req
+	jz 111f
+	REFCOUNT_CHECK_LT_ZERO counter="\counter"
+.endm
+
+/* Trigger refcount exception unconditionally. */
+.macro REFCOUNT_ERROR counter:req
+	jmp 111f
+	REFCOUNT_EXCEPTION counter="\counter"
+.endm
+
+#else /* __ASSEMBLY__ */
+
 #include <linux/refcount.h>
 #include <asm/bug.h>
 
@@ -15,34 +50,11 @@
  * central refcount exception. The fixup address for the exception points
  * back to the regular execution flow in .text.
  */
-#define _REFCOUNT_EXCEPTION				\
-	".pushsection .text..refcount\n"		\
-	"111:\tlea %[counter], %%" _ASM_CX "\n"		\
-	"112:\t" ASM_UD2 "\n"				\
-	ASM_UNREACHABLE					\
-	".popsection\n"					\
-	"113:\n"					\
-	_ASM_EXTABLE_REFCOUNT(112b, 113b)
-
-/* Trigger refcount exception if refcount result is negative. */
-#define REFCOUNT_CHECK_LT_ZERO				\
-	"js 111f\n\t"					\
-	_REFCOUNT_EXCEPTION
-
-/* Trigger refcount exception if refcount result is zero or negative. */
-#define REFCOUNT_CHECK_LE_ZERO				\
-	"jz 111f\n\t"					\
-	REFCOUNT_CHECK_LT_ZERO
-
-/* Trigger refcount exception unconditionally. */
-#define REFCOUNT_ERROR					\
-	"jmp 111f\n\t"					\
-	_REFCOUNT_EXCEPTION
-
 static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "addl %1,%0\n\t"
-		REFCOUNT_CHECK_LT_ZERO
+		"REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
 		: [counter] "+m" (r->refs.counter)
 		: "ir" (i)
 		: "cc", "cx");
@@ -51,7 +63,7 @@ static __always_inline void refcount_add(unsigned int i, refcount_t *r)
 static __always_inline void refcount_inc(refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "incl %0\n\t"
-		REFCOUNT_CHECK_LT_ZERO
+		"REFCOUNT_CHECK_LT_ZERO counter=\"%[counter]\""
 		: [counter] "+m" (r->refs.counter)
 		: : "cc", "cx");
 }
@@ -59,7 +71,7 @@ static __always_inline void refcount_inc(refcount_t *r)
 static __always_inline void refcount_dec(refcount_t *r)
 {
 	asm volatile(LOCK_PREFIX "decl %0\n\t"
-		REFCOUNT_CHECK_LE_ZERO
+		"REFCOUNT_CHECK_LE_ZERO counter=\"%[counter]\""
 		: [counter] "+m" (r->refs.counter)
 		: : "cc", "cx");
 }
@@ -67,13 +79,15 @@ static __always_inline void refcount_dec(refcount_t *r)
 static __always_inline __must_check
 bool refcount_sub_and_test(unsigned int i, refcount_t *r)
 {
-	GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl", REFCOUNT_CHECK_LT_ZERO,
+	GEN_BINARY_SUFFIXED_RMWcc(LOCK_PREFIX "subl",
+				  "REFCOUNT_CHECK_LT_ZERO counter=\"%0\"",
 				  r->refs.counter, "er", i, "%0", e, "cx");
 }
 
 static __always_inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
-	GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl", REFCOUNT_CHECK_LT_ZERO,
+	GEN_UNARY_SUFFIXED_RMWcc(LOCK_PREFIX "decl",
+				 "REFCOUNT_CHECK_LT_ZERO counter=\"%0\"",
 				 r->refs.counter, "%0", e, "cx");
 }
 
@@ -91,7 +105,7 @@ bool refcount_add_not_zero(unsigned int i, refcount_t *r)
 
 		/* Did we try to increment from/to an undesirable state? */
 		if (unlikely(c < 0 || c == INT_MAX || result < c)) {
-			asm volatile(REFCOUNT_ERROR
+			asm volatile("REFCOUNT_ERROR counter=\"%[counter]\""
 				     : : [counter] "m" (r->refs.counter)
 				     : "cc", "cx");
 			break;
@@ -107,4 +121,6 @@ static __always_inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 	return refcount_add_not_zero(1, r);
 }
 
+#endif /* __ASSEMBLY__ */
+
 #endif
--- a/arch/x86/kernel/macros.S
+++ b/arch/x86/kernel/macros.S
@@ -7,3 +7,4 @@
  */
 
 #include <linux/compiler.h>
+#include <asm/refcount.h>