Commit d14edb16 authored by Denys Vlasenko, committed by Ingo Molnar

x86/hweight: Force inlining of __arch_hweight{32,64}()

With this config:

  http://busybox.net/~vda/kernel_config_OPTIMIZE_INLINING_and_Os

gcc-4.7.2 generates many copies of these tiny functions:

	__arch_hweight32 (35 copies):
	55                      push   %rbp
	48 89 e5                mov    %rsp,%rbp
	e8 66 9b 4a 00          callq  __sw_hweight32
	5d                      pop    %rbp
	c3                      retq

	__arch_hweight64 (8 copies):
	55                      push   %rbp
	48 89 e5                mov    %rsp,%rbp
	e8 5e c2 8a 00          callq  __sw_hweight64
	5d                      pop    %rbp
	c3                      retq

See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122

This patch fixes this via s/inline/__always_inline/

To avoid touching the 32-bit case, where this change was not tested
to be a win, reformat __arch_hweight64() to have completely disjoint
32-bit and 64-bit implementations: an #ifdef now selects one of two
separate function definitions, instead of an #ifdef / #else / #endif
inside a single function body. Only the 64-bit __arch_hweight64() is
__always_inline'd.
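
For illustration, a minimal standalone sketch of the mechanism (not
part of the patch: hweight_sketch() and caller() are made-up names;
the #define mirrors the kernel's __always_inline, which expands to
GCC's always_inline attribute):

	#define __always_inline inline __attribute__((__always_inline__))

	/* Stand-in for the real function's asm(ALTERNATIVE(...)) body. */
	static __always_inline unsigned int hweight_sketch(unsigned int w)
	{
		return __builtin_popcount(w);
	}

	unsigned int caller(unsigned int x)
	{
		/*
		 * Under -Os with plain 'inline', gcc-4.7 may keep an
		 * out-of-line copy and emit the 5-instruction wrapper
		 * shown above; always_inline forces the body to land
		 * here instead.
		 */
		return hweight_sketch(x);
	}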

	    text     data      bss       dec  filename
	86971120 17195912 36659200 140826232  vmlinux.before
	86970954 17195912 36659200 140826066  vmlinux
Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1438697716-28121-2-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1a1d48a4
arch/x86/include/asm/arch_hweight.h

@@ -21,7 +21,7 @@
  * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
  * compiler switches.
  */
-static inline unsigned int __arch_hweight32(unsigned int w)
+static __always_inline unsigned int __arch_hweight32(unsigned int w)
 {
 	unsigned int res = 0;
 
@@ -42,20 +42,23 @@ static inline unsigned int __arch_hweight8(unsigned int w)
 	return __arch_hweight32(w & 0xff);
 }
 
+#ifdef CONFIG_X86_32
 static inline unsigned long __arch_hweight64(__u64 w)
 {
-	unsigned long res = 0;
-
-#ifdef CONFIG_X86_32
 	return __arch_hweight32((u32)w) +
 		__arch_hweight32((u32)(w >> 32));
+}
 #else
+static __always_inline unsigned long __arch_hweight64(__u64 w)
+{
+	unsigned long res = 0;
+
 	asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
 		     : "="REG_OUT (res)
 		     : REG_IN (w));
-#endif /* CONFIG_X86_32 */
 
 	return res;
 }
+#endif /* CONFIG_X86_32 */
 
 #endif
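
For context, a hedged sketch of what the 64-bit popcnt path amounts
to once ALTERNATIVE() has patched it in at boot on a CPU with
X86_FEATURE_POPCNT. This uses plain inline asm with generic register
constraints; the kernel instead pins the registers via REG_IN/REG_OUT
so the hardcoded opcode bytes in POPCNT64 operate on the intended
registers:

	static inline unsigned long hweight64_popcnt_sketch(unsigned long w)
	{
		unsigned long res;

		/* popcnt: count set bits in w; needs a POPCNT-capable CPU */
		asm ("popcnt %1, %0" : "=r" (res) : "r" (w));
		return res;
	}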