Commit 9e3d6223 authored by Peter Zijlstra, committed by Ingo Molnar

math64, timers: Fix 32bit mul_u64_u32_shr() and friends

It turns out that while GCC-4.4 manages to generate 32x32->64 mult
instructions for the 32bit mul_u64_u32_shr() code, any GCC after that
fails horribly.

Fix this by providing an explicit mul_u32_u32() function which can be
architecture-provided.
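
For reference, the pattern at issue is the plain C widening multiply. A
minimal illustration (not part of the patch) of what should, but no longer
does, compile to a single 32x32->64 multiply on 32-bit targets:

	u64 widen_mul(u32 a, u32 b)
	{
		/* Both operands are 32 bits wide, so one widening multiply
		 * (e.g. MULL on x86) is enough; per the commit message,
		 * newer GCCs instead promote both operands and emit a
		 * full 64x64 multiply. */
		return (u64)a * b;
	}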
Reported-by: Chris Metcalf <cmetcalf@mellanox.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Chris Metcalf <cmetcalf@mellanox.com> [for tile]
Cc: Christopher S. Hall <christopher.s.hall@intel.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Laurent Vivier <lvivier@redhat.com>
Cc: Liav Rehana <liavr@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20161209083011.GD15765@worktop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e96f8f18
arch/tile/include/asm/Kbuild
@@ -5,7 +5,6 @@ generic-y += bug.h
 generic-y += bugs.h
 generic-y += clkdev.h
 generic-y += cputime.h
-generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
...
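Dropping div64.h from the generic-y list stops the build from generating
the wrapper around asm-generic/div64.h for tile; the architecture now
ships its own <asm/div64.h>, shown next, which layers a tilegx override
on top of the asm-generic implementation.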
arch/tile/include/asm/div64.h (new file)
+#ifndef _ASM_TILE_DIV64_H
+#define _ASM_TILE_DIV64_H
+
+#ifdef __tilegx__
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return __insn_mul_lu_lu(a, b);
+}
+#define mul_u32_u32 mul_u32_u32
+#endif
+
+#include <asm-generic/div64.h>
+
+#endif /* _ASM_TILE_DIV64_H */
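The "#define mul_u32_u32 mul_u32_u32" line is the usual kernel override
idiom: making the name visible to the preprocessor lets the
"#ifndef mul_u32_u32" guard in include/linux/math64.h (below) skip the
generic fallback, so tilegx gets its __insn_mul_lu_lu intrinsic, a
hardware 32x32->64 multiply of the operands' low words.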
arch/x86/include/asm/div64.h
@@ -59,6 +59,17 @@ static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 }
 #define div_u64_rem	div_u64_rem
 
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	u32 high, low;
+
+	asm ("mull %[b]" : "=a" (low), "=d" (high)
+	     : [a] "a" (a), [b] "rm" (b) );
+
+	return low | ((u64)high) << 32;
+}
+#define mul_u32_u32 mul_u32_u32
+
 #else
 # include <asm-generic/div64.h>
 #endif /* CONFIG_X86_32 */
...
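The constraints encode MULL's fixed register usage: one-operand MULL
multiplies EAX by its source operand and leaves the 64-bit product in
EDX:EAX, hence "a" for the input and "=a"/"=d" for the two output halves.
A minimal user-space sketch (my illustration, assuming a 32-bit x86
target and GCC/Clang extended asm) to sanity-check the helper against the
portable expression:

	#include <assert.h>
	#include <stdint.h>

	typedef uint32_t u32;
	typedef uint64_t u64;

	/* Same asm as the patch: low half returns in EAX, high in EDX. */
	static inline u64 mul_u32_u32(u32 a, u32 b)
	{
		u32 high, low;

		asm ("mull %[b]" : "=a" (low), "=d" (high)
		     : [a] "a" (a), [b] "rm" (b));

		return low | ((u64)high) << 32;
	}

	int main(void)
	{
		u32 a = 0xdeadbeef, b = 0x12345678;

		/* Must match the C widening multiply bit-for-bit. */
		assert(mul_u32_u32(a, b) == (u64)a * b);
		return 0;
	}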
include/linux/math64.h
@@ -133,6 +133,16 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
 	return ret;
 }
 
+#ifndef mul_u32_u32
+/*
+ * Many a GCC version messes this up and generates a 64x64 mult :-(
+ */
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+	return (u64)a * b;
+}
+#endif
+
 #if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
 
 #ifndef mul_u64_u32_shr
@@ -160,9 +170,9 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 	al = a;
 	ah = a >> 32;
 
-	ret = ((u64)al * mul) >> shift;
+	ret = mul_u32_u32(al, mul) >> shift;
 	if (ah)
-		ret += ((u64)ah * mul) << (32 - shift);
+		ret += mul_u32_u32(ah, mul) << (32 - shift);
 
 	return ret;
 }
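The substitution is an identity: writing a = ah * 2^32 + al gives
(a * mul) >> shift = ((al * mul) >> shift) + ((ah * mul) << (32 - shift))
for shifts in [0, 32), provided the shifted result fits in 64 bits, and
each partial product is exactly the 32x32->64 multiply that
mul_u32_u32() guarantees.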
@@ -186,10 +196,10 @@ static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift)
 	a0.ll = a;
 	b0.ll = b;
 
-	rl.ll = (u64)a0.l.low * b0.l.low;
-	rm.ll = (u64)a0.l.low * b0.l.high;
-	rn.ll = (u64)a0.l.high * b0.l.low;
-	rh.ll = (u64)a0.l.high * b0.l.high;
+	rl.ll = mul_u32_u32(a0.l.low, b0.l.low);
+	rm.ll = mul_u32_u32(a0.l.low, b0.l.high);
+	rn.ll = mul_u32_u32(a0.l.high, b0.l.low);
+	rh.ll = mul_u32_u32(a0.l.high, b0.l.high);
 
 	/*
 	 * Each of these lines computes a 64-bit intermediate result into "c",
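This is the schoolbook decomposition of a 64x64 multiply: splitting each
operand into 32-bit low and high words, the full product is
a * b = rl + (rm + rn) * 2^32 + rh * 2^64, where rl = low*low,
rm = low*high, rn = high*low and rh = high*high; all four partial
products are 32x32->64 multiplies and so now go through mul_u32_u32().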
@@ -229,8 +239,8 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
 	} u, rl, rh;
 
 	u.ll = a;
-	rl.ll = (u64)u.l.low * mul;
-	rh.ll = (u64)u.l.high * mul + rl.l.high;
+	rl.ll = mul_u32_u32(u.l.low, mul);
+	rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high;
 
 	/* Bits 32-63 of the result will be in rh.l.low. */
 	rl.l.high = do_div(rh.ll, divisor);
...
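As for why this matters for timers: mult/shift scaling of exactly this
shape is how clocksource cycle counts are turned into nanoseconds, a hot
path on every tick, so having mul_u64_u32_shr() silently degrade into
64x64 multiplies hurts 32-bit machines. An illustrative caller (the
helper name is hypothetical, not from this commit):

	#include <linux/math64.h>

	/* Hypothetical helper: scale a raw cycle count by a
	 * clocksource-style (mult, shift) pair, i.e.
	 * ns = (cycles * mult) >> shift. */
	static inline u64 cycles_to_ns_example(u64 cycles, u32 mult, u32 shift)
	{
		return mul_u64_u32_shr(cycles, mult, shift);
	}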