Commit 658716d1 authored by Brian Behlendorf, committed by Linus Torvalds

div64_u64(): improve precision on 32bit platforms

The current implementation of div64_u64 for 32bit systems returns an
approximately correct result when the divisor exceeds 32 bits.  Since doing
64bit division using 32bit hardware is a long since solved problem, we just
use one of the existing proven methods.

Additionally, add a div64_s64 function to correctly handle signed
64bit division.
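The precision loss is easy to reproduce.  The following userspace sketch is
not part of the patch; fls32() and old_div64_u64() are stand-ins written for
illustration.  It reimplements the pre-patch algorithm and shows a divisor
just over 32 bits where the old code returns 2 instead of the exact
quotient 1:

#include <stdio.h>
#include <stdint.h>

/* Index of the most significant set bit, matching the kernel's fls(). */
static int fls32(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/* Pre-patch behaviour: shift the divisor down to 32 bits and divide,
 * silently discarding the divisor's low bits. */
static uint64_t old_div64_u64(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;

	if (high) {
		int shift = fls32(high);

		divisor >>= shift;
		dividend >>= shift;
	}
	return dividend / (uint32_t)divisor;
}

int main(void)
{
	uint64_t n = 0x200000001ULL;	/* 2^33 + 1 */
	uint64_t d = 0x100000001ULL;	/* 2^32 + 1 */

	/* Exact quotient is 1; the old approximation reports 2. */
	printf("old: %llu, exact: %llu\n",
	       (unsigned long long)old_div64_u64(n, d),
	       (unsigned long long)(n / d));
	return 0;
}

The replacement algorithm in the patched lib/div64.c below divides the same
inputs exactly.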

Addresses https://bugzilla.redhat.com/show_bug.cgi?id=616105

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ben Woodard <bwoodard@llnl.gov>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Mark Grondona <mgrondona@llnl.gov>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5d051dec
include/linux/kernel.h
@@ -173,6 +173,11 @@ extern int _cond_resched(void);
 		(__x < 0) ? -__x : __x;		\
 	})
 
+#define abs64(x) ({				\
+		s64 __x = (x);			\
+		(__x < 0) ? -__x : __x;		\
+	})
+
 #ifdef CONFIG_PROVE_LOCKING
 void might_fault(void);
 #else
include/linux/math64.h
@@ -35,6 +35,14 @@ static inline u64 div64_u64(u64 dividend, u64 divisor)
 	return dividend / divisor;
 }
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ */
+static inline s64 div64_s64(s64 dividend, s64 divisor)
+{
+	return dividend / divisor;
+}
+
 #elif BITS_PER_LONG == 32
 
 #ifndef div_u64_rem
@@ -53,6 +61,10 @@ extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
 extern u64 div64_u64(u64 dividend, u64 divisor);
 #endif
 
+#ifndef div64_s64
+extern s64 div64_s64(s64 dividend, s64 divisor);
+#endif
+
 #endif /* BITS_PER_LONG */
 
 /**
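On 64bit kernels both functions reduce to a single hardware divide, as the
inline definitions above show.  On 32bit kernels a bare '/' on 64-bit
operands would instead emit a call to libgcc's __udivdi3/__divdi3, which the
kernel does not link, so callers must go through the out-of-line helpers
declared above.  A hedged usage sketch (both caller functions are
hypothetical, written for illustration):

#include <linux/math64.h>
#include <linux/types.h>

/* Hypothetical: average latency over a sample window. */
static u64 mean_latency_ns(u64 total_ns, u64 nr_samples)
{
	return div64_u64(total_ns, nr_samples);
}

/* Hypothetical: average clock drift, which may be negative -
 * the case the new div64_s64() exists to handle. */
static s64 mean_drift_ns(s64 total_drift_ns, s64 nr_samples)
{
	return div64_s64(total_drift_ns, nr_samples);
}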
lib/div64.c
@@ -77,26 +77,58 @@ s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
 EXPORT_SYMBOL(div_s64_rem);
 #endif
 
-/* 64bit divisor, dividend and result. dynamic precision */
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ *
+ * This implementation is a modified version of the algorithm proposed
+ * by the book 'Hacker's Delight'.  The original source and full proof
+ * can be found here and is available for use without restriction.
+ *
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ */
 #ifndef div64_u64
 u64 div64_u64(u64 dividend, u64 divisor)
 {
-	u32 high, d;
+	u32 high = divisor >> 32;
+	u64 quot;
 
-	high = divisor >> 32;
-	if (high) {
-		unsigned int shift = fls(high);
+	if (high == 0) {
+		quot = div_u64(dividend, divisor);
+	} else {
+		int n = 1 + fls(high);
+		quot = div_u64(dividend >> n, divisor >> n);
 
-		d = divisor >> shift;
-		dividend >>= shift;
-	} else
-		d = divisor;
+		if (quot != 0)
+			quot--;
+		if ((dividend - quot * divisor) >= divisor)
+			quot++;
+	}
 
-	return div_u64(dividend, d);
+	return quot;
 }
 EXPORT_SYMBOL(div64_u64);
 #endif
 
+/**
+ * div64_s64 - signed 64bit divide with 64bit divisor
+ * @dividend:	64bit dividend
+ * @divisor:	64bit divisor
+ */
+#ifndef div64_s64
+s64 div64_s64(s64 dividend, s64 divisor)
+{
+	s64 quot, t;
+
+	quot = div64_u64(abs64(dividend), abs64(divisor));
+	t = (dividend ^ divisor) >> 63;
+
+	return (quot ^ t) - t;
+}
+EXPORT_SYMBOL(div64_s64);
+#endif
+
 #endif /* BITS_PER_LONG == 32 */
 
 /*
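Two details of the new lib/div64.c code are worth spelling out.  After
shifting both operands right by n = 1 + fls(high), the divisor fits in 31
bits, and (per the divDouble proof referenced above) the shifted divide
yields either the true quotient q or q+1; decrementing a nonzero estimate
therefore leaves q-1 or q, and the single remainder check promotes q-1 back
to q.  In div64_s64(), t = (dividend ^ divisor) >> 63 is -1 exactly when the
operands' signs differ, and (quot ^ t) - t is then a branch-free
two's-complement negation.  The userspace port below is written for
illustration only; stdint types and native division stand in for the
kernel's u64 and div_u64().  It restates the algorithm with these points
annotated and checks the case the old code got wrong:

#include <stdio.h>
#include <stdint.h>

/* Kernel-style fls(): index of the most significant set bit. */
static int fls32(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static uint64_t new_div64_u64(uint64_t dividend, uint64_t divisor)
{
	uint32_t high = divisor >> 32;
	uint64_t quot;

	if (high == 0) {
		quot = dividend / divisor;	/* 64-by-32 path */
	} else {
		/* Shrink the divisor below 2^32; the estimate is then
		 * the true quotient q or q+1 (divDouble proof). */
		int n = 1 + fls32(high);
		quot = (dividend >> n) / (divisor >> n);

		/* Drop to q-1 or q ... */
		if (quot != 0)
			quot--;
		/* ... and add one back iff we landed on q-1. */
		if ((dividend - quot * divisor) >= divisor)
			quot++;
	}
	return quot;
}

static int64_t new_div64_s64(int64_t dividend, int64_t divisor)
{
	int64_t quot, t;

	quot = new_div64_u64(dividend < 0 ? -dividend : dividend,
			     divisor < 0 ? -divisor : divisor);
	/* t is -1 (all ones) iff exactly one operand is negative. */
	t = (dividend ^ divisor) >> 63;
	/* With t == -1: (quot ^ -1) - (-1) == ~quot + 1 == -quot;
	 * with t == 0 this is a no-op. */
	return (quot ^ t) - t;
}

int main(void)
{
	/* The case the old code got wrong now divides exactly. */
	printf("%llu\n", (unsigned long long)
	       new_div64_u64(0x200000001ULL, 0x100000001ULL));	/* prints 1 */
	printf("%lld\n", (long long)
	       new_div64_s64(-0x200000001LL, 0x100000001LL));	/* prints -1 */
	return 0;
}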