Commit f0a8aa74 authored by Bernardo Innocenti's avatar Bernardo Innocenti Committed by Linus Torvalds

[PATCH] Fix do_div() for all architectures

This offers a generic do_div() that actually does the right thing,
unlike some architectures that "optimized" the 64-by-32 divide into
just a 32-bit divide.

Both ppc and sh were already providing an assembly optimized
__div64_32().  I called my function the same, so that their optimized
versions will automatically override mine in lib.a.

I've only tested extensively on m68knommu (uClinux) and made
sure generated code is reasonably short. Should be ok also on
parisc, since it's the same algorithm they were using before.

 - add generic C implementations of the do_div() for 32bit and 64bit
   archs in asm-generic/div64.h;

 - add generic library support function __div64_32() to handle the
   full 64/32 case on 32bit archs;

 - kill multiple copies of generic do_div() in architecture
   specific subdirs. Most copies were either buggy or not doing
   what they were supposed to do;

 - ensure all surviving instances of do_div() have their parameters
   correctly parenthesized to avoid funny side-effects;
parent a6a6977c
#ifndef __ALPHA_DIV64 #include <asm-generic/div64.h>
#define __ALPHA_DIV64
/*
* Hey, we're already 64-bit, no
* need to play games..
*/
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) (n)) % (unsigned) (base); \
(n) = ((unsigned long) (n)) / (unsigned) (base); \
__res; })
#endif
#ifndef __ASM_ARM_DIV64 #include <asm-generic/div64.h>
#define __ASM_ARM_DIV64
/* We're not 64-bit, but... */
/*
 * NOTE(review): on 32-bit ARM `unsigned long` is 32 bits wide, so the
 * casts below silently truncate a 64-bit dividend — the upper 32 bits
 * of n are discarded before the divide.  `n` and `base` are also
 * unparenthesized, so passing expression arguments can mis-bind.
 * This is the defective implementation the patch removes in favour of
 * <asm-generic/div64.h>.
 */
#define do_div(n,base) \
({ \
int __res; \
__res = ((unsigned long)n) % (unsigned int)base; \
n = ((unsigned long)n) / (unsigned int)base; \
__res; \
})
#endif
#ifndef __ASM_CRIS_DIV64 #include <asm-generic/div64.h>
#define __ASM_CRIS_DIV64
/* copy from asm-arm */
/* We're not 64-bit, but... */
#define do_div(n,base) \
({ \
int __res; \
__res = ((unsigned long)n) % (unsigned int)base; \
n = ((unsigned long)n) / (unsigned int)base; \
__res; \
})
#endif
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
* Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
*
* The semantics of do_div() are:
*
* uint32_t do_div(uint64_t *n, uint32_t base)
* {
* uint32_t remainder = *n % base;
* *n = *n / base;
* return remainder;
* }
*
* NOTE: macro parameter n is evaluated multiple times,
* beware of side effects!
*/
#include <linux/types.h>
#if BITS_PER_LONG == 64
/* 64-bit architectures: the native divide handles 64/32 directly. */
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
__rem = ((uint64_t)(n)) % __base; \
(n) = ((uint64_t)(n)) / __base; \
__rem; \
})
#elif BITS_PER_LONG == 32
/*
 * Out-of-line full 64/32 divide, defined in lib/div64.c; archs may
 * override it with an assembly version (arch/ppc/lib/div64.S,
 * arch/sh/lib/div64.S) that takes precedence in lib.a.
 */
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
/*
 * Inline the common case where the dividend already fits in 32 bits;
 * fall back to __div64_32() otherwise.  n must be a uint64_t lvalue:
 * its address is taken in the slow path.
 */
# define do_div(n,base) ({ \
uint32_t __base = (base); \
uint32_t __rem; \
if (((n) >> 32) == 0) { \
__rem = (uint32_t)(n) % __base; \
(n) = (uint32_t)(n) / __base; \
} else \
__rem = __div64_32(&(n), __base); \
__rem; \
})
#else /* BITS_PER_LONG == ?? */
# error do_div() does not yet support the C64
#endif /* BITS_PER_LONG */
#endif /* _ASM_GENERIC_DIV64_H */
#ifndef H8300_DIV64_H #include <asm-generic/div64.h>
#define H8300_DIV64_H
/* n = n / base; return rem; */
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; \
})
#endif /* _H8300_DIV64_H */
#ifndef _ASM_IA64_DIV64_H #include <asm-generic/div64.h>
#define _ASM_IA64_DIV64_H
/*
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*
* vsprintf uses this to divide a 64-bit integer N by a small integer BASE.
* This is incredibly hard on IA-64...
*/
#define do_div(n,base) \
({ \
int _res; \
_res = ((unsigned long) (n)) % (unsigned) (base); \
(n) = ((unsigned long) (n)) / (unsigned) (base); \
_res; \
})
#endif /* _ASM_IA64_DIV64_H */
...@@ -3,7 +3,6 @@ ...@@ -3,7 +3,6 @@
/* n = n / base; return rem; */ /* n = n / base; return rem; */
#if 1
#define do_div(n, base) ({ \ #define do_div(n, base) ({ \
union { \ union { \
unsigned long n32[2]; \ unsigned long n32[2]; \
...@@ -23,13 +22,5 @@ ...@@ -23,13 +22,5 @@
(n) = __n.n64; \ (n) = __n.n64; \
__rem; \ __rem; \
}) })
#else
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; \
})
#endif
#endif /* _M68K_DIV64_H */ #endif /* _M68K_DIV64_H */
#ifndef _M68KNOMMU_DIV64_H #include <asm-generic/div64.h>
#define _M68KNOMMU_DIV64_H
/* n = n / base; return rem; */
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; \
})
#endif /* _M68K_DIV64_H */
...@@ -27,23 +27,6 @@ ...@@ -27,23 +27,6 @@
(res) = __quot; \ (res) = __quot; \
__mod; }) __mod; })
/* #include <asm-generic.h>
* Hey, we're already 64-bit, no
* need to play games..
*/
#define do_div(n, base) ({ \
unsigned long __quot; \
unsigned int __mod; \
unsigned long __div; \
unsigned int __base; \
\
__div = (n); \
__base = (base); \
\
__mod = __div % __base; \
__quot = __div / __base; \
\
(n) = __quot; \
__mod; })
#endif /* _ASM_DIV64_H */ #endif /* _ASM_DIV64_H */
#ifndef __ASM_PARISC_DIV64 #include <asm-generic/div64.h>
#define __ASM_PARISC_DIV64
#ifdef __LP64__
/*
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*
* vsprintf uses this to divide a 64-bit integer N by a small integer BASE.
* This is incredibly hard on IA-64 and HPPA
*/
#define do_div(n,base) \
({ \
int _res; \
_res = ((unsigned long) (n)) % (unsigned) (base); \
(n) = ((unsigned long) (n)) / (unsigned) (base); \
_res; \
})
#else
/*
 * 32-bit PA-RISC: full 64-by-32 division done in three 16-bit chunks
 * (the same algorithm as the generic C __div64_32()).
 *
 * NOTE(review): `__rem << 16` overflows the 32-bit temporaries when
 * base >= 0x10000, so results are only reliable for divisors below
 * 65536 — confirm callers honour that limit.
 *
 * NOTE: n and base are evaluated multiple times; beware of side
 * effects.
 */
#define do_div(n,base) \
({ \
unsigned long __low, __low2, __high, __rem; \
__low = (n) & 0xffffffff; \
__high = (n) >> 32; \
if (__high) { \
__rem = __high % (unsigned long)base; \
__high = __high / (unsigned long)base; \
__low2 = __low >> 16; \
__low2 += __rem << 16; \
__rem = __low2 % (unsigned long)base; \
__low2 = __low2 / (unsigned long)base; \
__low = __low & 0xffff; \
__low += __rem << 16; \
__rem = __low % (unsigned long)base; \
__low = __low / (unsigned long)base; \
n = __low + ((long long)__low2 << 16) + \
((long long) __high << 32); \
} else { \
__rem = __low % (unsigned long)base; \
n = (__low / (unsigned long)base); \
} \
__rem; \
})
#endif
#endif
#ifndef __PPC_DIV64 #include <asm-generic/div64.h>
#define __PPC_DIV64
#include <linux/types.h>
/*
 * Assembly-optimized full 64/32 divide (arch/ppc/lib/div64.S);
 * writes the quotient back through *dividend and returns the
 * 32-bit remainder.
 */
extern u32 __div64_32(u64 *dividend, u32 div);
/*
 * do_div(): inline the common case where the dividend fits in
 * 32 bits; otherwise call out to __div64_32().  The remainder is
 * computed as n - q*d in the fast path to avoid a second divide.
 * NOTE: n is evaluated twice -- beware of side effects.
 */
#define do_div(n, div) ({ \
u64 __n = (n); \
u32 __d = (div); \
u32 __q, __r; \
if ((__n >> 32) == 0) { \
__q = (u32)__n / __d; \
__r = (u32)__n - __q * __d; \
(n) = __q; \
} else { \
__r = __div64_32(&__n, __d); \
(n) = __n; \
} \
__r; \
})
#endif
#ifndef __PPC_DIV64 #include <asm-generic/div64.h>
#define __PPC_DIV64
/* Copyright 2001 PPC64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) (n)) % (unsigned) (base); \
(n) = ((unsigned long) (n)) / (unsigned) (base); \
__res; })
#endif
...@@ -43,13 +43,7 @@ ...@@ -43,13 +43,7 @@
}) })
#else /* __s390x__ */ #else /* __s390x__ */
#include <asm-generic/div64.h>
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
#endif /* __s390x__ */ #endif /* __s390x__ */
#endif #endif
#ifndef __ASM_SH_DIV64 #include <asm-generic/div64.h>
#define __ASM_SH_DIV64
/*
 * Assembly helper (arch/sh/lib/div64.S).  NOTE: unlike the ppc and
 * generic variants, SH's __div64_32() takes the dividend by value and
 * returns the 64-bit QUOTIENT; the remainder is reconstructed below
 * as n - q*base.
 */
extern u64 __div64_32(u64 n, u32 d);
/*
 * do_div(): inline the 32-bit fast path, call __div64_32() for a true
 * 64-bit dividend.  n is evaluated twice -- beware of side effects.
 */
#define do_div(n,base) ({ \
u64 __n = (n), __q; \
u32 __base = (base); \
u32 __res; \
if ((__n >> 32) == 0) { \
__res = ((unsigned long) __n) % (unsigned) __base; \
(n) = ((unsigned long) __n) / (unsigned) __base; \
} else { \
__q = __div64_32(__n, __base); \
__res = __n - __q * __base; \
(n) = __q; \
} \
__res; })
#endif /* __ASM_SH_DIV64 */
#ifndef __SPARC_DIV64 #include <asm-generic/div64.h>
#define __SPARC_DIV64
/* We're not 64-bit, but... */
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
#endif /* __SPARC_DIV64 */
#ifndef __SPARC64_DIV64 #include <asm-generic/div64.h>
#define __SPARC64_DIV64
/*
* Hey, we're already 64-bit, no
* need to play games..
*/
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
#endif /* __SPARC64_DIV64 */
#ifndef __V850_DIV64_H__ #include <asm-generic/div64.h>
#define __V850_DIV64_H__
/* We're not 64-bit, but... */
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) n) % (unsigned) base; \
n = ((unsigned long) n) / (unsigned) base; \
__res; })
#endif /* __V850_DIV64_H__ */
#ifndef __X86_64_DIV64 #include <asm-generic/div64.h>
#define __X86_64_DIV64
/*
* Hey, we're already 64-bit, no
* need to play games..
*/
#define do_div(n,base) ({ \
int __res; \
__res = ((unsigned long) (n)) % (unsigned) (base); \
(n) = ((unsigned long) (n)) / (unsigned) (base); \
__res; })
#endif
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \ lib-y := errno.o ctype.o string.o vsprintf.o cmdline.o \
bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \ bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
kobject.o idr.o kobject.o idr.o div64.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
......
/*
* Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
*
* Based on former do_div() implementation from asm-parisc/div64.h:
* Copyright (C) 1999 Hewlett-Packard Co
* Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
*
*
* Generic C version of 64bit/32bit division and modulo, with
* 64bit result and 32bit remainder.
*
* The fast case for (n>>32 == 0) is handled inline by do_div().
*
* Code generated for this function might be very inefficient
* for some CPUs. div64_32() can be overridden by linking arch-specific
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
#include <linux/types.h>
#include <asm/div64.h>
/*
 * Full 64-by-32 unsigned division: *n = *n / base, remainder returned.
 *
 * Implemented as binary shift-and-subtract long division so the result
 * is correct for ANY 32-bit divisor.  The previous three-step 16-bit
 * chunk algorithm silently overflowed its 32-bit temporaries
 * (rem << 16 needs up to 48 bits) whenever base >= 0x10000, producing
 * wrong quotients and remainders.
 *
 * Only 32/32 divides and 64-bit add/sub/shift are used, so no 64/64
 * libgcc helper (__udivdi3) is pulled in on 32-bit architectures.
 */
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t rem = *n;
	uint64_t b = base;
	uint64_t res, d = 1;
	uint32_t high = rem >> 32;

	/* Reduce the dividend first: handle the top 32 bits with a
	 * cheap 32/32 divide, leaving rem < base << 32. */
	res = 0;
	if (high >= base) {
		high /= base;
		res = (uint64_t) high << 32;
		rem -= (uint64_t) (high * base) << 32;
	}

	/* Scale the divisor up to just below the remainder; the sign
	 * test stops b from being shifted out of 64 bits. */
	while ((int64_t) b > 0 && b < rem) {
		b = b + b;
		d = d + d;
	}

	/* Classic restoring division, one quotient bit per step. */
	do {
		if (rem >= b) {
			rem -= b;
			res += d;
		}
		b >>= 1;
		d >>= 1;
	} while (d);

	*n = res;
	return rem;		/* final rem < base, fits in 32 bits */
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment