Commit 7814e4b6 authored by Al Viro, committed by David S. Miller

[NET]: PARISC checksum annotations and cleanups.

* sanitized prototypes, annotated
* kill shift-by-16 in checksum calculation
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8e3d8433
...@@ -101,11 +101,14 @@ static inline unsigned int do_csum(const unsigned char * buff, int len) ...@@ -101,11 +101,14 @@ static inline unsigned int do_csum(const unsigned char * buff, int len)
/* /*
* computes a partial checksum, e.g. for TCP/UDP fragments * computes a partial checksum, e.g. for TCP/UDP fragments
*/ */
unsigned int csum_partial(const unsigned char *buff, int len, unsigned int sum) /*
* why bother folding?
*/
__wsum csum_partial(const void *buff, int len, __wsum sum)
{ {
unsigned int result = do_csum(buff, len); unsigned int result = do_csum(buff, len);
addc(result, sum); addc(result, sum);
return from32to16(result); return (__force __wsum)from32to16(result);
} }
EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial);
...@@ -113,8 +116,8 @@ EXPORT_SYMBOL(csum_partial); ...@@ -113,8 +116,8 @@ EXPORT_SYMBOL(csum_partial);
/* /*
* copy while checksumming, otherwise like csum_partial * copy while checksumming, otherwise like csum_partial
*/ */
unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst, __wsum csum_partial_copy_nocheck(const void *src, void *dst,
int len, unsigned int sum) int len, __wsum sum)
{ {
/* /*
* It's 2:30 am and I don't feel like doing it real ... * It's 2:30 am and I don't feel like doing it real ...
...@@ -131,9 +134,9 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck); ...@@ -131,9 +134,9 @@ EXPORT_SYMBOL(csum_partial_copy_nocheck);
* Copy from userspace and compute checksum. If we catch an exception * Copy from userspace and compute checksum. If we catch an exception
* then zero the rest of the buffer. * then zero the rest of the buffer.
*/ */
unsigned int csum_partial_copy_from_user(const unsigned char __user *src, __wsum csum_partial_copy_from_user(const void __user *src,
unsigned char *dst, int len, void *dst, int len,
unsigned int sum, int *err_ptr) __wsum sum, int *err_ptr)
{ {
int missing; int missing;
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
* *
* it's best to have buff aligned on a 32-bit boundary * it's best to have buff aligned on a 32-bit boundary
*/ */
extern unsigned int csum_partial(const unsigned char *, int, unsigned int); extern __wsum csum_partial(const void *, int, __wsum);
/* /*
* The same as csum_partial, but copies from src while it checksums. * The same as csum_partial, but copies from src while it checksums.
...@@ -23,15 +23,14 @@ extern unsigned int csum_partial(const unsigned char *, int, unsigned int); ...@@ -23,15 +23,14 @@ extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
* Here even more important to align src and dst on a 32-bit (or even * Here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary * better 64-bit) boundary
*/ */
extern unsigned int csum_partial_copy_nocheck(const unsigned char *, unsigned char *, extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
int, unsigned int);
/* /*
* this is a new version of the above that records errors it finds in *errp, * this is a new version of the above that records errors it finds in *errp,
* but continues and zeros the rest of the buffer. * but continues and zeros the rest of the buffer.
*/ */
extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, extern __wsum csum_partial_copy_from_user(const void __user *src,
unsigned char *dst, int len, unsigned int sum, int *errp); void *dst, int len, __wsum sum, int *errp);
/* /*
* Optimized for IP headers, which always checksum on 4 octet boundaries. * Optimized for IP headers, which always checksum on 4 octet boundaries.
...@@ -39,11 +38,10 @@ extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src, ...@@ -39,11 +38,10 @@ extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
* Written by Randolph Chung <tausq@debian.org>, and then mucked with by * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
* LaMont Jones <lamont@debian.org> * LaMont Jones <lamont@debian.org>
*/ */
static inline unsigned short ip_fast_csum(unsigned char * iph, static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
unsigned int ihl) { {
unsigned int sum; unsigned int sum;
__asm__ __volatile__ ( __asm__ __volatile__ (
" ldws,ma 4(%1), %0\n" " ldws,ma 4(%1), %0\n"
" addib,<= -4, %2, 2f\n" " addib,<= -4, %2, 2f\n"
...@@ -69,27 +67,27 @@ static inline unsigned short ip_fast_csum(unsigned char * iph, ...@@ -69,27 +67,27 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
: "1" (iph), "2" (ihl) : "1" (iph), "2" (ihl)
: "r19", "r20", "r21" ); : "r19", "r20", "r21" );
return(sum); return (__force __sum16)sum;
} }
/* /*
* Fold a partial checksum * Fold a partial checksum
*/ */
static inline unsigned int csum_fold(unsigned int sum) static inline __sum16 csum_fold(__wsum csum)
{ {
u32 sum = (__force u32)csum;
/* add the swapped two 16-bit halves of sum, /* add the swapped two 16-bit halves of sum,
a possible carry from adding the two 16-bit halves, a possible carry from adding the two 16-bit halves,
will carry from the lower half into the upper half, will carry from the lower half into the upper half,
giving us the correct sum in the upper half. */ giving us the correct sum in the upper half. */
sum += (sum << 16) + (sum >> 16); sum += (sum << 16) + (sum >> 16);
return (~sum) >> 16; return (__force __sum16)(~sum >> 16);
} }
static inline unsigned long csum_tcpudp_nofold(unsigned long saddr, static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
unsigned long daddr,
unsigned short len, unsigned short len,
unsigned short proto, unsigned short proto,
unsigned int sum) __wsum sum)
{ {
__asm__( __asm__(
" add %1, %0, %0\n" " add %1, %0, %0\n"
...@@ -97,7 +95,7 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr, ...@@ -97,7 +95,7 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
" addc %3, %0, %0\n" " addc %3, %0, %0\n"
" addc %%r0, %0, %0\n" " addc %%r0, %0, %0\n"
: "=r" (sum) : "=r" (sum)
: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum)); : "r" (daddr), "r"(saddr), "r"(proto+len), "0"(sum));
return sum; return sum;
} }
...@@ -105,11 +103,10 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr, ...@@ -105,11 +103,10 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
* computes the checksum of the TCP/UDP pseudo-header * computes the checksum of the TCP/UDP pseudo-header
* returns a 16-bit checksum, already complemented * returns a 16-bit checksum, already complemented
*/ */
static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
unsigned long daddr,
unsigned short len, unsigned short len,
unsigned short proto, unsigned short proto,
unsigned int sum) __wsum sum)
{ {
return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
} }
...@@ -118,17 +115,17 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr, ...@@ -118,17 +115,17 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
* this routine is used for miscellaneous IP-like checksums, mainly * this routine is used for miscellaneous IP-like checksums, mainly
* in icmp.c * in icmp.c
*/ */
static inline unsigned short ip_compute_csum(unsigned char * buf, int len) { static inline __sum16 ip_compute_csum(const void *buf, int len)
{
return csum_fold (csum_partial(buf, len, 0)); return csum_fold (csum_partial(buf, len, 0));
} }
#define _HAVE_ARCH_IPV6_CSUM #define _HAVE_ARCH_IPV6_CSUM
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
struct in6_addr *daddr, const struct in6_addr *daddr,
__u16 len, __u32 len, unsigned short proto,
unsigned short proto, __wsum sum)
unsigned int sum)
{ {
__asm__ __volatile__ ( __asm__ __volatile__ (
...@@ -193,9 +190,9 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, ...@@ -193,9 +190,9 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
* Copy and checksum to user * Copy and checksum to user
*/ */
#define HAVE_CSUM_COPY_USER #define HAVE_CSUM_COPY_USER
static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src, static __inline__ __wsum csum_and_copy_to_user(const void *src,
unsigned char __user *dst, void __user *dst,
int len, int sum, int len, __wsum sum,
int *err_ptr) int *err_ptr)
{ {
/* code stolen from include/asm-mips64 */ /* code stolen from include/asm-mips64 */
...@@ -203,7 +200,7 @@ static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src, ...@@ -203,7 +200,7 @@ static __inline__ unsigned int csum_and_copy_to_user (const unsigned char *src,
if (copy_to_user(dst, src, len)) { if (copy_to_user(dst, src, len)) {
*err_ptr = -EFAULT; *err_ptr = -EFAULT;
return -1; return (__force __wsum)-1;
} }
return sum; return sum;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment