Commit cc44c17b authored by Al Viro

csum_partial_copy_nocheck(): drop the last argument

It's always 0.  Note that we theoretically could use ~0U as well -
result will be the same modulo 0xffff, _if_ the damn thing did the
right thing for any value of initial sum; later we'll make use of
that when convenient.

However, unlike csum_and_copy_..._user(), there are instances that
did not work for arbitrary initial sums; c6x is one such.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 6e41c585
...@@ -45,7 +45,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); ...@@ -45,7 +45,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
#define _HAVE_ARCH_CSUM_AND_COPY #define _HAVE_ARCH_CSUM_AND_COPY
__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp); __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
/* /*
......
...@@ -372,13 +372,13 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len, ...@@ -372,13 +372,13 @@ csum_and_copy_from_user(const void __user *src, void *dst, int len,
EXPORT_SYMBOL(csum_and_copy_from_user); EXPORT_SYMBOL(csum_and_copy_from_user);
__wsum __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) csum_partial_copy_nocheck(const void *src, void *dst, int len)
{ {
__wsum checksum; __wsum checksum;
mm_segment_t oldfs = get_fs(); mm_segment_t oldfs = get_fs();
set_fs(KERNEL_DS); set_fs(KERNEL_DS);
checksum = csum_and_copy_from_user((__force const void __user *)src, checksum = csum_and_copy_from_user((__force const void __user *)src,
dst, len, sum, NULL); dst, len, 0, NULL);
set_fs(oldfs); set_fs(oldfs);
return checksum; return checksum;
} }
......
...@@ -35,7 +35,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum); ...@@ -35,7 +35,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
*/ */
__wsum __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); csum_partial_copy_nocheck(const void *src, void *dst, int len);
__wsum __wsum
csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr); csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
......
...@@ -9,13 +9,14 @@ ...@@ -9,13 +9,14 @@
.text .text
/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum) /* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len)
* Params : r0 = src, r1 = dst, r2 = len, r3 = checksum * Params : r0 = src, r1 = dst, r2 = len
* Returns : r0 = new checksum * Returns : r0 = new checksum
*/ */
.macro save_regs .macro save_regs
stmfd sp!, {r1, r4 - r8, lr} stmfd sp!, {r1, r4 - r8, lr}
mov r3, #0
.endm .endm
.macro load_regs .macro load_regs
......
...@@ -27,7 +27,7 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, ...@@ -27,7 +27,7 @@ csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
#define csum_tcpudp_nofold csum_tcpudp_nofold #define csum_tcpudp_nofold csum_tcpudp_nofold
#define _HAVE_ARCH_CSUM_AND_COPY #define _HAVE_ARCH_CSUM_AND_COPY
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
#include <asm-generic/checksum.h> #include <asm-generic/checksum.h>
......
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
ENTRY(csum_partial_copy_nocheck) ENTRY(csum_partial_copy_nocheck)
MVC .S2 ILC,B30 MVC .S2 ILC,B30
MV .D1X B6,A31 ; given csum
ZERO .D1 A9 ; csum (a side) ZERO .D1 A9 ; csum (a side)
|| ZERO .D2 B9 ; csum (b side) || ZERO .D2 B9 ; csum (b side)
|| SHRU .S2X A6,2,B5 ; len / 4 || SHRU .S2X A6,2,B5 ; len / 4
...@@ -144,8 +143,7 @@ L91: SHRU .S2X A9,16,B4 ...@@ -144,8 +143,7 @@ L91: SHRU .S2X A9,16,B4
SHRU .S1 A9,16,A0 SHRU .S1 A9,16,A0
[A0] BNOP .S1 L91,5 [A0] BNOP .S1 L91,5
L10: ADD .D1 A31,A9,A9 L10: MV .D1 A9,A4
MV .D1 A9,A4
BNOP .S2 B3,4 BNOP .S2 B3,4
MVC .S2 B30,ILC MVC .S2 B30,ILC
......
...@@ -38,8 +38,7 @@ extern __wsum csum_and_copy_from_user(const void __user *src, ...@@ -38,8 +38,7 @@ extern __wsum csum_and_copy_from_user(const void __user *src,
int *csum_err); int *csum_err);
extern __wsum csum_partial_copy_nocheck(const void *src, extern __wsum csum_partial_copy_nocheck(const void *src,
void *dst, int len, void *dst, int len);
__wsum sum);
/* /*
* This is a version of ip_fast_csum() optimized for IP headers, * This is a version of ip_fast_csum() optimized for IP headers,
......
...@@ -324,9 +324,10 @@ EXPORT_SYMBOL(csum_and_copy_from_user); ...@@ -324,9 +324,10 @@ EXPORT_SYMBOL(csum_and_copy_from_user);
*/ */
__wsum __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) csum_partial_copy_nocheck(const void *src, void *dst, int len)
{ {
unsigned long tmp1, tmp2; unsigned long tmp1, tmp2;
__wsum sum = 0;
__asm__("movel %2,%4\n\t" __asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */ "btst #1,%4\n\t" /* Check alignment */
"jeq 2f\n\t" "jeq 2f\n\t"
......
...@@ -102,8 +102,11 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, ...@@ -102,8 +102,11 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
* we have just one address space, so this is identical to the above) * we have just one address space, so this is identical to the above)
*/ */
#define _HAVE_ARCH_CSUM_AND_COPY #define _HAVE_ARCH_CSUM_AND_COPY
__wsum csum_partial_copy_nocheck(const void *src, void *dst, __wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
int len, __wsum sum); static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
return __csum_partial_copy_nocheck(src, dst, len, 0);
}
/* /*
* Fold a partial checksum without adding pseudo headers * Fold a partial checksum without adding pseudo headers
......
...@@ -462,8 +462,8 @@ EXPORT_SYMBOL(csum_partial) ...@@ -462,8 +462,8 @@ EXPORT_SYMBOL(csum_partial)
lw errptr, 16(sp) lw errptr, 16(sp)
#endif #endif
.if \__nocheck == 1 .if \__nocheck == 1
FEXPORT(csum_partial_copy_nocheck) FEXPORT(__csum_partial_copy_nocheck)
EXPORT_SYMBOL(csum_partial_copy_nocheck) EXPORT_SYMBOL(__csum_partial_copy_nocheck)
.endif .endif
move sum, zero move sum, zero
move odd, zero move odd, zero
......
...@@ -30,8 +30,8 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, ...@@ -30,8 +30,8 @@ extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
int len, __wsum sum, int *err_ptr); int len, __wsum sum, int *err_ptr);
#define _HAVE_ARCH_CSUM_AND_COPY #define _HAVE_ARCH_CSUM_AND_COPY
#define csum_partial_copy_nocheck(src, dst, len, sum) \ #define csum_partial_copy_nocheck(src, dst, len) \
csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL) csum_partial_copy_generic((src), (dst), (len), 0, NULL, NULL)
/* /*
......
...@@ -43,10 +43,9 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, ...@@ -43,10 +43,9 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* access_ok(). * access_ok().
*/ */
static inline static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst, __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
int len, __wsum sum)
{ {
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); return csum_partial_copy_generic(src, dst, len, 0, NULL, NULL);
} }
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
......
...@@ -42,7 +42,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum); ...@@ -42,7 +42,7 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *); unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
static inline __wsum static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) csum_partial_copy_nocheck(const void *src, void *dst, int len)
{ {
register unsigned int ret asm("o0") = (unsigned int)src; register unsigned int ret asm("o0") = (unsigned int)src;
register char *d asm("o1") = dst; register char *d asm("o1") = dst;
...@@ -52,7 +52,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) ...@@ -52,7 +52,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
"call __csum_partial_copy_sparc_generic\n\t" "call __csum_partial_copy_sparc_generic\n\t"
" mov %6, %%g7\n" " mov %6, %%g7\n"
: "=&r" (ret), "=&r" (d), "=&r" (l) : "=&r" (ret), "=&r" (d), "=&r" (l)
: "0" (ret), "1" (d), "2" (l), "r" (sum) : "0" (ret), "1" (d), "2" (l), "r" (0)
: "o2", "o3", "o4", "o5", "o7", : "o2", "o3", "o4", "o5", "o7",
"g2", "g3", "g4", "g5", "g7", "g2", "g3", "g4", "g5", "g7",
"memory", "cc"); "memory", "cc");
......
...@@ -38,8 +38,12 @@ __wsum csum_partial(const void * buff, int len, __wsum sum); ...@@ -38,8 +38,12 @@ __wsum csum_partial(const void * buff, int len, __wsum sum);
* here even more important to align src and dst on a 32-bit (or even * here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary * better 64-bit) boundary
*/ */
__wsum csum_partial_copy_nocheck(const void *src, void *dst, __wsum __csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
int len, __wsum sum);
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
return __csum_partial_copy_nocheck(src, dst, len, 0);
}
long __csum_partial_copy_from_user(const void __user *src, long __csum_partial_copy_from_user(const void __user *src,
void *dst, int len, void *dst, int len,
......
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
#endif #endif
#ifndef FUNC_NAME #ifndef FUNC_NAME
#define FUNC_NAME csum_partial_copy_nocheck #define FUNC_NAME __csum_partial_copy_nocheck
#endif #endif
.register %g2, #scratch .register %g2, #scratch
......
...@@ -38,10 +38,9 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, ...@@ -38,10 +38,9 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* If you use these functions directly please don't forget the * If you use these functions directly please don't forget the
* access_ok(). * access_ok().
*/ */
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
int len, __wsum sum)
{ {
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); return csum_partial_copy_generic(src, dst, len, 0, NULL, NULL);
} }
static inline __wsum csum_and_copy_from_user(const void __user *src, static inline __wsum csum_and_copy_from_user(const void __user *src,
......
...@@ -139,8 +139,7 @@ extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, ...@@ -139,8 +139,7 @@ extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
int len, __wsum isum, int *errp); int len, __wsum isum, int *errp);
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
int len, __wsum isum, int *errp); int len, __wsum isum, int *errp);
extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, extern __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
int len, __wsum sum);
/** /**
* ip_compute_csum - Compute an 16bit IP checksum. * ip_compute_csum - Compute an 16bit IP checksum.
......
...@@ -129,9 +129,9 @@ EXPORT_SYMBOL(csum_and_copy_to_user); ...@@ -129,9 +129,9 @@ EXPORT_SYMBOL(csum_and_copy_to_user);
* Returns an 32bit unfolded checksum of the buffer. * Returns an 32bit unfolded checksum of the buffer.
*/ */
__wsum __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) csum_partial_copy_nocheck(const void *src, void *dst, int len)
{ {
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); return csum_partial_copy_generic(src, dst, len, 0, NULL, NULL);
} }
EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(csum_partial_copy_nocheck);
......
...@@ -47,10 +47,9 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, ...@@ -47,10 +47,9 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
* passed in an incorrect kernel address to one of these functions. * passed in an incorrect kernel address to one of these functions.
*/ */
static inline static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst, __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
int len, __wsum sum)
{ {
return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); return csum_partial_copy_generic(src, dst, len, 0, NULL, NULL);
} }
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
......
...@@ -1419,8 +1419,7 @@ typhoon_download_firmware(struct typhoon *tp) ...@@ -1419,8 +1419,7 @@ typhoon_download_firmware(struct typhoon *tp)
* the checksum, we can do this once, at the end. * the checksum, we can do this once, at the end.
*/ */
csum = csum_fold(csum_partial_copy_nocheck(image_data, csum = csum_fold(csum_partial_copy_nocheck(image_data,
dpage, len, dpage, len));
0));
iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH); iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
iowrite32(le16_to_cpu((__force __le16)csum), iowrite32(le16_to_cpu((__force __le16)csum),
......
...@@ -49,10 +49,10 @@ static __inline__ __wsum csum_and_copy_to_user ...@@ -49,10 +49,10 @@ static __inline__ __wsum csum_and_copy_to_user
#ifndef _HAVE_ARCH_CSUM_AND_COPY #ifndef _HAVE_ARCH_CSUM_AND_COPY
static inline __wsum static inline __wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) csum_partial_copy_nocheck(const void *src, void *dst, int len)
{ {
memcpy(dst, src, len); memcpy(dst, src, len);
return csum_partial(dst, len, sum); return csum_partial(dst, len, 0);
} }
#endif #endif
......
...@@ -581,7 +581,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes, ...@@ -581,7 +581,7 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
static __wsum csum_and_memcpy(void *to, const void *from, size_t len, static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
__wsum sum, size_t off) __wsum sum, size_t off)
{ {
__wsum next = csum_partial_copy_nocheck(from, to, len, 0); __wsum next = csum_partial_copy_nocheck(from, to, len);
return csum_block_add(sum, next, off); return csum_block_add(sum, next, off);
} }
......
...@@ -2736,7 +2736,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, ...@@ -2736,7 +2736,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
if (copy > len) if (copy > len)
copy = len; copy = len;
csum = csum_partial_copy_nocheck(skb->data + offset, to, csum = csum_partial_copy_nocheck(skb->data + offset, to,
copy, 0); copy);
if ((len -= copy) == 0) if ((len -= copy) == 0)
return csum; return csum;
offset += copy; offset += copy;
...@@ -2766,7 +2766,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, ...@@ -2766,7 +2766,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
vaddr = kmap_atomic(p); vaddr = kmap_atomic(p);
csum2 = csum_partial_copy_nocheck(vaddr + p_off, csum2 = csum_partial_copy_nocheck(vaddr + p_off,
to + copied, to + copied,
p_len, 0); p_len);
kunmap_atomic(vaddr); kunmap_atomic(vaddr);
csum = csum_block_add(csum, csum2, pos); csum = csum_block_add(csum, csum2, pos);
pos += p_len; pos += p_len;
......
...@@ -381,7 +381,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, ...@@ -381,7 +381,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
csum = csum_partial_copy_nocheck((void *)&icmp_param->data, csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
(char *)icmph, (char *)icmph,
icmp_param->head_len, 0); icmp_param->head_len);
skb_queue_walk(&sk->sk_write_queue, skb1) { skb_queue_walk(&sk->sk_write_queue, skb1) {
csum = csum_add(csum, skb1->csum); csum = csum_add(csum, skb1->csum);
} }
......
...@@ -1648,7 +1648,7 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset, ...@@ -1648,7 +1648,7 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
{ {
__wsum csum; __wsum csum;
csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0); csum = csum_partial_copy_nocheck(dptr+offset, to, len);
skb->csum = csum_block_add(skb->csum, csum, odd); skb->csum = csum_block_add(skb->csum, csum, odd);
return 0; return 0;
} }
......
...@@ -478,7 +478,7 @@ static int raw_getfrag(void *from, char *to, int offset, int len, int odd, ...@@ -478,7 +478,7 @@ static int raw_getfrag(void *from, char *to, int offset, int len, int odd,
skb->csum = csum_block_add( skb->csum = csum_block_add(
skb->csum, skb->csum,
csum_partial_copy_nocheck(rfv->hdr.c + offset, csum_partial_copy_nocheck(rfv->hdr.c + offset,
to, copy, 0), to, copy),
odd); odd);
odd = 0; odd = 0;
......
...@@ -746,7 +746,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd, ...@@ -746,7 +746,7 @@ static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
skb->csum = csum_block_add( skb->csum = csum_block_add(
skb->csum, skb->csum,
csum_partial_copy_nocheck(rfv->c + offset, csum_partial_copy_nocheck(rfv->c + offset,
to, copy, 0), to, copy),
odd); odd);
odd = 0; odd = 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment