Commit f9152895 authored by Al Viro

iov_iter: reduce code duplication

The same combination of csum_partial_copy_nocheck() with csum_block_add()
is used in a bunch of places.  Add a helper doing just that and use it.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 78e1f386
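
For context, a standalone userspace sketch of the pattern being factored out. The csum_partial_copy_nocheck() and csum_block_add() functions below are simplified toy stand-ins (a plain byte sum, no odd-offset handling), not the kernel implementations; only csum_and_memcpy() mirrors the helper added by this patch. Every call site used to issue the two csum calls back to back through a 'next' temporary; the helper folds that pair into one call.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t __wsum;	/* kernel-style name kept for readability */

/* Toy stand-in: copy @len bytes from @from to @to and fold them into @sum. */
static __wsum csum_partial_copy_nocheck(const void *from, void *to,
					size_t len, __wsum sum)
{
	const uint8_t *src = from;
	size_t i;

	memcpy(to, from, len);
	for (i = 0; i < len; i++)
		sum += src[i];
	return sum;
}

/* Toy stand-in: fold a sub-checksum into @sum (the real helper also
 * byte-swaps @next when @off is odd). */
static __wsum csum_block_add(__wsum sum, __wsum next, size_t off)
{
	(void)off;
	return sum + next;
}

/* The helper introduced by this commit: one call instead of two per site. */
static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
	return csum_block_add(sum, next, off);
}

int main(void)
{
	char src[] = "example payload";
	char dst_old[sizeof(src)], dst_new[sizeof(src)];

	/* Old call-site shape: two calls and a 'next' temporary. */
	__wsum next = csum_partial_copy_nocheck(src, dst_old, sizeof(src), 0);
	__wsum sum_old = csum_block_add(0, next, 0);

	/* New call-site shape: a single helper call. */
	__wsum sum_new = csum_and_memcpy(dst_new, src, sizeof(src), 0, 0);

	printf("old=%u new=%u\n", (unsigned)sum_old, (unsigned)sum_new);
	return 0;
}

Built with a plain C compiler, both call-site shapes in main() produce the same sum, which is the point of the refactor: the duplicated two-call sequence and the per-site 'next' temporary disappear from the callers.
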
@@ -560,13 +560,20 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 	return bytes;
 }
 
+static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
+			      __wsum sum, size_t off)
+{
+	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
+	return csum_block_add(sum, next, off);
+}
+
 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 				__wsum *csum, struct iov_iter *i)
 {
 	struct pipe_inode_info *pipe = i->pipe;
 	size_t n, r;
 	size_t off = 0;
-	__wsum sum = *csum, next;
+	__wsum sum = *csum;
 	int idx;
 
 	if (!sanity(i))
@@ -578,8 +585,7 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 	for ( ; n; idx = next_idx(idx, pipe), r = 0) {
 		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
 		char *p = kmap_atomic(pipe->bufs[idx].page);
-		next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
-		sum = csum_block_add(sum, next, off);
+		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
 		kunmap_atomic(p);
 		i->idx = idx;
 		i->iov_offset = r + chunk;
@@ -1400,17 +1406,15 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 		err ? v.iov_len : 0;
 	}), ({
 		char *p = kmap_atomic(v.bv_page);
-		next = csum_partial_copy_nocheck(p + v.bv_offset,
-						 (to += v.bv_len) - v.bv_len,
-						 v.bv_len, 0);
+		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+				      p + v.bv_offset, v.bv_len,
+				      sum, off);
 		kunmap_atomic(p);
-		sum = csum_block_add(sum, next, off);
 		off += v.bv_len;
 	}),({
-		next = csum_partial_copy_nocheck(v.iov_base,
-						 (to += v.iov_len) - v.iov_len,
-						 v.iov_len, 0);
-		sum = csum_block_add(sum, next, off);
+		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
+				      v.iov_base, v.iov_len,
+				      sum, off);
 		off += v.iov_len;
 	})
 	)
@@ -1444,17 +1448,15 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
 		0;
 	}), ({
 		char *p = kmap_atomic(v.bv_page);
-		next = csum_partial_copy_nocheck(p + v.bv_offset,
-						 (to += v.bv_len) - v.bv_len,
-						 v.bv_len, 0);
+		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+				      p + v.bv_offset, v.bv_len,
+				      sum, off);
 		kunmap_atomic(p);
-		sum = csum_block_add(sum, next, off);
 		off += v.bv_len;
 	}),({
-		next = csum_partial_copy_nocheck(v.iov_base,
-						 (to += v.iov_len) - v.iov_len,
-						 v.iov_len, 0);
-		sum = csum_block_add(sum, next, off);
+		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
+				      v.iov_base, v.iov_len,
+				      sum, off);
 		off += v.iov_len;
 	})
 	)
@@ -1491,17 +1493,15 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 		err ? v.iov_len : 0;
 	}), ({
 		char *p = kmap_atomic(v.bv_page);
-		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
-						 p + v.bv_offset,
-						 v.bv_len, 0);
+		sum = csum_and_memcpy(p + v.bv_offset,
+				      (from += v.bv_len) - v.bv_len,
+				      v.bv_len, sum, off);
 		kunmap_atomic(p);
-		sum = csum_block_add(sum, next, off);
 		off += v.bv_len;
 	}),({
-		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
-						 v.iov_base,
-						 v.iov_len, 0);
-		sum = csum_block_add(sum, next, off);
+		sum = csum_and_memcpy(v.iov_base,
+				      (from += v.iov_len) - v.iov_len,
+				      v.iov_len, sum, off);
 		off += v.iov_len;
 	})
 	)
...