Commit 8fad7767 authored by Al Viro

ITER_PIPE: allocate buffers as we go in copy-to-pipe primitives

New helper: append_pipe().  Extends the last buffer if possible,
allocates a new one otherwise.  Returns page and offset in it
on success, NULL on failure.  iov_iter is advanced past the
data we've got.
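
For orientation, the copy-to-pipe primitives converted below all reduce to the
same shape; this is condensed from the copy_pipe_to_iter() hunk in this patch
(an excerpt, not a standalone function):

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);	// extend last buffer or allocate; advances i
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		if (!page)
			return bytes - n;	// pipe full - report the short copy
		memcpy_to_page(page, off, addr, chunk);
		addr += chunk;
	}
	return bytes;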

Use that instead of push_pipe() in copy-to-pipe primitives;
they get simpler that way.  Handling of short copy (in "mc" one)
is done simply by iov_iter_revert() - iov_iter is in consistent
state after that one, so we can use that.
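
For reference, the short-copy path in copy_mc_pipe_to_iter() below comes down
to the following (excerpt from the hunk, not self-contained):

	rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
	chunk -= rem;			// what actually made it into the pipe page
	kunmap_local(p);
	xfer += chunk;
	bytes -= chunk;
	if (rem) {
		iov_iter_revert(i, rem);	// back the iterator (and pipe) out of the uncopied tail
		break;
	}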

[Fix for braino caught by Liu Xinpeng <liuxp11@chinatelecom.cn> folded in]
[another braino fix, this time in copy_pipe_to_iter() and pipe_zero();
caught by testcase from Hugh Dickins]
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
parent 47b7fcae
@@ -259,6 +259,45 @@ static void push_page(struct pipe_inode_info *pipe, struct page *page,
 	get_page(page);
 }
 
+static inline bool allocated(struct pipe_buffer *buf)
+{
+	return buf->ops == &default_pipe_buf_ops;
+}
+
+static struct page *append_pipe(struct iov_iter *i, size_t size,
+				unsigned int *off)
+{
+	struct pipe_inode_info *pipe = i->pipe;
+	size_t offset = i->iov_offset;
+	struct pipe_buffer *buf;
+	struct page *page;
+
+	if (offset && offset < PAGE_SIZE) {
+		// some space in the last buffer; can we add to it?
+		buf = pipe_buf(pipe, pipe->head - 1);
+		if (allocated(buf)) {
+			size = min_t(size_t, size, PAGE_SIZE - offset);
+			buf->len += size;
+			i->iov_offset += size;
+			i->count -= size;
+			*off = offset;
+			return buf->page;
+		}
+	}
+	// OK, we need a new buffer
+	*off = 0;
+	size = min_t(size_t, size, PAGE_SIZE);
+	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
+		return NULL;
+	page = push_anon(pipe, size);
+	if (!page)
+		return NULL;
+	i->head = pipe->head - 1;
+	i->iov_offset = size;
+	i->count -= size;
+	return page;
+}
+
 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
@@ -396,11 +435,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
 }
 EXPORT_SYMBOL(iov_iter_init);
 
-static inline bool allocated(struct pipe_buffer *buf)
-{
-	return buf->ops == &default_pipe_buf_ops;
-}
-
 static inline void data_start(const struct iov_iter *i,
 			      unsigned int *iter_headp, size_t *offp)
 {
@@ -459,28 +493,24 @@ static size_t push_pipe(struct iov_iter *i, size_t size,
 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 				struct iov_iter *i)
 {
-	struct pipe_inode_info *pipe = i->pipe;
-	unsigned int p_mask = pipe->ring_size - 1;
-	unsigned int i_head;
-	size_t n, off;
+	unsigned int off, chunk;
 
-	if (!sanity(i))
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+	if (unlikely(!bytes))
 		return 0;
 
-	bytes = n = push_pipe(i, bytes, &i_head, &off);
-	if (unlikely(!n))
+	if (!sanity(i))
 		return 0;
-	do {
-		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
-		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
-		i->head = i_head;
-		i->iov_offset = off + chunk;
-		n -= chunk;
+
+	for (size_t n = bytes; n; n -= chunk) {
+		struct page *page = append_pipe(i, n, &off);
+		chunk = min_t(size_t, n, PAGE_SIZE - off);
+		if (!page)
+			return bytes - n;
+		memcpy_to_page(page, off, addr, chunk);
 		addr += chunk;
-		off = 0;
-		i_head++;
-	} while (n);
-	i->count -= bytes;
+	}
 	return bytes;
 }
@@ -494,31 +524,32 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 					 struct iov_iter *i, __wsum *sump)
 {
-	struct pipe_inode_info *pipe = i->pipe;
-	unsigned int p_mask = pipe->ring_size - 1;
 	__wsum sum = *sump;
 	size_t off = 0;
-	unsigned int i_head;
-	size_t r;
+	unsigned int chunk, r;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+	if (unlikely(!bytes))
+		return 0;
 
 	if (!sanity(i))
 		return 0;
 
-	bytes = push_pipe(i, bytes, &i_head, &r);
 	while (bytes) {
-		size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
-		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+		struct page *page = append_pipe(i, bytes, &r);
+		char *p;
+
+		if (!page)
+			break;
+		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
+		p = kmap_local_page(page);
 		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
 		kunmap_local(p);
-		i->head = i_head;
-		i->iov_offset = r + chunk;
-		bytes -= chunk;
 		off += chunk;
-		r = 0;
-		i_head++;
+		bytes -= chunk;
 	}
 	*sump = sum;
-	i->count -= off;
 	return off;
 }
@@ -550,39 +581,36 @@ static int copyout_mc(void __user *to, const void *from, size_t n)
 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
 				struct iov_iter *i)
 {
-	struct pipe_inode_info *pipe = i->pipe;
-	unsigned int p_mask = pipe->ring_size - 1;
-	unsigned int i_head;
-	unsigned int valid = pipe->head;
-	size_t n, off, xfer = 0;
+	size_t xfer = 0;
+	unsigned int off, chunk;
+
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+	if (unlikely(!bytes))
+		return 0;
 
 	if (!sanity(i))
 		return 0;
 
-	n = push_pipe(i, bytes, &i_head, &off);
-	while (n) {
-		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
-		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+	while (bytes) {
+		struct page *page = append_pipe(i, bytes, &off);
 		unsigned long rem;
+		char *p;
 
+		if (!page)
+			break;
+		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
+		p = kmap_local_page(page);
 		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
 		chunk -= rem;
 		kunmap_local(p);
-		if (chunk) {
-			i->head = i_head;
-			i->iov_offset = off + chunk;
-			xfer += chunk;
-			valid = i_head + 1;
-		}
+		xfer += chunk;
+		bytes -= chunk;
 		if (rem) {
-			pipe->bufs[i_head & p_mask].len -= rem;
-			pipe_discard_from(pipe, valid);
+			iov_iter_revert(i, rem);
 			break;
 		}
-		n -= chunk;
-		off = 0;
-		i_head++;
 	}
-	i->count -= xfer;
 	return xfer;
 }
@@ -769,30 +797,27 @@ EXPORT_SYMBOL(copy_page_from_iter);
 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 {
-	struct pipe_inode_info *pipe = i->pipe;
-	unsigned int p_mask = pipe->ring_size - 1;
-	unsigned int i_head;
-	size_t n, off;
+	unsigned int chunk, off;
 
-	if (!sanity(i))
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+	if (unlikely(!bytes))
 		return 0;
 
-	bytes = n = push_pipe(i, bytes, &i_head, &off);
-	if (unlikely(!n))
+	if (!sanity(i))
 		return 0;
-	do {
-		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
-		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
+
+	for (size_t n = bytes; n; n -= chunk) {
+		struct page *page = append_pipe(i, n, &off);
+		char *p;
+
+		if (!page)
+			return bytes - n;
+		chunk = min_t(size_t, n, PAGE_SIZE - off);
+		p = kmap_local_page(page);
 		memset(p + off, 0, chunk);
 		kunmap_local(p);
-		i->head = i_head;
-		i->iov_offset = off + chunk;
-		n -= chunk;
-		off = 0;
-		i_head++;
-	} while (n);
-	i->count -= bytes;
+	}
 	return bytes;
 }