Commit 35f3d14d authored by Jens Axboe

pipe: add support for shrinking and growing pipes

This patch adds F_GETPIPE_SZ and F_SETPIPE_SZ fcntl() actions for
growing and shrinking the size of a pipe and adjusts pipe.c and splice.c
(and relay and network splice) usage to work with these larger (or smaller)
pipes.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 3d42b361
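For orientation, here is a short userspace sketch of the new interface (not part of this commit; the fallback fcntl values assume F_LINUX_SPECIFIC_BASE is 1024, and the pipe setup is purely illustrative). As implemented in this patch, the F_SETPIPE_SZ argument is the desired number of pipe buffers and must be a power of two, and F_GETPIPE_SZ returns the current buffer count:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#ifndef F_SETPIPE_SZ
#define F_SETPIPE_SZ	(1024 + 7)	/* F_LINUX_SPECIFIC_BASE + 7 */
#define F_GETPIPE_SZ	(1024 + 8)	/* F_LINUX_SPECIFIC_BASE + 8 */
#endif

int main(void)
{
	int fds[2];
	long size;

	if (pipe(fds) < 0) {
		perror("pipe");
		return 1;
	}

	/* Grow the pipe from the default 16 buffers to 64 (must be a power of two). */
	if (fcntl(fds[1], F_SETPIPE_SZ, 64) < 0) {
		perror("F_SETPIPE_SZ");
		return 1;
	}

	/* Read back the current number of pipe buffers. */
	size = fcntl(fds[1], F_GETPIPE_SZ);
	printf("pipe now has %ld buffers\n", size);

	close(fds[0]);
	close(fds[1]);
	return 0;
}

A request smaller than the number of currently queued buffers fails with EBUSY, and a non-power-of-two size fails with EINVAL, matching pipe_set_size() in the diff below.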
@@ -14,6 +14,7 @@
 #include <linux/dnotify.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/pipe_fs_i.h>
 #include <linux/security.h>
 #include <linux/ptrace.h>
 #include <linux/signal.h>
@@ -412,6 +413,10 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
 	case F_NOTIFY:
 		err = fcntl_dirnotify(fd, filp, arg);
 		break;
+	case F_SETPIPE_SZ:
+	case F_GETPIPE_SZ:
+		err = pipe_fcntl(filp, cmd, arg);
+		break;
 	default:
 		break;
 	}
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/fs.h>
+#include <linux/log2.h>
 #include <linux/mount.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/uio.h>
@@ -390,7 +391,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
 			if (!buf->len) {
 				buf->ops = NULL;
 				ops->release(pipe, buf);
-				curbuf = (curbuf + 1) & (PIPE_BUFFERS-1);
+				curbuf = (curbuf + 1) & (pipe->buffers - 1);
 				pipe->curbuf = curbuf;
 				pipe->nrbufs = --bufs;
 				do_wakeup = 1;
@@ -472,7 +473,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 	chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
 	if (pipe->nrbufs && chars != 0) {
 		int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
-							(PIPE_BUFFERS-1);
+							(pipe->buffers - 1);
 		struct pipe_buffer *buf = pipe->bufs + lastbuf;
 		const struct pipe_buf_operations *ops = buf->ops;
 		int offset = buf->offset + buf->len;
@@ -518,8 +519,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 			break;
 		}
 		bufs = pipe->nrbufs;
-		if (bufs < PIPE_BUFFERS) {
-			int newbuf = (pipe->curbuf + bufs) & (PIPE_BUFFERS-1);
+		if (bufs < pipe->buffers) {
+			int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
 			struct pipe_buffer *buf = pipe->bufs + newbuf;
 			struct page *page = pipe->tmp_page;
 			char *src;
@@ -580,7 +581,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 			if (!total_len)
 				break;
 		}
-		if (bufs < PIPE_BUFFERS)
+		if (bufs < pipe->buffers)
 			continue;
 		if (filp->f_flags & O_NONBLOCK) {
 			if (!ret)
@@ -640,7 +641,7 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		nrbufs = pipe->nrbufs;
 		while (--nrbufs >= 0) {
 			count += pipe->bufs[buf].len;
-			buf = (buf+1) & (PIPE_BUFFERS-1);
+			buf = (buf+1) & (pipe->buffers - 1);
 		}
 		mutex_unlock(&inode->i_mutex);
@@ -671,7 +672,7 @@ pipe_poll(struct file *filp, poll_table *wait)
 	}
 
 	if (filp->f_mode & FMODE_WRITE) {
-		mask |= (nrbufs < PIPE_BUFFERS) ? POLLOUT | POLLWRNORM : 0;
+		mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
 		/*
 		 * Most Unices do not set POLLERR for FIFOs but on Linux they
 		 * behave exactly like pipes for poll().
@@ -877,25 +878,32 @@ struct pipe_inode_info * alloc_pipe_info(struct inode *inode)
 	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
 	if (pipe) {
-		init_waitqueue_head(&pipe->wait);
-		pipe->r_counter = pipe->w_counter = 1;
-		pipe->inode = inode;
+		pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
+		if (pipe->bufs) {
+			init_waitqueue_head(&pipe->wait);
+			pipe->r_counter = pipe->w_counter = 1;
+			pipe->inode = inode;
+			pipe->buffers = PIPE_DEF_BUFFERS;
+			return pipe;
+		}
+		kfree(pipe);
 	}
 
-	return pipe;
+	return NULL;
 }
 
 void __free_pipe_info(struct pipe_inode_info *pipe)
 {
 	int i;
 
-	for (i = 0; i < PIPE_BUFFERS; i++) {
+	for (i = 0; i < pipe->buffers; i++) {
 		struct pipe_buffer *buf = pipe->bufs + i;
 		if (buf->ops)
 			buf->ops->release(pipe, buf);
 	}
 	if (pipe->tmp_page)
 		__free_page(pipe->tmp_page);
+	kfree(pipe->bufs);
 	kfree(pipe);
 }
@@ -1093,6 +1101,81 @@ SYSCALL_DEFINE1(pipe, int __user *, fildes)
 	return sys_pipe2(fildes, 0);
 }
 
+/*
+ * Allocate a new array of pipe buffers and copy the info over. Returns the
+ * pipe size if successful, or return -ERROR on error.
+ */
+static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
+{
+	struct pipe_buffer *bufs;
+
+	/*
+	 * Must be a power-of-2 currently
+	 */
+	if (!is_power_of_2(arg))
+		return -EINVAL;
+
+	/*
+	 * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
+	 * expect a lot of shrink+grow operations, just free and allocate
+	 * again like we would do for growing. If the pipe currently
+	 * contains more buffers than arg, then return busy.
+	 */
+	if (arg < pipe->nrbufs)
+		return -EBUSY;
+
+	bufs = kcalloc(arg, sizeof(struct pipe_buffer), GFP_KERNEL);
+	if (unlikely(!bufs))
+		return -ENOMEM;
+
+	/*
+	 * The pipe array wraps around, so just start the new one at zero
+	 * and adjust the indexes.
+	 */
+	if (pipe->nrbufs) {
+		const unsigned int tail = pipe->nrbufs & (pipe->buffers - 1);
+		const unsigned int head = pipe->nrbufs - tail;
+
+		if (head)
+			memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
+		if (tail)
+			memcpy(bufs + head, pipe->bufs + pipe->curbuf, tail * sizeof(struct pipe_buffer));
+	}
+
+	pipe->curbuf = 0;
+	kfree(pipe->bufs);
+	pipe->bufs = bufs;
+	pipe->buffers = arg;
+	return arg;
+}
+
+long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct pipe_inode_info *pipe;
+	long ret;
+
+	pipe = file->f_path.dentry->d_inode->i_pipe;
+	if (!pipe)
+		return -EBADF;
+
+	mutex_lock(&pipe->inode->i_mutex);
+
+	switch (cmd) {
+	case F_SETPIPE_SZ:
+		ret = pipe_set_size(pipe, arg);
+		break;
+	case F_GETPIPE_SZ:
+		ret = pipe->buffers;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&pipe->inode->i_mutex);
+	return ret;
+}
+
 /*
  * pipefs should _never_ be mounted by userland - too much of security hassle,
  * no real gain from having the whole whorehouse mounted. So we don't need
This diff is collapsed.
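The collapsed portion above is the splice.c side of the change, which the commit message says is adjusted to work with variable-size pipes. Its actual contents are not shown on this page; the sketch below is a hypothetical reconstruction inferred only from the splice_grow_spd()/splice_shrink_spd() declarations added to splice.h and from how the relay, tracing, and skbuff call sites below use them (grow the on-stack splice_pipe_desc arrays to pipe->buffers before filling them, free any heap copies afterwards). It is not the patch's code:

#include <linux/slab.h>
#include <linux/splice.h>
#include <linux/pipe_fs_i.h>

/* Hypothetical sketch: switch spd->pages/partial to heap arrays when the
 * pipe has been grown beyond the PIPE_DEF_BUFFERS stack arrays. */
int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
	if (pipe->buffers <= PIPE_DEF_BUFFERS)
		return 0;	/* the caller's stack arrays are large enough */

	spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
	spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page),
			       GFP_KERNEL);
	if (spd->pages && spd->partial)
		return 0;

	kfree(spd->pages);
	kfree(spd->partial);
	return -ENOMEM;
}

/* Hypothetical sketch: release whatever splice_grow_spd() allocated. */
void splice_shrink_spd(struct pipe_inode_info *pipe,
		       struct splice_pipe_desc *spd)
{
	if (pipe->buffers <= PIPE_DEF_BUFFERS)
		return;		/* nothing was heap-allocated */

	kfree(spd->pages);
	kfree(spd->partial);
}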
@@ -21,6 +21,12 @@
  */
 #define F_NOTIFY	(F_LINUX_SPECIFIC_BASE+2)
 
+/*
+ * Set and get of pipe page size array
+ */
+#define F_SETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 7)
+#define F_GETPIPE_SZ	(F_LINUX_SPECIFIC_BASE + 8)
+
 /*
  * Types of directory notifications that may be requested.
  */
@@ -3,7 +3,7 @@
 #define PIPEFS_MAGIC 0x50495045
 
-#define PIPE_BUFFERS (16)
+#define PIPE_DEF_BUFFERS	16
 
 #define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
 #define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
@@ -44,17 +44,17 @@ struct pipe_buffer {
  **/
 struct pipe_inode_info {
 	wait_queue_head_t wait;
-	unsigned int nrbufs, curbuf;
+	unsigned int nrbufs, curbuf, buffers;
+	struct page *tmp_page;
 	unsigned int readers;
 	unsigned int writers;
 	unsigned int waiting_writers;
 	unsigned int r_counter;
 	unsigned int w_counter;
-	struct page *tmp_page;
 	struct fasync_struct *fasync_readers;
 	struct fasync_struct *fasync_writers;
 	struct inode *inode;
-	struct pipe_buffer bufs[PIPE_BUFFERS];
+	struct pipe_buffer *bufs;
 };
 
 /*
@@ -154,4 +154,7 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
 void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
 
+/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
+long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
+
 #endif
@@ -82,4 +82,11 @@ extern ssize_t splice_to_pipe(struct pipe_inode_info *,
 extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 				      splice_direct_actor *);
 
+/*
+ * for dynamic pipe sizing
+ */
+extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct pipe_inode_info *,
+			      struct splice_pipe_desc *);
+
 #endif
@@ -1231,8 +1231,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
 	size_t read_subbuf = read_start / subbuf_size;
 	size_t padding = rbuf->padding[read_subbuf];
 	size_t nonpad_end = read_subbuf * subbuf_size + subbuf_size - padding;
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages[PIPE_DEF_BUFFERS];
+	struct partial_page partial[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.nr_pages = 0,
@@ -1245,6 +1245,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
 	if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
 		return 0;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
 
 	/*
 	 * Adjust read len, if longer than what is available
@@ -1255,7 +1257,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
 	subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
 	pidx = (read_start / PAGE_SIZE) % subbuf_pages;
 	poff = read_start & ~PAGE_MASK;
-	nr_pages = min_t(unsigned int, subbuf_pages, PIPE_BUFFERS);
+	nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers);
 	for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
 		unsigned int this_len, this_end, private;
@@ -1289,16 +1291,19 @@ static ssize_t subbuf_splice_actor(struct file *in,
 		}
 	}
 
+	ret = 0;
 	if (!spd.nr_pages)
-		return 0;
+		goto out;
 
 	ret = *nonpad_ret = splice_to_pipe(pipe, &spd);
 	if (ret < 0 || ret < total_len)
-		return ret;
+		goto out;
 
 	if (read_start + ret == nonpad_end)
 		ret += padding;
 
+out:
+	splice_shrink_spd(pipe, &spd);
 	return ret;
 }
@@ -3269,12 +3269,12 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 					size_t len,
 					unsigned int flags)
 {
-	struct page *pages[PIPE_BUFFERS];
-	struct partial_page partial[PIPE_BUFFERS];
+	struct page *pages_def[PIPE_DEF_BUFFERS];
+	struct partial_page partial_def[PIPE_DEF_BUFFERS];
 	struct trace_iterator *iter = filp->private_data;
 	struct splice_pipe_desc spd = {
-		.pages		= pages,
-		.partial	= partial,
+		.pages		= pages_def,
+		.partial	= partial_def,
 		.nr_pages	= 0, /* This gets updated below. */
 		.flags		= flags,
 		.ops		= &tracing_pipe_buf_ops,
@@ -3285,6 +3285,9 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	size_t rem;
 	unsigned int i;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	/* copy the tracer to avoid using a global lock all around */
 	mutex_lock(&trace_types_lock);
 	if (unlikely(old_tracer != current_trace && current_trace)) {
@@ -3315,23 +3318,23 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 	trace_access_lock(iter->cpu_file);
 
 	/* Fill as many pages as possible. */
-	for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) {
-		pages[i] = alloc_page(GFP_KERNEL);
-		if (!pages[i])
+	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+		spd.pages[i] = alloc_page(GFP_KERNEL);
+		if (!spd.pages[i])
 			break;
 
 		rem = tracing_fill_pipe_page(rem, iter);
 
 		/* Copy the data into the page, so we can start over. */
 		ret = trace_seq_to_buffer(&iter->seq,
-					  page_address(pages[i]),
+					  page_address(spd.pages[i]),
 					  iter->seq.len);
 		if (ret < 0) {
-			__free_page(pages[i]);
+			__free_page(spd.pages[i]);
 			break;
 		}
-		partial[i].offset = 0;
-		partial[i].len = iter->seq.len;
+		spd.partial[i].offset = 0;
+		spd.partial[i].len = iter->seq.len;
 
 		trace_seq_init(&iter->seq);
 	}
@@ -3342,12 +3345,14 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
 
 	spd.nr_pages = i;
 
-	return splice_to_pipe(pipe, &spd);
+	ret = splice_to_pipe(pipe, &spd);
+out:
+	splice_shrink_spd(pipe, &spd);
+	return ret;
 
 out_err:
 	mutex_unlock(&iter->mutex);
-
-	return ret;
+	goto out;
 }
 
 static ssize_t
@@ -3746,11 +3751,11 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 			    unsigned int flags)
 {
 	struct ftrace_buffer_info *info = file->private_data;
-	struct partial_page partial[PIPE_BUFFERS];
-	struct page *pages[PIPE_BUFFERS];
+	struct partial_page partial_def[PIPE_DEF_BUFFERS];
+	struct page *pages_def[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
-		.pages		= pages,
-		.partial	= partial,
+		.pages		= pages_def,
+		.partial	= partial_def,
 		.flags		= flags,
 		.ops		= &buffer_pipe_buf_ops,
 		.spd_release	= buffer_spd_release,
@@ -3759,22 +3764,28 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 	int entries, size, i;
 	size_t ret;
 
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
+
 	if (*ppos & (PAGE_SIZE - 1)) {
 		WARN_ONCE(1, "Ftrace: previous read must page-align\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out;
 	}
 
 	if (len & (PAGE_SIZE - 1)) {
 		WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
-		if (len < PAGE_SIZE)
-			return -EINVAL;
+		if (len < PAGE_SIZE) {
+			ret = -EINVAL;
+			goto out;
+		}
 		len &= PAGE_MASK;
 	}
 
 	trace_access_lock(info->cpu);
 	entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
 
-	for (i = 0; i < PIPE_BUFFERS && len && entries; i++, len -= PAGE_SIZE) {
+	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
 		struct page *page;
 		int r;
@@ -3829,11 +3840,12 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		else
 			ret = 0;
 		/* TODO: block */
-		return ret;
+		goto out;
 	}
 
 	ret = splice_to_pipe(pipe, &spd);
+	splice_shrink_spd(pipe, &spd);
+out:
 	return ret;
 }
@@ -1417,12 +1417,13 @@ static inline struct page *linear_to_page(struct page *page, unsigned int *len,
 /*
  * Fill page/offset/length into spd, if it can hold more pages.
  */
-static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
+static inline int spd_fill_page(struct splice_pipe_desc *spd,
+				struct pipe_inode_info *pipe, struct page *page,
 				unsigned int *len, unsigned int offset,
 				struct sk_buff *skb, int linear,
 				struct sock *sk)
 {
-	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
+	if (unlikely(spd->nr_pages == pipe->buffers))
 		return 1;
 
 	if (linear) {
@@ -1458,7 +1459,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
 				  unsigned int plen, unsigned int *off,
 				  unsigned int *len, struct sk_buff *skb,
 				  struct splice_pipe_desc *spd, int linear,
-				  struct sock *sk)
+				  struct sock *sk,
+				  struct pipe_inode_info *pipe)
 {
 	if (!*len)
 		return 1;
@@ -1481,7 +1483,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
 		/* the linear region may spread across several pages */
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-		if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
+		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
 			return 1;
 
 		__segment_seek(&page, &poff, &plen, flen);
@@ -1496,9 +1498,9 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
  * Map linear and fragment data from the skb to spd. It reports failure if the
  * pipe is full or if we already spliced the requested length.
  */
-static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
-			     unsigned int *len, struct splice_pipe_desc *spd,
-			     struct sock *sk)
+static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
+			     unsigned int *offset, unsigned int *len,
			     struct splice_pipe_desc *spd, struct sock *sk)
 {
 	int seg;
 
@@ -1508,7 +1510,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 	if (__splice_segment(virt_to_page(skb->data),
 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
 			     skb_headlen(skb),
-			     offset, len, skb, spd, 1, sk))
+			     offset, len, skb, spd, 1, sk, pipe))
 		return 1;
 
 	/*
@@ -1518,7 +1520,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
 		if (__splice_segment(f->page, f->page_offset, f->size,
-				     offset, len, skb, spd, 0, sk))
+				     offset, len, skb, spd, 0, sk, pipe))
 			return 1;
 	}
@@ -1535,8 +1537,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		    struct pipe_inode_info *pipe, unsigned int tlen,
 		    unsigned int flags)
 {
-	struct partial_page partial[PIPE_BUFFERS];
-	struct page *pages[PIPE_BUFFERS];
+	struct partial_page partial[PIPE_DEF_BUFFERS];
+	struct page *pages[PIPE_DEF_BUFFERS];
 	struct splice_pipe_desc spd = {
 		.pages = pages,
 		.partial = partial,
@@ -1546,12 +1548,16 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 	};
 	struct sk_buff *frag_iter;
 	struct sock *sk = skb->sk;
+	int ret = 0;
+
+	if (splice_grow_spd(pipe, &spd))
+		return -ENOMEM;
 
 	/*
 	 * __skb_splice_bits() only fails if the output has no room left,
 	 * so no point in going over the frag_list for the error case.
 	 */
-	if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
+	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
 		goto done;
 	else if (!tlen)
 		goto done;
@@ -1562,14 +1568,12 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 	skb_walk_frags(skb, frag_iter) {
 		if (!tlen)
 			break;
-		if (__skb_splice_bits(frag_iter, &offset, &tlen, &spd, sk))
+		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
 			break;
 	}
 
 done:
 	if (spd.nr_pages) {
-		int ret;
-
 		/*
 		 * Drop the socket lock, otherwise we have reverse
 		 * locking dependencies between sk_lock and i_mutex
@@ -1582,10 +1586,10 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
 		release_sock(sk);
 		ret = splice_to_pipe(pipe, &spd);
 		lock_sock(sk);
-		return ret;
 	}
 
-	return 0;
+	splice_shrink_spd(pipe, &spd);
+	return ret;
 }
 
 /**