Commit cbdf811c authored by Linus Torvalds

Merge branch 'splice' of git://brick.kernel.dk/data/git/linux-2.6-block

* 'splice' of git://brick.kernel.dk/data/git/linux-2.6-block:
  [PATCH] compat_sys_vmsplice: one-off in UIO_MAXIOV check
  [PATCH] splice: redo page lookup if add_to_page_cache() returns -EEXIST
  [PATCH] splice: rename remaining info variables to pipe
  [PATCH] splice: LRU fixups
  [PATCH] splice: fix unlocking of page on error ->prepare_write()
parents 936ef1d4 98232d50
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1323,7 +1323,7 @@ compat_sys_vmsplice(int fd, const struct compat_iovec __user *iov32,
 {
 	unsigned i;
 	struct iovec *iov;
-	if (nr_segs >= UIO_MAXIOV)
+	if (nr_segs > UIO_MAXIOV)
 		return -EINVAL;
 	iov = compat_alloc_user_space(nr_segs * sizeof(struct iovec));
 	for (i = 0; i < nr_segs; i++) {
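This is the off-by-one the first patch fixes: UIO_MAXIOV is the largest legal segment count, so a request of exactly UIO_MAXIOV segments must be accepted and only a strictly greater count rejected with -EINVAL. A minimal userspace sketch of that boundary (the constant's value matches the kernel's, but the check functions are mock-ups for illustration):

#include <stdio.h>

#define UIO_MAXIOV 1024	/* the kernel's readv/writev segment limit */

/* old check: wrongly rejects a legal request of exactly UIO_MAXIOV segments */
static int check_old(unsigned int nr_segs)
{
	return nr_segs >= UIO_MAXIOV ? -1 : 0;
}

/* fixed check: only strictly more than UIO_MAXIOV segments is an error */
static int check_new(unsigned int nr_segs)
{
	return nr_segs > UIO_MAXIOV ? -1 : 0;
}

int main(void)
{
	/* old=-1 new=0: the boundary case is the whole bug */
	printf("nr_segs=1024: old=%d new=%d\n", check_old(1024), check_new(1024));
	return 0;
}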
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -51,7 +51,7 @@ struct splice_pipe_desc {
  * addition of remove_mapping(). If success is returned, the caller may
  * attempt to reuse this page for another destination.
  */
-static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
+static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
 				     struct pipe_buffer *buf)
 {
 	struct page *page = buf->page;
@@ -78,16 +78,18 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
 
-static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
+static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe,
 					struct pipe_buffer *buf)
 {
 	page_cache_release(buf->page);
+	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
-static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
+static int page_cache_pipe_buf_pin(struct pipe_inode_info *pipe,
 				   struct pipe_buffer *buf)
 {
 	struct page *page = buf->page;
@@ -141,6 +143,7 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 	if (!(buf->flags & PIPE_BUF_FLAG_GIFT))
 		return 1;
 
+	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return generic_pipe_buf_steal(pipe, buf);
 }
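The new PIPE_BUF_FLAG_LRU flag records whether a buffer's page already sits on the kernel's LRU lists: pages stolen out of the page cache and gifted user pages do, while pages filled by a plain write() into the pipe do not. The consumer side in pipe_to_file() (a later hunk) then calls lru_cache_add() only when the flag is clear. A toy userspace mock of that producer/consumer protocol, with invented stand-in types rather than the kernel's structures:

#include <stdio.h>

#define PIPE_BUF_FLAG_LRU 0x01	/* same bit the patch introduces */

struct mock_buf {
	unsigned int flags;
};

/* producer side: a page that is already on the LRU gets flagged on steal */
static void steal_lru_page(struct mock_buf *buf)
{
	buf->flags |= PIPE_BUF_FLAG_LRU;
}

/* consumer side: only pages that were never on the LRU get added */
static void move_to_file(const struct mock_buf *buf)
{
	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		printf("lru_cache_add(page)\n");
	else
		printf("page already on the LRU, skip\n");
}

int main(void)
{
	struct mock_buf anon = { 0 };	/* plain write() into the pipe */
	struct mock_buf cached = { 0 };	/* spliced from the page cache */

	steal_lru_page(&cached);
	move_to_file(&anon);	/* adds to the LRU */
	move_to_file(&cached);	/* skips */
	return 0;
}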
@@ -321,6 +324,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
 					mapping_gfp_mask(mapping));
 			if (unlikely(error)) {
 				page_cache_release(page);
+				if (error == -EEXIST)
+					continue;
 				break;
 			}
 			/*
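__generic_file_splice_read() allocates a fresh page on a lookup miss and then inserts it into the page cache; -EEXIST from the insert means another task cached a page at that index between the lookup and the insert. The two added lines turn that race from an aborted splice into a retry of the lookup. Below is the shape of that loop as a runnable userspace mock, where lookup() and insert() stand in for the kernel's page-cache lookup and add_to_page_cache():

#include <stdio.h>
#include <errno.h>

static int slot = -1;	/* one-slot mock of a page-cache index; -1 = empty */
static int race = 1;	/* simulate one concurrent inserter */

static int lookup(void) { return slot; }

static int insert(int page)
{
	if (race) {		/* another task wins the insert race once */
		race = 0;
		slot = 42;
		return -EEXIST;
	}
	slot = page;
	return 0;
}

int main(void)
{
	int page, error;

	for (;;) {
		page = lookup();
		if (page >= 0)
			break;		/* use whatever is cached now */
		error = insert(7);
		if (!error) {
			page = 7;
			break;
		}
		if (error == -EEXIST)
			continue;	/* redo the lookup, as the patch does */
		return 1;		/* a real error: give up */
	}
	printf("got page %d\n", page);	/* prints 42: the racer's page */
	return 0;
}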
@@ -497,14 +502,14 @@ EXPORT_SYMBOL(generic_file_splice_read);
  * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos'
  * using sendpage(). Return the number of bytes sent.
  */
-static int pipe_to_sendpage(struct pipe_inode_info *info,
+static int pipe_to_sendpage(struct pipe_inode_info *pipe,
 			    struct pipe_buffer *buf, struct splice_desc *sd)
 {
 	struct file *file = sd->file;
 	loff_t pos = sd->pos;
 	int ret, more;
 
-	ret = buf->ops->pin(info, buf);
+	ret = buf->ops->pin(pipe, buf);
 	if (!ret) {
 		more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len;
@@ -535,7 +540,7 @@ static int pipe_to_sendpage(struct pipe_inode_info *info,
  * SPLICE_F_MOVE isn't set, or we cannot move the page, we simply create
  * a new page in the output file page cache and fill/dirty that.
  */
-static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
+static int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 			struct splice_desc *sd)
 {
 	struct file *file = sd->file;
@@ -549,7 +554,7 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	/*
 	 * make sure the data in this buffer is uptodate
 	 */
-	ret = buf->ops->pin(info, buf);
+	ret = buf->ops->pin(pipe, buf);
 	if (unlikely(ret))
 		return ret;
@@ -566,37 +571,23 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	 */
 	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
 		/*
-		 * If steal succeeds, buf->page is now pruned from the vm
-		 * side (page cache) and we can reuse it. The page will also
-		 * be locked on successful return.
+		 * If steal succeeds, buf->page is now pruned from the
+		 * pagecache and we can reuse it. The page will also be
+		 * locked on successful return.
 		 */
-		if (buf->ops->steal(info, buf))
+		if (buf->ops->steal(pipe, buf))
 			goto find_page;
 
 		page = buf->page;
-		page_cache_get(page);
-
-		/*
-		 * page must be on the LRU for adding to the pagecache.
-		 * Check this without grabbing the zone lock, if it isn't
-		 * the do grab the zone lock, recheck, and add if necessary.
-		 */
-		if (!PageLRU(page)) {
-			struct zone *zone = page_zone(page);
-
-			spin_lock_irq(&zone->lru_lock);
-			if (!PageLRU(page)) {
-				SetPageLRU(page);
-				add_page_to_inactive_list(zone, page);
-			}
-			spin_unlock_irq(&zone->lru_lock);
-		}
-
 		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
-			page_cache_release(page);
 			unlock_page(page);
 			goto find_page;
 		}
+
+		page_cache_get(page);
+
+		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
+			lru_cache_add(page);
 	} else {
find_page:
 		page = find_lock_page(mapping, index);
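Besides moving LRU handling behind the new flag, the hunk above reorders the reference counting: page_cache_get() used to be called before add_to_page_cache(), so the failure path had to hand the reference back with page_cache_release(); now the extra reference is taken only once the insert has succeeded, and a failed insert merely unlocks the page before retrying via find_page. A mock of that discipline with plain counters instead of struct page refcounts:

#include <stdio.h>

static int refs = 1;	/* the reference steal() left us holding */
static int locked = 1;	/* steal() also returns the page locked */

static int add_to_cache(int fail) { return fail ? -1 : 0; }

static void try_move(int fail)
{
	if (add_to_cache(fail) == 0) {
		refs++;		/* page_cache_get() only after success */
		printf("inserted: refs=%d locked=%d\n", refs, locked);
	} else {
		locked = 0;	/* unlock_page(); nothing left to release */
		printf("failed:   refs=%d locked=%d\n", refs, locked);
	}
}

int main(void)
{
	try_move(1);	/* failure path: refcount stays balanced at 1 */
	locked = 1;
	try_move(0);	/* success path: the cache now owns one extra ref */
	return 0;
}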
@@ -647,23 +638,36 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	}
 
 	ret = mapping->a_ops->prepare_write(file, page, offset, offset+this_len);
-	if (ret == AOP_TRUNCATED_PAGE) {
+	if (unlikely(ret)) {
+		loff_t isize = i_size_read(mapping->host);
+
+		if (ret != AOP_TRUNCATED_PAGE)
+			unlock_page(page);
 		page_cache_release(page);
-		goto find_page;
-	} else if (ret)
+		if (ret == AOP_TRUNCATED_PAGE)
+			goto find_page;
+
+		/*
+		 * prepare_write() may have instantiated a few blocks
+		 * outside i_size. Trim these off again.
+		 */
+		if (sd->pos + this_len > isize)
+			vmtruncate(mapping->host, isize);
+
 		goto out;
+	}
 
 	if (buf->page != page) {
 		/*
 		 * Careful, ->map() uses KM_USER0!
 		 */
-		char *src = buf->ops->map(info, buf, 1);
+		char *src = buf->ops->map(pipe, buf, 1);
 		char *dst = kmap_atomic(page, KM_USER1);
 
 		memcpy(dst + offset, src + buf->offset, this_len);
 		flush_dcache_page(page);
 		kunmap_atomic(dst, KM_USER1);
-		buf->ops->unmap(info, buf, src);
+		buf->ops->unmap(pipe, buf, src);
 	}
 
 	ret = mapping->a_ops->commit_write(file, page, offset, offset+this_len);
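The last fs/splice.c hunk folds both ->prepare_write() error cases into one block and fixes two things: an error other than AOP_TRUNCATED_PAGE used to leave the page locked (AOP_TRUNCATED_PAGE is the one case where the address-space operation has already unlocked the page itself, so the caller should only drop its reference and retry), and blocks that prepare_write() may have instantiated beyond i_size are now trimmed back with vmtruncate(). A small standalone mock of the unlock rule; the error codes are invented markers, not the kernel's AOP values:

#include <stdio.h>

enum { ERR_TRUNCATED = 1, ERR_IO = 2 };	/* stand-in error codes */

static int locked;

/* the fixed rule: a truncated page comes back already unlocked,
 * every other error leaves it locked for the caller to unlock */
static void error_path(int ret)
{
	if (ret != ERR_TRUNCATED)
		locked = 0;	/* unlock_page(page) */
	/* page_cache_release(page) would follow in both cases */
}

int main(void)
{
	locked = 1;
	error_path(ERR_IO);
	printf("I/O error:      locked=%d\n", locked);	/* 0: we unlocked */

	locked = 0;	/* truncated: the callee already unlocked it */
	error_path(ERR_TRUNCATED);
	printf("truncated page: locked=%d\n", locked);	/* still 0 */
	return 0;
}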
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -5,8 +5,9 @@
 
 #define PIPE_BUFFERS (16)
 
-#define PIPE_BUF_FLAG_ATOMIC	0x01	/* was atomically mapped */
-#define PIPE_BUF_FLAG_GIFT	0x02	/* page is a gift */
+#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
+#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
+#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
 
 struct pipe_buffer {
 	struct page *page;
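With PIPE_BUF_FLAG_LRU claiming bit 0, the existing flags shift up so that every flag stays a distinct power of two and any mix of them can be carried in one flags word. A trivial standalone check, with the constants copied from the hunk above:

#include <stdio.h>

#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */

int main(void)
{
	unsigned int flags = PIPE_BUF_FLAG_GIFT | PIPE_BUF_FLAG_LRU;

	/* distinct bits: each flag is set and tested independently */
	printf("gift=%d lru=%d atomic=%d\n",
	       !!(flags & PIPE_BUF_FLAG_GIFT),
	       !!(flags & PIPE_BUF_FLAG_LRU),
	       !!(flags & PIPE_BUF_FLAG_ATOMIC));
	return 0;
}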