Commit 6a23b45f authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull vfs and fs fixes from Al Viro:
 "Several AIO and OCFS2 fixes"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  ocfs2: _really_ sync the right range
  ocfs2_file_write_iter: keep return value and current position update in sync
  [regression] ocfs2: do *not* increment ->ki_pos twice
  ioctx_alloc(): fix vma (and file) leak on failure
  fix mremap() vs. ioctx_kill() race
parents 54d8ccc3 64b4e252
@@ -278,11 +278,11 @@ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
 	return 0;
 }
 
-static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
+static int aio_ring_remap(struct file *file, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct kioctx_table *table;
-	int i;
+	int i, res = -EINVAL;
 
 	spin_lock(&mm->ioctx_lock);
 	rcu_read_lock();
@@ -292,13 +292,17 @@ static void aio_ring_remap(struct file *file, struct vm_area_struct *vma)
 		ctx = table->table[i];
 		if (ctx && ctx->aio_ring_file == file) {
-			ctx->user_id = ctx->mmap_base = vma->vm_start;
+			if (!atomic_read(&ctx->dead)) {
+				ctx->user_id = ctx->mmap_base = vma->vm_start;
+				res = 0;
+			}
 			break;
 		}
 	}
 
 	rcu_read_unlock();
 	spin_unlock(&mm->ioctx_lock);
+	return res;
 }
 
 static const struct file_operations aio_ring_fops = {
@@ -727,6 +731,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 err_cleanup:
 	aio_nr_sub(ctx->max_reqs);
 err_ctx:
+	atomic_set(&ctx->dead, 1);
+	if (ctx->mmap_size)
+		vm_munmap(ctx->mmap_base, ctx->mmap_size);
 	aio_free_ring(ctx);
 err:
 	mutex_unlock(&ctx->ring_lock);
@@ -748,11 +755,12 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 {
 	struct kioctx_table *table;
 
-	if (atomic_xchg(&ctx->dead, 1))
+	spin_lock(&mm->ioctx_lock);
+	if (atomic_xchg(&ctx->dead, 1)) {
+		spin_unlock(&mm->ioctx_lock);
 		return -EINVAL;
+	}
 
-	spin_lock(&mm->ioctx_lock);
 	table = rcu_dereference_raw(mm->ioctx_table);
 	WARN_ON(ctx != table->table[ctx->id]);
 	table->table[ctx->id] = NULL;
......
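Taken together, the aio hunks above close the race between mremap() of the aio ring and ioctx_kill(): kill_ioctx() now marks the context dead while already holding mm->ioctx_lock, and aio_ring_remap() refuses to republish the ring address once the dead flag is set, returning -EINVAL instead. Below is a minimal userspace analogue of that pattern, for illustration only; it is not kernel code, every name in it (fake_ctx, fake_kill, fake_remap) is invented here, and a pthread mutex stands in for mm->ioctx_lock.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_ctx {
	pthread_mutex_t lock;		/* stands in for mm->ioctx_lock */
	atomic_int dead;		/* stands in for ctx->dead */
	unsigned long mmap_base;	/* stands in for ctx->mmap_base */
};

/* Analogue of kill_ioctx(): mark the context dead under the lock. */
static int fake_kill(struct fake_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (atomic_exchange(&ctx->dead, 1)) {	/* someone already killed it */
		pthread_mutex_unlock(&ctx->lock);
		return -1;
	}
	/* ... unpublish the context while still holding the lock ... */
	pthread_mutex_unlock(&ctx->lock);
	return 0;
}

/* Analogue of aio_ring_remap(): only republish the address if not dead. */
static int fake_remap(struct fake_ctx *ctx, unsigned long new_base)
{
	int res = -1;

	pthread_mutex_lock(&ctx->lock);
	if (!atomic_load(&ctx->dead)) {
		ctx->mmap_base = new_base;
		res = 0;
	}
	pthread_mutex_unlock(&ctx->lock);
	return res;
}

int main(void)
{
	struct fake_ctx ctx = { PTHREAD_MUTEX_INITIALIZER, 0, 0x1000 };

	fake_kill(&ctx);
	/* The remap now fails instead of resurrecting a dead ring. */
	printf("remap after kill -> %d\n", fake_remap(&ctx, 0x2000));
	return 0;
}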
@@ -2394,7 +2394,6 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
 		/*
 		 * for completing the rest of the request.
 		 */
-		*ppos += written;
 		count -= written;
 		written_buffered = generic_perform_write(file, from, *ppos);
 		/*
@@ -2409,7 +2408,6 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
 			goto out_dio;
 		}
 
-		iocb->ki_pos = *ppos + written_buffered;
 		/* We need to ensure that the page cache pages are written to
 		 * disk and invalidated to preserve the expected O_DIRECT
 		 * semantics.
@@ -2418,6 +2416,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
 		ret = filemap_write_and_wait_range(file->f_mapping, *ppos,
 						   endbyte);
 		if (ret == 0) {
+			iocb->ki_pos = *ppos + written_buffered;
 			written += written_buffered;
 			invalidate_mapping_pages(mapping,
 					*ppos >> PAGE_CACHE_SHIFT,
@@ -2440,10 +2439,14 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
 	/* buffered aio wouldn't have proper lock coverage today */
 	BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
 
+	if (unlikely(written <= 0))
+		goto no_sync;
+
 	if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
 	    ((file->f_flags & O_DIRECT) && !direct_io)) {
-		ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
-					       *ppos + count - 1);
+		ret = filemap_fdatawrite_range(file->f_mapping,
+					       iocb->ki_pos - written,
+					       iocb->ki_pos - 1);
 		if (ret < 0)
 			written = ret;
@@ -2454,10 +2457,12 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
 		}
 
 		if (!ret)
-			ret = filemap_fdatawait_range(file->f_mapping, *ppos,
-						      *ppos + count - 1);
+			ret = filemap_fdatawait_range(file->f_mapping,
+						      iocb->ki_pos - written,
+						      iocb->ki_pos - 1);
 	}
 
+no_sync:
 	/*
 	 * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
 	 * function pointer which is called when o_direct io completes so that
......
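With iocb->ki_pos now advanced only once, the ocfs2 hunks above derive the range to flush from the final position and the number of bytes actually written, rather than from *ppos + count. A small worked example of that arithmetic follows; the numbers are invented purely for illustration and do not come from the commit.

#include <stdio.h>

int main(void)
{
	long long pos_at_entry = 4096;	/* iocb->ki_pos before the write */
	long long written      = 3000;	/* bytes the write actually stored */

	/* ki_pos is advanced exactly once, by the amount written. */
	long long ki_pos = pos_at_entry + written;

	/* Range handed to filemap_fdatawrite_range()/filemap_fdatawait_range(). */
	long long start = ki_pos - written;	/* 4096: first byte of the write */
	long long end   = ki_pos - 1;		/* 7095: last byte of the write  */

	printf("sync range: [%lld, %lld]\n", start, end);
	return 0;
}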
@@ -1549,7 +1549,7 @@ struct file_operations {
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
 	int (*mmap) (struct file *, struct vm_area_struct *);
-	void (*mremap)(struct file *, struct vm_area_struct *);
+	int (*mremap)(struct file *, struct vm_area_struct *);
 	int (*open) (struct inode *, struct file *);
 	int (*flush) (struct file *, fl_owner_t id);
 	int (*release) (struct inode *, struct file *);
......
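Following the prototype change above, an ->mremap hook returns 0 on success or a negative errno, which move_vma() (next hunk) uses to undo the page-table move. The fragment below is a hypothetical out-of-tree example of that contract, loosely modeled on aio_ring_remap(); struct my_ring, my_ring_lock and my_ring_mremap() exist only for this sketch and are not part of the commit.

#include <linux/fs.h>
#include <linux/mm_types.h>
#include <linux/spinlock.h>

/* Hypothetical per-file state; nothing here exists in the kernel tree. */
struct my_ring {
	unsigned long	mmap_base;	/* userspace address of the mapping */
	bool		dead;		/* set once the object is being torn down */
};

static DEFINE_SPINLOCK(my_ring_lock);

static int my_ring_mremap(struct file *file, struct vm_area_struct *vma)
{
	struct my_ring *ring = file->private_data;
	int res = -EINVAL;

	spin_lock(&my_ring_lock);
	if (ring && !ring->dead) {
		ring->mmap_base = vma->vm_start;	/* record the new address */
		res = 0;
	}
	spin_unlock(&my_ring_lock);
	/* A negative return makes move_vma() move the page tables back. */
	return res;
}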
@@ -286,8 +286,14 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		old_len = new_len;
 		old_addr = new_addr;
 		new_addr = -ENOMEM;
-	} else if (vma->vm_file && vma->vm_file->f_op->mremap)
-		vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+	} else if (vma->vm_file && vma->vm_file->f_op->mremap) {
+		err = vma->vm_file->f_op->mremap(vma->vm_file, new_vma);
+		if (err < 0) {
+			move_page_tables(new_vma, new_addr, vma, old_addr,
+					 moved_len, true);
+			return err;
+		}
+	}
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT) {
......