Commit 98794f93 authored by Linus Torvalds

Merge git://git.kvack.org/~bcrl/aio-fixes

Pull aio fixes from Ben LaHaise:
 "The first change from Anatol fixes a regression where io_destroy() no
  longer waits for outstanding aios to complete.  The second corrects a
  memory leak in an error path for vectored aio operations.

  Both of these bug fixes should be queued up for stable as well"

* git://git.kvack.org/~bcrl/aio-fixes:
  aio: fix potential leak in aio_run_iocb().
  aio: block io_destroy() until all context requests are completed
parents 8aa9e85a 754320d6
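
To see what the first fix guards against, here is a minimal userspace sketch (not part of this merge; the file name is a placeholder and error handling is omitted) that submits one AIO write and then destroys the context. With the regression, io_destroy() could return while the write was still in flight, so the free() below would race with the kernel still reading the buffer:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <linux/aio_abi.h>

    int main(void)
    {
            aio_context_t ctx = 0;
            struct iocb cb;
            struct iocb *cbs[1] = { &cb };
            void *buf;
            int fd;

            posix_memalign(&buf, 4096, 4096);       /* O_DIRECT wants alignment */
            memset(buf, 'x', 4096);
            fd = open("/tmp/aio-test", O_WRONLY | O_CREAT | O_DIRECT, 0600);

            syscall(SYS_io_setup, 128, &ctx);

            memset(&cb, 0, sizeof(cb));
            cb.aio_fildes = fd;
            cb.aio_lio_opcode = IOCB_CMD_PWRITE;
            cb.aio_buf = (uint64_t)(uintptr_t)buf;
            cb.aio_nbytes = 4096;

            syscall(SYS_io_submit, ctx, 1, cbs);

            /* must not return before the write above has completed */
            syscall(SYS_io_destroy, ctx);
            free(buf);      /* safe only if io_destroy() really waited */
            return 0;
    }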
fs/aio.c:
@@ -112,6 +112,11 @@ struct kioctx {
 
 	struct work_struct	free_work;
 
+	/*
+	 * signals when all in-flight requests are done
+	 */
+	struct completion *requests_done;
+
 	struct {
 		/*
 		 * This counts the number of available slots in the ringbuffer,
@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 {
 	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
 
+	/* At this point we know that there are no in-flight requests */
+	if (ctx->requests_done)
+		complete(ctx->requests_done);
+
 	INIT_WORK(&ctx->free_work, free_ioctx);
 	schedule_work(&ctx->free_work);
 }
@@ -718,7 +727,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
  * when the processes owning a context have all exited to encourage
  * the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+		struct completion *requests_done)
 {
 	if (!atomic_xchg(&ctx->dead, 1)) {
 		struct kioctx_table *table;
@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
 		if (ctx->mmap_size)
 			vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
+		ctx->requests_done = requests_done;
 		percpu_ref_kill(&ctx->users);
+	} else {
+		if (requests_done)
+			complete(requests_done);
 	}
 }
@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
 		 */
 		ctx->mmap_size = 0;
 
-		kill_ioctx(mm, ctx);
+		kill_ioctx(mm, ctx, NULL);
 	}
 }
@@ -1185,7 +1199,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 	if (!IS_ERR(ioctx)) {
 		ret = put_user(ioctx->user_id, ctxp);
 		if (ret)
-			kill_ioctx(current->mm, ioctx);
+			kill_ioctx(current->mm, ioctx, NULL);
 		percpu_ref_put(&ioctx->users);
 	}
@@ -1203,8 +1217,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
 	struct kioctx *ioctx = lookup_ioctx(ctx);
 	if (likely(NULL != ioctx)) {
-		kill_ioctx(current->mm, ioctx);
+		struct completion requests_done =
+			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+
+		/* Pass requests_done to kill_ioctx() where it can be set
+		 * in a thread-safe way.  Setting it here instead would be
+		 * racy if two io_destroy() calls ran simultaneously.
+		 */
+		kill_ioctx(current->mm, ioctx, &requests_done);
 		percpu_ref_put(&ioctx->users);
+
+		/* Wait until all I/O for the context is done.  Otherwise the
+		 * kernel keeps using user-space buffers even though the user
+		 * thinks the context is destroyed.
+		 */
+		wait_for_completion(&requests_done);
+
 		return 0;
 	}
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
@@ -1299,10 +1327,8 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
 					    &iovec, compat)
 			: aio_setup_single_vector(req, rw, buf, &nr_segs,
 						  iovec);
-		if (ret)
-			return ret;
-
-		ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+		if (!ret)
+			ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
 		if (ret < 0) {
 			if (iovec != &inline_vec)
 				kfree(iovec);
...
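
The final hunk is the leak fix: aio_setup_vectored_rw() can fail after rw_copy_check_uvector() has already allocated iovec, so the old early return skipped the kfree() in the shared error path. Chaining rw_verify_area() behind the setup result funnels every error through that cleanup. A hedged userspace analogue of the bug and the fix (all names invented, and the inline_vec special case omitted):

    #include <errno.h>
    #include <stdlib.h>

    /* Allocates *vec, then validates; may fail AFTER allocating, just as
     * rw_copy_check_uvector() can. */
    static long demo_setup(char **vec, int valid)
    {
            *vec = malloc(64);
            if (!*vec)
                    return -ENOMEM;
            return valid ? 0 : -EINVAL;     /* failure leaves *vec allocated */
    }

    static long demo_verify(void)
    {
            return 0;                       /* stand-in for rw_verify_area() */
    }

    static long demo_run_leaky(int valid)
    {
            char *vec = NULL;
            long ret = demo_setup(&vec, valid);

            if (ret)
                    return ret;             /* BUG: leaks vec if validation failed */
            free(vec);
            return 0;
    }

    static long demo_run_fixed(int valid)
    {
            char *vec = NULL;
            long ret = demo_setup(&vec, valid);

            /* the patch's shape: chain the next check instead of returning
             * early, so every error reaches the cleanup that frees vec */
            if (!ret)
                    ret = demo_verify();
            if (ret < 0) {
                    free(vec);              /* freed on both failure paths */
                    return ret;
            }
            free(vec);                      /* normal completion */
            return 0;
    }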