Commit 9fbe565c authored by Linus Torvalds

Merge tag 'io_uring-5.8-2020-07-05' of git://git.kernel.dk/linux-block

Pull io_uring fix from Jens Axboe:
 "Andres reported a regression with the fix that was merged earlier this
  week, where his setup of using signals to interrupt io_uring CQ waits
  no longer worked correctly.

  Fix this, and also limit our use of TWA_SIGNAL to the case where we
  need it, and continue using TWA_RESUME for task_work as before.

  Since the original is marked for 5.7 stable, let's flush this one out
  early"

* tag 'io_uring-5.8-2020-07-05' of git://git.kernel.dk/linux-block:
  io_uring: fix regression with always ignoring signals in io_cqring_wait()
parents 77834854 b7db41c9
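
For context, the behaviour being restored is userspace-visible: a task blocked waiting for CQ ring completions should be woken by a signal and see EINTR. Below is a minimal, illustrative sketch of that pattern (not part of this commit), assuming liburing is installed; the program, signal choice, and timing are arbitrary.

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include <liburing.h>

/* Empty handler: its only job is to interrupt the blocked CQ wait. */
static void on_alarm(int sig)
{
	(void)sig;
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	struct sigaction sa;
	int ret;

	/* Install SIGALRM without SA_RESTART so the wait returns -EINTR. */
	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = on_alarm;
	sigaction(SIGALRM, &sa, NULL);

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return 1;

	alarm(1);	/* deliver SIGALRM while we are blocked below */

	/* Nothing was submitted, so this blocks in the kernel's CQ wait. */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret == -EINTR)
		printf("CQ wait interrupted by signal, as expected\n");

	io_uring_queue_exit(&ring);
	return ret == -EINTR ? 0 : 1;
}

With the regression, the wait could keep blocking across the signal; with this fix, the wait returns -EINTR to userspace as before.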
fs/io_uring.c
@@ -4072,14 +4072,22 @@ struct io_poll_table {
 	int error;
 };
 
-static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb,
-				int notify)
+static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb)
 {
 	struct task_struct *tsk = req->task;
-	int ret;
+	struct io_ring_ctx *ctx = req->ctx;
+	int ret, notify = TWA_RESUME;
 
-	if (req->ctx->flags & IORING_SETUP_SQPOLL)
+	/*
+	 * SQPOLL kernel thread doesn't need notification, just a wakeup.
+	 * If we're not using an eventfd, then TWA_RESUME is always fine,
+	 * as we won't have dependencies between request completions for
+	 * other kernel wait conditions.
+	 */
+	if (ctx->flags & IORING_SETUP_SQPOLL)
 		notify = 0;
+	else if (ctx->cq_ev_fd)
+		notify = TWA_SIGNAL;
 
 	ret = task_work_add(tsk, cb, notify);
 	if (!ret)
@@ -4110,7 +4118,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
 	 * of executing it. We can't safely execute it anyway, as we may not
 	 * have the needed state needed for it anyway.
 	 */
-	ret = io_req_task_work_add(req, &req->task_work, TWA_SIGNAL);
+	ret = io_req_task_work_add(req, &req->task_work);
 	if (unlikely(ret)) {
 		WRITE_ONCE(poll->canceled, true);
 		tsk = io_wq_get_task(req->ctx->io_wq);
@@ -6201,7 +6209,14 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 		if (current->task_works)
 			task_work_run();
 		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
+			if (current->jobctl & JOBCTL_TASK_WORK) {
+				spin_lock_irq(&current->sighand->siglock);
+				current->jobctl &= ~JOBCTL_TASK_WORK;
+				recalc_sigpending();
+				spin_unlock_irq(&current->sighand->siglock);
+				continue;
+			}
+			ret = -EINTR;
 			break;
 		}
 		if (io_should_wake(&iowq, false))
@@ -6210,7 +6225,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 	} while (1);
 	finish_wait(&ctx->wait, &iowq.wq);
 
-	restore_saved_sigmask_unless(ret == -ERESTARTSYS);
+	restore_saved_sigmask_unless(ret == -EINTR);
 
 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
 }