Commit a041f478 authored by Linus Torvalds

Merge tag 'io_uring-6.12-20241018' of git://git.kernel.dk/linux

Pull io_uring fixes from Jens Axboe:

 - Fix a regression this merge window where cloning of registered
   buffers didn't take into account the dummy_ubuf

 - Fix a race with reading how many SQRING entries are available,
   causing userspace to need to loop around io_uring_sqring_wait()
   rather than being able to rely on SQEs being available when it
   returned (see the sketch after this list)

 - Ensure that the SQPOLL thread is TASK_RUNNING before running
   task_work off the cancelation exit path
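
A hedged illustration of the second fix's impact on userspace, assuming a
liburing SQPOLL ring (get_sqe_blocking is a hypothetical helper, not liburing
API; io_uring_get_sqe() and io_uring_sqring_wait() are real liburing calls).
Before this fix, io_uring_sqring_wait() could return while the sqring still
looked full, so callers had to retry in a loop like the one below; with the
fix, an SQE can be relied on once the wait returns.

	#include <liburing.h>

	/*
	 * Obtain an SQE from an SQPOLL ring, waiting for sqring space.
	 * The retry loop papers over the race this pull fixes: the wait
	 * could previously return before an entry was actually free.
	 */
	static struct io_uring_sqe *get_sqe_blocking(struct io_uring *ring)
	{
		struct io_uring_sqe *sqe;

		while (!(sqe = io_uring_get_sqe(ring))) {
			int ret = io_uring_sqring_wait(ring); /* SQPOLL only */

			if (ret < 0)
				return NULL;
		}
		return sqe;
	}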

* tag 'io_uring-6.12-20241018' of git://git.kernel.dk/linux:
  io_uring/sqpoll: ensure task state is TASK_RUNNING when running task_work
  io_uring/rsrc: ignore dummy_ubuf for buffer cloning
  io_uring/sqpoll: close race on waiting for sqring entries
parents b04ae0f4 8f7033aa
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -284,7 +284,14 @@ static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;
 
-	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
+	/*
+	 * SQPOLL must use the actual sqring head, as using the cached_sq_head
+	 * is race prone if the SQPOLL thread has grabbed entries but not yet
+	 * committed them to the ring. For !SQPOLL, this doesn't matter, but
+	 * since this helper is just used for SQPOLL sqring waits (or POLLOUT),
+	 * just read the actual sqring head unconditionally.
+	 */
+	return READ_ONCE(r->sq.tail) - READ_ONCE(r->sq.head) == ctx->sq_entries;
 }
 
 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
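As a minimal standalone check of the arithmetic above (an illustration, not
part of the commit): a ring with `entries` slots is full exactly when
tail - head == entries, and unsigned subtraction keeps this correct even
after the 32-bit indices wrap around.

	#include <assert.h>
	#include <stdint.h>

	/* full when the producer tail is a whole ring ahead of the head */
	static int ring_full(uint32_t tail, uint32_t head, uint32_t entries)
	{
		return tail - head == entries;
	}

	int main(void)
	{
		assert(ring_full(128, 0, 128));
		/* indices that wrapped past UINT32_MAX still compare correctly */
		assert(ring_full(63u, UINT32_MAX - 64u, 128));
		assert(!ring_full(100, 0, 128));
		return 0;
	}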
@@ -320,6 +327,7 @@ static inline int io_run_task_work(void)
 	if (current->io_uring) {
 		unsigned int count = 0;
 
+		__set_current_state(TASK_RUNNING);
 		tctx_task_work_run(current->io_uring, UINT_MAX, &count);
 		if (count)
 			ret = true;
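For context on why the task state matters, here is a sketch of the canonical
kernel sleep/wake pattern (not code from this commit; 'condition' is a stand-in
for whatever the caller waits on): a task that set itself TASK_INTERRUPTIBLE
before checking its wait condition must go back to TASK_RUNNING before doing
anything that can block, which is what the added __set_current_state() ensures
when task_work runs off the SQPOLL cancelation exit path.

	/* Sketch: canonical kernel wait loop, for illustration only */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!condition) {
		if (task_work_pending(current)) {
			/* must be TASK_RUNNING before work that may block */
			__set_current_state(TASK_RUNNING);
			io_run_task_work();
			set_current_state(TASK_INTERRUPTIBLE);
			continue;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);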
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -1176,6 +1176,7 @@ static int io_clone_buffers(struct io_ring_ctx *ctx, struct io_ring_ctx *src_ctx
 	for (i = 0; i < nbufs; i++) {
 		struct io_mapped_ubuf *src = src_ctx->user_bufs[i];
 
-		refcount_inc(&src->refs);
+		if (src != &dummy_ubuf)
+			refcount_inc(&src->refs);
 		user_bufs[i] = src;
 	}
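The dummy_ubuf fix skips refcounting for a static sentinel object that marks
empty buffer slots. A minimal userspace sketch of that pattern (all names here
are illustrative, not the kernel's):

	#include <stdatomic.h>
	#include <stddef.h>

	struct buf {
		atomic_int refs;
		void *data;
	};

	static struct buf dummy_buf;	/* static sentinel: "no buffer here" */

	/*
	 * Clone a table of slots, taking a reference on every real buffer
	 * but never on the shared sentinel, mirroring the dummy_ubuf check.
	 */
	static void clone_slots(struct buf **dst, struct buf **src, size_t n)
	{
		for (size_t i = 0; i < n; i++) {
			if (src[i] != &dummy_buf)
				atomic_fetch_add(&src[i]->refs, 1);
			dst[i] = src[i];
		}
	}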