Commit fb4b3d3f authored by Linus Torvalds

Merge tag 'for-5.5/io_uring-20191121' of git://git.kernel.dk/linux-block

Pull io_uring updates from Jens Axboe:
 "A lot of stuff has been going on this cycle, with improving the
  support for networked IO (and hence unbounded request completion
  times) being one of the major themes. There's been a set of fixes done
  this week, I'll send those out as well once we're certain we're fully
  happy with them.

  This contains:

   - Unification of the "normal" submit path and the SQPOLL path (Pavel)

   - Support for sparse (and bigger) file sets, and updating of those
     file sets without needing to unregister/register again.

   - Independently sized CQ ring, instead of just making it always 2x
     the SQ ring size. This makes it more flexible for networked
     applications.

   - Support for overflowed CQ ring, never dropping events but providing
     backpressure on submits.

   - Add support for absolute timeouts, not just relative ones.

   - Support for generic cancellations. This divorces io_uring from
     workqueues as well, which additionally gets us one step closer to
     generic async system call support.

   - With cancellations, we can support grabbing the process file table
     as well, just like we do mm context. This allows support for system
     calls that create file descriptors, like accept4() support that's
     built on top of that.

   - Support for io_uring tracing (Dmitrii)

   - Support for linked timeouts. These abort an operation if it isn't
     completed by the time noted in the linked timeout (see the sketch
     after this message).

   - Speedup tracking of poll requests

   - Various cleanups making the code easier to follow (Jackie, Pavel,
     Bob, YueHaibing, me)

   - Update MAINTAINERS with new io_uring list"
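
The CQ ring sizing, accept4(), and linked timeout items above are the
user-visible pieces. As a rough sketch of how they combine from userspace,
assuming the liburing wrappers of this era (io_uring_queue_init_params(),
io_uring_prep_accept(), io_uring_prep_link_timeout()) and with error handling
trimmed (illustration only, not code from this series):

#include <liburing.h>
#include <string.h>

/* Accept one connection, but give up if none arrives within five seconds. */
static int accept_with_timeout(int listen_fd)
{
	struct io_uring_params p;
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
	int i, ret, conn_fd = -1;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_CQSIZE;	/* CQ no longer forced to 2x SQ size */
	p.cq_entries = 4096;
	ret = io_uring_queue_init_params(64, &ring, &p);
	if (ret < 0)
		return ret;

	/* accept4() handled by io_uring; link the following SQE to it */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_accept(sqe, listen_fd, NULL, NULL, 0);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	/* linked timeout: cancels the accept if it hasn't completed in time */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	/* both the accept and its linked timeout post a completion */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		if (cqe->user_data == 1)
			conn_fd = cqe->res;	/* new fd, or -ECANCELED */
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return conn_fd;
}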

* tag 'for-5.5/io_uring-20191121' of git://git.kernel.dk/linux-block: (64 commits)
  io_uring: make POLL_ADD/POLL_REMOVE scale better
  io-wq: remove now redundant struct io_wq_nulls_list
  io_uring: Fix getting file for non-fd opcodes
  io_uring: introduce req_need_defer()
  io_uring: clean up io_uring_cancel_files()
  io-wq: ensure free/busy list browsing see all items
  io-wq: ensure we have a stable view of ->cur_work for cancellations
  io_wq: add get/put_work handlers to io_wq_create()
  io_uring: check for validity of ->rings in teardown
  io_uring: fix potential deadlock in io_poll_wake()
  io_uring: use correct "is IO worker" helper
  io_uring: fix -ENOENT issue with linked timer with short timeout
  io_uring: don't do flush cancel under inflight_lock
  io_uring: flag SQPOLL busy condition to userspace
  io_uring: make ASYNC_CANCEL work with poll and timeout
  io_uring: provide fallback request for OOM situations
  io_uring: convert accept4() -ERESTARTSYS into -EINTR
  io_uring: fix error clear of ->file_table in io_sqe_files_register()
  io_uring: separate the io_free_req and io_free_req_find_next interface
  io_uring: keep io_put_req only responsible for release and put req
  ...
parents 54f0e540 eac406c6
MAINTAINERS:
@@ -8564,12 +8564,13 @@ F: include/linux/iova.h
 
 IO_URING
 M: Jens Axboe <axboe@kernel.dk>
-L: linux-block@vger.kernel.org
-L: linux-fsdevel@vger.kernel.org
+L: io-uring@vger.kernel.org
 T: git git://git.kernel.dk/linux-block
 T: git git://git.kernel.dk/liburing
 S: Maintained
 F: fs/io_uring.c
+F: fs/io-wq.c
+F: fs/io-wq.h
 F: include/uapi/linux/io_uring.h
 
 IPMI SUBSYSTEM
fs/Kconfig:
@@ -322,4 +322,7 @@ source "fs/nls/Kconfig"
 source "fs/dlm/Kconfig"
 source "fs/unicode/Kconfig"
 
+config IO_WQ
+	bool
+
 endmenu
fs/Makefile:
@@ -32,6 +32,7 @@ obj-$(CONFIG_EVENTFD) += eventfd.o
 obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
 obj-$(CONFIG_AIO) += aio.o
 obj-$(CONFIG_IO_URING) += io_uring.o
+obj-$(CONFIG_IO_WQ) += io-wq.o
 obj-$(CONFIG_FS_DAX) += dax.o
 obj-$(CONFIG_FS_ENCRYPTION) += crypto/
 obj-$(CONFIG_FS_VERITY) += verity/

fs/io-wq.c (new file):
// SPDX-License-Identifier: GPL-2.0
/*
* Basic worker thread pool for io_uring
*
* Copyright (C) 2019 Jens Axboe
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
#include "io-wq.h"
#define WORKER_IDLE_TIMEOUT (5 * HZ)
enum {
IO_WORKER_F_UP = 1, /* up and active */
IO_WORKER_F_RUNNING = 2, /* account as running */
IO_WORKER_F_FREE = 4, /* worker on free list */
IO_WORKER_F_EXITING = 8, /* worker exiting */
IO_WORKER_F_FIXED = 16, /* static idle worker */
IO_WORKER_F_BOUND = 32, /* is doing bounded work */
};
enum {
IO_WQ_BIT_EXIT = 0, /* wq exiting */
IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
};
enum {
IO_WQE_FLAG_STALLED = 1, /* stalled on hash */
};
/*
* One for each thread in a wqe pool
*/
struct io_worker {
refcount_t ref;
unsigned flags;
struct hlist_nulls_node nulls_node;
struct list_head all_list;
struct task_struct *task;
wait_queue_head_t wait;
struct io_wqe *wqe;
struct io_wq_work *cur_work;
spinlock_t lock;
struct rcu_head rcu;
struct mm_struct *mm;
struct files_struct *restore_files;
};
#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER 6
#else
#define IO_WQ_HASH_ORDER 5
#endif
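/*
 * Per-node worker accounting, one instance for bounded work (predictable
 * execution time, e.g. regular file IO) and one for unbounded work (may
 * block indefinitely, e.g. sockets). nr_running counts workers currently
 * executing work from this class.
 */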
struct io_wqe_acct {
unsigned nr_workers;
unsigned max_workers;
atomic_t nr_running;
};
enum {
IO_WQ_ACCT_BOUND,
IO_WQ_ACCT_UNBOUND,
};
/*
* Per-node worker thread pool
*/
struct io_wqe {
struct {
spinlock_t lock;
struct list_head work_list;
unsigned long hash_map;
unsigned flags;
} ____cacheline_aligned_in_smp;
int node;
struct io_wqe_acct acct[2];
struct hlist_nulls_head free_list;
struct hlist_nulls_head busy_list;
struct list_head all_list;
struct io_wq *wq;
};
/*
* Per io_wq state
*/
struct io_wq {
struct io_wqe **wqes;
unsigned long state;
unsigned nr_wqes;
get_work_fn *get_work;
put_work_fn *put_work;
struct task_struct *manager;
struct user_struct *user;
struct mm_struct *mm;
refcount_t refs;
struct completion done;
};
static bool io_worker_get(struct io_worker *worker)
{
return refcount_inc_not_zero(&worker->ref);
}
static void io_worker_release(struct io_worker *worker)
{
if (refcount_dec_and_test(&worker->ref))
wake_up_process(worker->task);
}
/*
* Note: drops the wqe->lock if returning true! The caller must re-acquire
* the lock in that case. Some callers need to restart handling if this
* happens, so we can't just re-acquire the lock on behalf of the caller.
*/
static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
{
bool dropped_lock = false;
if (current->files != worker->restore_files) {
__acquire(&wqe->lock);
spin_unlock_irq(&wqe->lock);
dropped_lock = true;
task_lock(current);
current->files = worker->restore_files;
task_unlock(current);
}
/*
* If we have an active mm, we need to drop the wq lock before unusing
* it. If we do, return true and let the caller retry the idle loop.
*/
if (worker->mm) {
if (!dropped_lock) {
__acquire(&wqe->lock);
spin_unlock_irq(&wqe->lock);
dropped_lock = true;
}
__set_current_state(TASK_RUNNING);
set_fs(KERNEL_DS);
unuse_mm(worker->mm);
mmput(worker->mm);
worker->mm = NULL;
}
return dropped_lock;
}
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
struct io_wq_work *work)
{
if (work->flags & IO_WQ_WORK_UNBOUND)
return &wqe->acct[IO_WQ_ACCT_UNBOUND];
return &wqe->acct[IO_WQ_ACCT_BOUND];
}
static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
struct io_worker *worker)
{
if (worker->flags & IO_WORKER_F_BOUND)
return &wqe->acct[IO_WQ_ACCT_BOUND];
return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}
static void io_worker_exit(struct io_worker *worker)
{
struct io_wqe *wqe = worker->wqe;
struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
unsigned nr_workers;
/*
* If we're not at zero, someone else is holding a brief reference
* to the worker. Wait for that to go away.
*/
set_current_state(TASK_INTERRUPTIBLE);
if (!refcount_dec_and_test(&worker->ref))
schedule();
__set_current_state(TASK_RUNNING);
preempt_disable();
current->flags &= ~PF_IO_WORKER;
if (worker->flags & IO_WORKER_F_RUNNING)
atomic_dec(&acct->nr_running);
if (!(worker->flags & IO_WORKER_F_BOUND))
atomic_dec(&wqe->wq->user->processes);
worker->flags = 0;
preempt_enable();
spin_lock_irq(&wqe->lock);
hlist_nulls_del_rcu(&worker->nulls_node);
list_del_rcu(&worker->all_list);
if (__io_worker_unuse(wqe, worker)) {
__release(&wqe->lock);
spin_lock_irq(&wqe->lock);
}
acct->nr_workers--;
nr_workers = wqe->acct[IO_WQ_ACCT_BOUND].nr_workers +
wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers;
spin_unlock_irq(&wqe->lock);
/* all workers gone, wq exit can proceed */
if (!nr_workers && refcount_dec_and_test(&wqe->wq->refs))
complete(&wqe->wq->done);
kfree_rcu(worker, rcu);
}
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
__must_hold(wqe->lock)
{
if (!list_empty(&wqe->work_list) && !(wqe->flags & IO_WQE_FLAG_STALLED))
return true;
return false;
}
/*
* Check head of free list for an available worker. If one isn't available,
* caller must wake up the wq manager to create one.
*/
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
__must_hold(RCU)
{
struct hlist_nulls_node *n;
struct io_worker *worker;
n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
if (is_a_nulls(n))
return false;
worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
if (io_worker_get(worker)) {
wake_up(&worker->wait);
io_worker_release(worker);
return true;
}
return false;
}
/*
* We need a worker. If we find a free one, we're good. If not, and we're
* below the max number of workers, wake up the manager to create one.
*/
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
bool ret;
/*
* Most likely an attempt to queue unbounded work on an io_wq that
* wasn't setup with any unbounded workers.
*/
WARN_ON_ONCE(!acct->max_workers);
rcu_read_lock();
ret = io_wqe_activate_free_worker(wqe);
rcu_read_unlock();
if (!ret && acct->nr_workers < acct->max_workers)
wake_up_process(wqe->wq->manager);
}
static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
{
struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
atomic_inc(&acct->nr_running);
}
static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
__must_hold(wqe->lock)
{
struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
io_wqe_wake_worker(wqe, acct);
}
static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
{
allow_kernel_signal(SIGINT);
current->flags |= PF_IO_WORKER;
worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
worker->restore_files = current->files;
io_wqe_inc_running(wqe, worker);
}
/*
* Worker will start processing some work. Move it to the busy list, if
* it's currently on the freelist
*/
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
struct io_wq_work *work)
__must_hold(wqe->lock)
{
bool worker_bound, work_bound;
if (worker->flags & IO_WORKER_F_FREE) {
worker->flags &= ~IO_WORKER_F_FREE;
hlist_nulls_del_init_rcu(&worker->nulls_node);
hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
}
/*
* If worker is moving from bound to unbound (or vice versa), then
* ensure we update the running accounting.
*/
worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
if (worker_bound != work_bound) {
io_wqe_dec_running(wqe, worker);
if (work_bound) {
worker->flags |= IO_WORKER_F_BOUND;
wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
atomic_dec(&wqe->wq->user->processes);
} else {
worker->flags &= ~IO_WORKER_F_BOUND;
wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
atomic_inc(&wqe->wq->user->processes);
}
io_wqe_inc_running(wqe, worker);
}
}
/*
* No work, worker going to sleep. Move to freelist, and unuse mm if we
* have one attached. Dropping the mm may potentially sleep, so we drop
* the lock in that case and return success. Since the caller has to
* retry the loop in that case (we changed task state), we don't regrab
* the lock if we return success.
*/
static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
__must_hold(wqe->lock)
{
if (!(worker->flags & IO_WORKER_F_FREE)) {
worker->flags |= IO_WORKER_F_FREE;
hlist_nulls_del_init_rcu(&worker->nulls_node);
hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
}
return __io_worker_unuse(wqe, worker);
}
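/*
 * Grab the next runnable work item off the list: unhashed work can run at
 * any time, hashed work only runs if no other worker currently owns the
 * same hash bucket (used to serialize e.g. writes against one inode).
 */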
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe, unsigned *hash)
__must_hold(wqe->lock)
{
struct io_wq_work *work;
list_for_each_entry(work, &wqe->work_list, list) {
/* not hashed, can run anytime */
if (!(work->flags & IO_WQ_WORK_HASHED)) {
list_del(&work->list);
return work;
}
/* hashed, can run if not already running */
*hash = work->flags >> IO_WQ_HASH_SHIFT;
if (!(wqe->hash_map & BIT_ULL(*hash))) {
wqe->hash_map |= BIT_ULL(*hash);
list_del(&work->list);
return work;
}
}
return NULL;
}
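/*
 * Main work loop for a worker: pull work under wqe->lock, publish it as
 * ->cur_work (so cancellation can find it), attach the submitter's files/mm
 * if the work needs them, then run work->func(). If the handler hands back
 * a dependent work item, keep running it in this context rather than
 * re-queueing it.
 */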
static void io_worker_handle_work(struct io_worker *worker)
__releases(wqe->lock)
{
struct io_wq_work *work, *old_work = NULL, *put_work = NULL;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
do {
unsigned hash = -1U;
/*
* If we got some work, mark us as busy. If we didn't, but
* the list isn't empty, it means we stalled on hashed work.
* Mark us stalled so we don't keep looking for work when we
* can't make progress, any work completion or insertion will
* clear the stalled flag.
*/
work = io_get_next_work(wqe, &hash);
if (work)
__io_worker_busy(wqe, worker, work);
else if (!list_empty(&wqe->work_list))
wqe->flags |= IO_WQE_FLAG_STALLED;
spin_unlock_irq(&wqe->lock);
if (put_work && wq->put_work)
wq->put_work(old_work);
if (!work)
break;
next:
/* flush any pending signals before assigning new work */
if (signal_pending(current))
flush_signals(current);
spin_lock_irq(&worker->lock);
worker->cur_work = work;
spin_unlock_irq(&worker->lock);
if ((work->flags & IO_WQ_WORK_NEEDS_FILES) &&
current->files != work->files) {
task_lock(current);
current->files = work->files;
task_unlock(current);
}
if ((work->flags & IO_WQ_WORK_NEEDS_USER) && !worker->mm &&
wq->mm && mmget_not_zero(wq->mm)) {
use_mm(wq->mm);
set_fs(USER_DS);
worker->mm = wq->mm;
}
if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
work->flags |= IO_WQ_WORK_CANCEL;
if (worker->mm)
work->flags |= IO_WQ_WORK_HAS_MM;
if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
put_work = work;
wq->get_work(work);
}
old_work = work;
work->func(&work);
spin_lock_irq(&worker->lock);
worker->cur_work = NULL;
spin_unlock_irq(&worker->lock);
spin_lock_irq(&wqe->lock);
if (hash != -1U) {
wqe->hash_map &= ~BIT_ULL(hash);
wqe->flags &= ~IO_WQE_FLAG_STALLED;
}
if (work && work != old_work) {
spin_unlock_irq(&wqe->lock);
if (put_work && wq->put_work) {
wq->put_work(put_work);
put_work = NULL;
}
/* dependent work not hashed */
hash = -1U;
goto next;
}
} while (1);
}
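/*
 * Worker kthread main loop: handle queued work while there is any,
 * otherwise park on the free list and sleep. An idle worker exits after
 * WORKER_IDLE_TIMEOUT unless it is the fixed worker kept around per node.
 */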
static int io_wqe_worker(void *data)
{
struct io_worker *worker = data;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
DEFINE_WAIT(wait);
io_worker_start(wqe, worker);
while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
prepare_to_wait(&worker->wait, &wait, TASK_INTERRUPTIBLE);
spin_lock_irq(&wqe->lock);
if (io_wqe_run_queue(wqe)) {
__set_current_state(TASK_RUNNING);
io_worker_handle_work(worker);
continue;
}
/* drops the lock on success, retry */
if (__io_worker_idle(wqe, worker)) {
__release(&wqe->lock);
continue;
}
spin_unlock_irq(&wqe->lock);
if (signal_pending(current))
flush_signals(current);
if (schedule_timeout(WORKER_IDLE_TIMEOUT))
continue;
/* timed out, exit unless we're the fixed worker */
if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
!(worker->flags & IO_WORKER_F_FIXED))
break;
}
finish_wait(&worker->wait, &wait);
if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
spin_lock_irq(&wqe->lock);
if (!list_empty(&wqe->work_list))
io_worker_handle_work(worker);
else
spin_unlock_irq(&wqe->lock);
}
io_worker_exit(worker);
return 0;
}
/*
* Called when a worker is scheduled in. Mark us as currently running.
*/
void io_wq_worker_running(struct task_struct *tsk)
{
struct io_worker *worker = kthread_data(tsk);
struct io_wqe *wqe = worker->wqe;
if (!(worker->flags & IO_WORKER_F_UP))
return;
if (worker->flags & IO_WORKER_F_RUNNING)
return;
worker->flags |= IO_WORKER_F_RUNNING;
io_wqe_inc_running(wqe, worker);
}
/*
* Called when worker is going to sleep. If there are no workers currently
* running and we have work pending, wake up a free one or have the manager
* set one up.
*/
void io_wq_worker_sleeping(struct task_struct *tsk)
{
struct io_worker *worker = kthread_data(tsk);
struct io_wqe *wqe = worker->wqe;
if (!(worker->flags & IO_WORKER_F_UP))
return;
if (!(worker->flags & IO_WORKER_F_RUNNING))
return;
worker->flags &= ~IO_WORKER_F_RUNNING;
spin_lock_irq(&wqe->lock);
io_wqe_dec_running(wqe, worker);
spin_unlock_irq(&wqe->lock);
}
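/*
 * Create a new worker kthread for the given accounting class (bound or
 * unbound), put it on the free list, and mark the first bound worker on a
 * node as the "fixed" worker that never times out.
 */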
static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
struct io_wqe_acct *acct = &wqe->acct[index];
struct io_worker *worker;
worker = kcalloc_node(1, sizeof(*worker), GFP_KERNEL, wqe->node);
if (!worker)
return;
refcount_set(&worker->ref, 1);
worker->nulls_node.pprev = NULL;
init_waitqueue_head(&worker->wait);
worker->wqe = wqe;
spin_lock_init(&worker->lock);
worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
"io_wqe_worker-%d/%d", index, wqe->node);
if (IS_ERR(worker->task)) {
kfree(worker);
return;
}
spin_lock_irq(&wqe->lock);
hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
list_add_tail_rcu(&worker->all_list, &wqe->all_list);
worker->flags |= IO_WORKER_F_FREE;
if (index == IO_WQ_ACCT_BOUND)
worker->flags |= IO_WORKER_F_BOUND;
if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
worker->flags |= IO_WORKER_F_FIXED;
acct->nr_workers++;
spin_unlock_irq(&wqe->lock);
if (index == IO_WQ_ACCT_UNBOUND)
atomic_inc(&wq->user->processes);
wake_up_process(worker->task);
}
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
__must_hold(wqe->lock)
{
struct io_wqe_acct *acct = &wqe->acct[index];
/* always ensure we have one bounded worker */
if (index == IO_WQ_ACCT_BOUND && !acct->nr_workers)
return true;
/* if we have available workers or no work, no need */
if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
return false;
return acct->nr_workers < acct->max_workers;
}
/*
* Manager thread. Tasked with creating new workers, if we need them.
*/
static int io_wq_manager(void *data)
{
struct io_wq *wq = data;
while (!kthread_should_stop()) {
int i;
for (i = 0; i < wq->nr_wqes; i++) {
struct io_wqe *wqe = wq->wqes[i];
bool fork_worker[2] = { false, false };
spin_lock_irq(&wqe->lock);
if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
fork_worker[IO_WQ_ACCT_BOUND] = true;
if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
fork_worker[IO_WQ_ACCT_UNBOUND] = true;
spin_unlock_irq(&wqe->lock);
if (fork_worker[IO_WQ_ACCT_BOUND])
create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
if (fork_worker[IO_WQ_ACCT_UNBOUND])
create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
}
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ);
}
return 0;
}
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
struct io_wq_work *work)
{
bool free_worker;
if (!(work->flags & IO_WQ_WORK_UNBOUND))
return true;
if (atomic_read(&acct->nr_running))
return true;
rcu_read_lock();
free_worker = !hlist_nulls_empty(&wqe->free_list);
rcu_read_unlock();
if (free_worker)
return true;
if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
!(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
return false;
return true;
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
unsigned long flags;
/*
* Do early check to see if we need a new unbound worker, and if we do,
* if we're allowed to do so. This isn't 100% accurate as there's a
* gap between this check and incrementing the value, but that's OK.
* It's close enough to not be an issue, fork() has the same delay.
*/
if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
work->flags |= IO_WQ_WORK_CANCEL;
work->func(&work);
return;
}
spin_lock_irqsave(&wqe->lock, flags);
list_add_tail(&work->list, &wqe->work_list);
wqe->flags &= ~IO_WQE_FLAG_STALLED;
spin_unlock_irqrestore(&wqe->lock, flags);
if (!atomic_read(&acct->nr_running))
io_wqe_wake_worker(wqe, acct);
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
struct io_wqe *wqe = wq->wqes[numa_node_id()];
io_wqe_enqueue(wqe, work);
}
/*
* Enqueue work, hashed by some key. Work items that hash to the same value
* will not be done in parallel. Used to limit concurrent writes, generally
* hashed by inode.
*/
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val)
{
struct io_wqe *wqe = wq->wqes[numa_node_id()];
unsigned bit;
bit = hash_ptr(val, IO_WQ_HASH_ORDER);
work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
io_wqe_enqueue(wqe, work);
}
static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
{
send_sig(SIGINT, worker->task, 1);
return false;
}
/*
* Iterate the passed in list and call the specific function for each
* worker that isn't exiting
*/
static bool io_wq_for_each_worker(struct io_wqe *wqe,
bool (*func)(struct io_worker *, void *),
void *data)
{
struct io_worker *worker;
bool ret = false;
list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
if (io_worker_get(worker)) {
ret = func(worker, data);
io_worker_release(worker);
if (ret)
break;
}
}
return ret;
}
void io_wq_cancel_all(struct io_wq *wq)
{
int i;
set_bit(IO_WQ_BIT_CANCEL, &wq->state);
/*
* Browse both lists, as there's a gap between handing work off
* to a worker and the worker putting itself on the busy_list
*/
rcu_read_lock();
for (i = 0; i < wq->nr_wqes; i++) {
struct io_wqe *wqe = wq->wqes[i];
io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
}
rcu_read_unlock();
}
struct io_cb_cancel_data {
struct io_wqe *wqe;
work_cancel_fn *cancel;
void *caller_data;
};
static bool io_work_cancel(struct io_worker *worker, void *cancel_data)
{
struct io_cb_cancel_data *data = cancel_data;
unsigned long flags;
bool ret = false;
/*
* Hold the lock to avoid ->cur_work going out of scope, caller
* may dereference the passed in work.
*/
spin_lock_irqsave(&worker->lock, flags);
if (worker->cur_work &&
data->cancel(worker->cur_work, data->caller_data)) {
send_sig(SIGINT, worker->task, 1);
ret = true;
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
work_cancel_fn *cancel,
void *cancel_data)
{
struct io_cb_cancel_data data = {
.wqe = wqe,
.cancel = cancel,
.caller_data = cancel_data,
};
struct io_wq_work *work;
unsigned long flags;
bool found = false;
spin_lock_irqsave(&wqe->lock, flags);
list_for_each_entry(work, &wqe->work_list, list) {
if (cancel(work, cancel_data)) {
list_del(&work->list);
found = true;
break;
}
}
spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
work->func(&work);
return IO_WQ_CANCEL_OK;
}
rcu_read_lock();
found = io_wq_for_each_worker(wqe, io_work_cancel, &data);
rcu_read_unlock();
return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
void *data)
{
enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
int i;
for (i = 0; i < wq->nr_wqes; i++) {
struct io_wqe *wqe = wq->wqes[i];
ret = io_wqe_cancel_cb_work(wqe, cancel, data);
if (ret != IO_WQ_CANCEL_NOTFOUND)
break;
}
return ret;
}
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
struct io_wq_work *work = data;
unsigned long flags;
bool ret = false;
if (worker->cur_work != work)
return false;
spin_lock_irqsave(&worker->lock, flags);
if (worker->cur_work == work) {
send_sig(SIGINT, worker->task, 1);
ret = true;
}
spin_unlock_irqrestore(&worker->lock, flags);
return ret;
}
static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
struct io_wq_work *cwork)
{
struct io_wq_work *work;
unsigned long flags;
bool found = false;
cwork->flags |= IO_WQ_WORK_CANCEL;
/*
* First check pending list, if we're lucky we can just remove it
* from there. CANCEL_OK means that the work is returned as-new,
* no completion will be posted for it.
*/
spin_lock_irqsave(&wqe->lock, flags);
list_for_each_entry(work, &wqe->work_list, list) {
if (work == cwork) {
list_del(&work->list);
found = true;
break;
}
}
spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
work->flags |= IO_WQ_WORK_CANCEL;
work->func(&work);
return IO_WQ_CANCEL_OK;
}
/*
* Now check if a free (going busy) or busy worker has the work
* currently running. If we find it there, we'll return CANCEL_RUNNING
* as an indication that we attempted to signal cancellation. The
* completion will run normally in this case.
*/
rcu_read_lock();
found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, cwork);
rcu_read_unlock();
return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
int i;
for (i = 0; i < wq->nr_wqes; i++) {
struct io_wqe *wqe = wq->wqes[i];
ret = io_wqe_cancel_work(wqe, cwork);
if (ret != IO_WQ_CANCEL_NOTFOUND)
break;
}
return ret;
}
struct io_wq_flush_data {
struct io_wq_work work;
struct completion done;
};
static void io_wq_flush_func(struct io_wq_work **workptr)
{
struct io_wq_work *work = *workptr;
struct io_wq_flush_data *data;
data = container_of(work, struct io_wq_flush_data, work);
complete(&data->done);
}
/*
* Doesn't wait for previously queued work to finish. When this completes,
* it just means that previously queued work was started.
*/
void io_wq_flush(struct io_wq *wq)
{
struct io_wq_flush_data data;
int i;
for (i = 0; i < wq->nr_wqes; i++) {
struct io_wqe *wqe = wq->wqes[i];
init_completion(&data.done);
INIT_IO_WORK(&data.work, io_wq_flush_func);
data.work.flags |= IO_WQ_WORK_INTERNAL;
io_wqe_enqueue(wqe, &data.work);
wait_for_completion(&data.done);
}
}
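/*
 * Create an io_wq: one io_wqe (worker pool) per online NUMA node, with at
 * most 'bounded' bound workers each; unbounded workers are capped by the
 * submitting user's RLIMIT_NPROC. Also starts the manager kthread that
 * forks new workers on demand.
 */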
struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
struct user_struct *user, get_work_fn *get_work,
put_work_fn *put_work)
{
int ret = -ENOMEM, i, node;
struct io_wq *wq;
wq = kcalloc(1, sizeof(*wq), GFP_KERNEL);
if (!wq)
return ERR_PTR(-ENOMEM);
wq->nr_wqes = num_online_nodes();
wq->wqes = kcalloc(wq->nr_wqes, sizeof(struct io_wqe *), GFP_KERNEL);
if (!wq->wqes) {
kfree(wq);
return ERR_PTR(-ENOMEM);
}
wq->get_work = get_work;
wq->put_work = put_work;
/* caller must already hold a reference to this */
wq->user = user;
i = 0;
refcount_set(&wq->refs, wq->nr_wqes);
for_each_online_node(node) {
struct io_wqe *wqe;
wqe = kcalloc_node(1, sizeof(struct io_wqe), GFP_KERNEL, node);
if (!wqe)
break;
wq->wqes[i] = wqe;
wqe->node = node;
wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
if (user) {
wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
task_rlimit(current, RLIMIT_NPROC);
}
atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
wqe->node = node;
wqe->wq = wq;
spin_lock_init(&wqe->lock);
INIT_LIST_HEAD(&wqe->work_list);
INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
INIT_LIST_HEAD(&wqe->all_list);
i++;
}
init_completion(&wq->done);
if (i != wq->nr_wqes)
goto err;
/* caller must have already done mmgrab() on this mm */
wq->mm = mm;
wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
if (!IS_ERR(wq->manager)) {
wake_up_process(wq->manager);
return wq;
}
ret = PTR_ERR(wq->manager);
wq->manager = NULL;
err:
complete(&wq->done);
io_wq_destroy(wq);
return ERR_PTR(ret);
}
static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
wake_up_process(worker->task);
return false;
}
void io_wq_destroy(struct io_wq *wq)
{
int i;
if (wq->manager) {
set_bit(IO_WQ_BIT_EXIT, &wq->state);
kthread_stop(wq->manager);
}
rcu_read_lock();
for (i = 0; i < wq->nr_wqes; i++) {
struct io_wqe *wqe = wq->wqes[i];
if (!wqe)
continue;
io_wq_for_each_worker(wqe, io_wq_worker_wake, NULL);
}
rcu_read_unlock();
wait_for_completion(&wq->done);
for (i = 0; i < wq->nr_wqes; i++)
kfree(wq->wqes[i]);
kfree(wq->wqes);
kfree(wq);
}

fs/io-wq.h (new file):
#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H
struct io_wq;
enum {
IO_WQ_WORK_CANCEL = 1,
IO_WQ_WORK_HAS_MM = 2,
IO_WQ_WORK_HASHED = 4,
IO_WQ_WORK_NEEDS_USER = 8,
IO_WQ_WORK_NEEDS_FILES = 16,
IO_WQ_WORK_UNBOUND = 32,
IO_WQ_WORK_INTERNAL = 64,
IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
};
enum io_wq_cancel {
IO_WQ_CANCEL_OK, /* cancelled before started */
IO_WQ_CANCEL_RUNNING, /* found, running, and attempted cancelled */
IO_WQ_CANCEL_NOTFOUND, /* work not found */
};
struct io_wq_work {
struct list_head list;
void (*func)(struct io_wq_work **);
unsigned flags;
struct files_struct *files;
};
#define INIT_IO_WORK(work, _func) \
do { \
(work)->func = _func; \
(work)->flags = 0; \
(work)->files = NULL; \
} while (0)
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);
struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
struct user_struct *user,
get_work_fn *get_work, put_work_fn *put_work);
void io_wq_destroy(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
void io_wq_flush(struct io_wq *wq);
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
void *data);
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif
static inline bool io_wq_current_is_worker(void)
{
return in_task() && (current->flags & PF_IO_WORKER);
}
#endif
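
For orientation, a similarly hedged sketch of how a kernel-side user might
drive the interface declared above; the example_* names are invented, only
the io_wq_* calls, the flags, and INIT_IO_WORK() come from this header:

#include <linux/err.h>
#include "io-wq.h"

static struct io_wq_work example_work;

/* io-wq invokes this from a worker kthread, possibly with CANCEL set. */
static void example_work_fn(struct io_wq_work **workptr)
{
	struct io_wq_work *work = *workptr;

	if (work->flags & IO_WQ_WORK_CANCEL) {
		/* complete the request with -ECANCELED instead of running it */
		return;
	}
	/* do the potentially blocking part of the request here */
}

static int example_use(struct mm_struct *mm, struct user_struct *user)
{
	struct io_wq *wq;

	/* up to 32 bounded workers per NUMA node; no get/put work hooks */
	wq = io_wq_create(32, mm, user, NULL, NULL);
	if (IS_ERR(wq))
		return PTR_ERR(wq);

	INIT_IO_WORK(&example_work, example_work_fn);
	io_wq_enqueue(wq, &example_work);	/* may run on any worker */
	/*
	 * io_wq_enqueue_hashed(wq, &example_work, some_key) would instead
	 * serialize it against other work hashed to the same key.
	 */

	io_wq_cancel_all(wq);	/* flag anything still queued or running */
	io_wq_destroy(wq);	/* waits for the workers to exit */
	return 0;
}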

fs/io_uring.c:
@@ -56,7 +56,6 @@
 #include <linux/mmu_context.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
-#include <linux/workqueue.h>
 #include <linux/kthread.h>
 #include <linux/blkdev.h>
 #include <linux/bvec.h>
@@ -71,12 +70,24 @@
 #include <linux/sizes.h>
 #include <linux/hugetlb.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/io_uring.h>
+
 #include <uapi/linux/io_uring.h>
 
 #include "internal.h"
+#include "io-wq.h"
 
 #define IORING_MAX_ENTRIES 32768
-#define IORING_MAX_FIXED_FILES 1024
+#define IORING_MAX_CQ_ENTRIES (2 * IORING_MAX_ENTRIES)
+
+/*
+ * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
+ */
+#define IORING_FILE_TABLE_SHIFT 9
+#define IORING_MAX_FILES_TABLE (1U << IORING_FILE_TABLE_SHIFT)
+#define IORING_FILE_TABLE_MASK (IORING_MAX_FILES_TABLE - 1)
+#define IORING_MAX_FIXED_FILES (64 * IORING_MAX_FILES_TABLE)
 
 struct io_uring {
 	u32 head ____cacheline_aligned_in_smp;
...@@ -161,14 +172,8 @@ struct io_mapped_ubuf { ...@@ -161,14 +172,8 @@ struct io_mapped_ubuf {
unsigned int nr_bvecs; unsigned int nr_bvecs;
}; };
struct async_list { struct fixed_file_table {
spinlock_t lock; struct file **files;
atomic_t cnt;
struct list_head list;
struct file *file;
off_t io_start;
size_t io_len;
}; };
struct io_ring_ctx { struct io_ring_ctx {
...@@ -180,6 +185,7 @@ struct io_ring_ctx { ...@@ -180,6 +185,7 @@ struct io_ring_ctx {
unsigned int flags; unsigned int flags;
bool compat; bool compat;
bool account_mem; bool account_mem;
bool cq_overflow_flushed;
/* /*
* Ring buffer of indices into array of io_uring_sqe, which is * Ring buffer of indices into array of io_uring_sqe, which is
...@@ -198,38 +204,30 @@ struct io_ring_ctx { ...@@ -198,38 +204,30 @@ struct io_ring_ctx {
unsigned sq_mask; unsigned sq_mask;
unsigned sq_thread_idle; unsigned sq_thread_idle;
unsigned cached_sq_dropped; unsigned cached_sq_dropped;
atomic_t cached_cq_overflow;
struct io_uring_sqe *sq_sqes; struct io_uring_sqe *sq_sqes;
struct list_head defer_list; struct list_head defer_list;
struct list_head timeout_list; struct list_head timeout_list;
struct list_head cq_overflow_list;
wait_queue_head_t inflight_wait;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
struct io_rings *rings;
/* IO offload */ /* IO offload */
struct workqueue_struct *sqo_wq[2]; struct io_wq *io_wq;
struct task_struct *sqo_thread; /* if using sq thread polling */ struct task_struct *sqo_thread; /* if using sq thread polling */
struct mm_struct *sqo_mm; struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait; wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
struct {
unsigned cached_cq_tail;
atomic_t cached_cq_overflow;
unsigned cq_entries;
unsigned cq_mask;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
atomic_t cq_timeouts;
} ____cacheline_aligned_in_smp;
struct io_rings *rings;
/* /*
* If used, fixed file set. Writers must ensure that ->refs is dead, * If used, fixed file set. Writers must ensure that ->refs is dead,
* readers must ensure that ->refs is alive as long as the file* is * readers must ensure that ->refs is alive as long as the file* is
* used. Only updated through io_uring_register(2). * used. Only updated through io_uring_register(2).
*/ */
struct file **user_files; struct fixed_file_table *file_table;
unsigned nr_user_files; unsigned nr_user_files;
/* if used, fixed mapped user buffers */ /* if used, fixed mapped user buffers */
...@@ -238,7 +236,25 @@ struct io_ring_ctx { ...@@ -238,7 +236,25 @@ struct io_ring_ctx {
struct user_struct *user; struct user_struct *user;
struct completion ctx_done; /* 0 is for ctx quiesce/reinit/free, 1 is for sqo_thread started */
struct completion *completions;
/* if all else fails... */
struct io_kiocb *fallback_req;
#if defined(CONFIG_UNIX)
struct socket *ring_sock;
#endif
struct {
unsigned cached_cq_tail;
unsigned cq_entries;
unsigned cq_mask;
atomic_t cq_timeouts;
struct wait_queue_head cq_wait;
struct fasync_struct *cq_fasync;
struct eventfd_ctx *cq_ev_fd;
} ____cacheline_aligned_in_smp;
struct { struct {
struct mutex uring_lock; struct mutex uring_lock;
...@@ -255,22 +271,20 @@ struct io_ring_ctx { ...@@ -255,22 +271,20 @@ struct io_ring_ctx {
* manipulate the list, hence no extra locking is needed there. * manipulate the list, hence no extra locking is needed there.
*/ */
struct list_head poll_list; struct list_head poll_list;
struct list_head cancel_list; struct rb_root cancel_tree;
} ____cacheline_aligned_in_smp;
struct async_list pending_async[2]; spinlock_t inflight_lock;
struct list_head inflight_list;
#if defined(CONFIG_UNIX) } ____cacheline_aligned_in_smp;
struct socket *ring_sock;
#endif
}; };
struct sqe_submit { struct sqe_submit {
const struct io_uring_sqe *sqe; const struct io_uring_sqe *sqe;
unsigned short index; struct file *ring_file;
int ring_fd;
u32 sequence; u32 sequence;
bool has_user; bool has_user;
bool needs_lock; bool in_async;
bool needs_fixed_file; bool needs_fixed_file;
}; };
...@@ -309,7 +323,10 @@ struct io_kiocb { ...@@ -309,7 +323,10 @@ struct io_kiocb {
struct sqe_submit submit; struct sqe_submit submit;
struct io_ring_ctx *ctx; struct io_ring_ctx *ctx;
union {
struct list_head list; struct list_head list;
struct rb_node rb_node;
};
struct list_head link_list; struct list_head link_list;
unsigned int flags; unsigned int flags;
refcount_t refs; refcount_t refs;
...@@ -320,18 +337,22 @@ struct io_kiocb { ...@@ -320,18 +337,22 @@ struct io_kiocb {
#define REQ_F_IO_DRAIN 16 /* drain existing IO first */ #define REQ_F_IO_DRAIN 16 /* drain existing IO first */
#define REQ_F_IO_DRAINED 32 /* drain done */ #define REQ_F_IO_DRAINED 32 /* drain done */
#define REQ_F_LINK 64 /* linked sqes */ #define REQ_F_LINK 64 /* linked sqes */
#define REQ_F_LINK_DONE 128 /* linked sqes done */ #define REQ_F_LINK_TIMEOUT 128 /* has linked timeout */
#define REQ_F_FAIL_LINK 256 /* fail rest of links */ #define REQ_F_FAIL_LINK 256 /* fail rest of links */
#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */ #define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
#define REQ_F_TIMEOUT 1024 /* timeout request */ #define REQ_F_TIMEOUT 1024 /* timeout request */
#define REQ_F_ISREG 2048 /* regular file */ #define REQ_F_ISREG 2048 /* regular file */
#define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */ #define REQ_F_MUST_PUNT 4096 /* must be punted even for NONBLOCK */
#define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */ #define REQ_F_TIMEOUT_NOSEQ 8192 /* no timeout sequence */
#define REQ_F_INFLIGHT 16384 /* on inflight list */
#define REQ_F_COMP_LOCKED 32768 /* completion under lock */
u64 user_data; u64 user_data;
u32 result; u32 result;
u32 sequence; u32 sequence;
struct work_struct work; struct list_head inflight_entry;
struct io_wq_work work;
}; };
#define IO_PLUG_THRESHOLD 2 #define IO_PLUG_THRESHOLD 2
...@@ -357,10 +378,11 @@ struct io_submit_state { ...@@ -357,10 +378,11 @@ struct io_submit_state {
unsigned int ios_left; unsigned int ios_left;
}; };
static void io_sq_wq_submit_work(struct work_struct *work); static void io_wq_submit_work(struct io_wq_work **workptr);
static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data, static void io_cqring_fill_event(struct io_kiocb *req, long res);
long res);
static void __io_free_req(struct io_kiocb *req); static void __io_free_req(struct io_kiocb *req);
static void io_put_req(struct io_kiocb *req);
static void io_double_put_req(struct io_kiocb *req);
static struct kmem_cache *req_cachep; static struct kmem_cache *req_cachep;
...@@ -383,57 +405,67 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref) ...@@ -383,57 +405,67 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{ {
struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
complete(&ctx->ctx_done); complete(&ctx->completions[0]);
} }
static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{ {
struct io_ring_ctx *ctx; struct io_ring_ctx *ctx;
int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) if (!ctx)
return NULL; return NULL;
ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
if (!ctx->fallback_req)
goto err;
ctx->completions = kmalloc(2 * sizeof(struct completion), GFP_KERNEL);
if (!ctx->completions)
goto err;
if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free, if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) { PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
kfree(ctx); goto err;
return NULL;
}
ctx->flags = p->flags; ctx->flags = p->flags;
init_waitqueue_head(&ctx->cq_wait); init_waitqueue_head(&ctx->cq_wait);
init_completion(&ctx->ctx_done); INIT_LIST_HEAD(&ctx->cq_overflow_list);
init_completion(&ctx->sqo_thread_started); init_completion(&ctx->completions[0]);
init_completion(&ctx->completions[1]);
mutex_init(&ctx->uring_lock); mutex_init(&ctx->uring_lock);
init_waitqueue_head(&ctx->wait); init_waitqueue_head(&ctx->wait);
for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
spin_lock_init(&ctx->pending_async[i].lock);
INIT_LIST_HEAD(&ctx->pending_async[i].list);
atomic_set(&ctx->pending_async[i].cnt, 0);
}
spin_lock_init(&ctx->completion_lock); spin_lock_init(&ctx->completion_lock);
INIT_LIST_HEAD(&ctx->poll_list); INIT_LIST_HEAD(&ctx->poll_list);
INIT_LIST_HEAD(&ctx->cancel_list); ctx->cancel_tree = RB_ROOT;
INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->defer_list);
INIT_LIST_HEAD(&ctx->timeout_list); INIT_LIST_HEAD(&ctx->timeout_list);
init_waitqueue_head(&ctx->inflight_wait);
spin_lock_init(&ctx->inflight_lock);
INIT_LIST_HEAD(&ctx->inflight_list);
return ctx; return ctx;
err:
if (ctx->fallback_req)
kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx->completions);
kfree(ctx);
return NULL;
} }
static inline bool __io_sequence_defer(struct io_ring_ctx *ctx, static inline bool __req_need_defer(struct io_kiocb *req)
struct io_kiocb *req)
{ {
struct io_ring_ctx *ctx = req->ctx;
return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+ atomic_read(&ctx->cached_cq_overflow); + atomic_read(&ctx->cached_cq_overflow);
} }
static inline bool io_sequence_defer(struct io_ring_ctx *ctx, static inline bool req_need_defer(struct io_kiocb *req)
struct io_kiocb *req)
{ {
if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN) if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) == REQ_F_IO_DRAIN)
return false; return __req_need_defer(req);
return __io_sequence_defer(ctx, req); return false;
} }
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
...@@ -441,7 +473,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx) ...@@ -441,7 +473,7 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
struct io_kiocb *req; struct io_kiocb *req;
req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list); req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
if (req && !io_sequence_defer(ctx, req)) { if (req && !req_need_defer(req)) {
list_del_init(&req->list); list_del_init(&req->list);
return req; return req;
} }
...@@ -457,7 +489,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx) ...@@ -457,7 +489,7 @@ static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
if (req) { if (req) {
if (req->flags & REQ_F_TIMEOUT_NOSEQ) if (req->flags & REQ_F_TIMEOUT_NOSEQ)
return NULL; return NULL;
if (!__io_sequence_defer(ctx, req)) { if (!__req_need_defer(req)) {
list_del_init(&req->list); list_del_init(&req->list);
return req; return req;
} }
...@@ -481,21 +513,59 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx) ...@@ -481,21 +513,59 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
} }
} }
static inline void io_queue_async_work(struct io_ring_ctx *ctx, static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
struct io_kiocb *req) {
u8 opcode = READ_ONCE(sqe->opcode);
return !(opcode == IORING_OP_READ_FIXED ||
opcode == IORING_OP_WRITE_FIXED);
}
static inline bool io_prep_async_work(struct io_kiocb *req)
{ {
int rw = 0; bool do_hashed = false;
if (req->submit.sqe) { if (req->submit.sqe) {
switch (req->submit.sqe->opcode) { switch (req->submit.sqe->opcode) {
case IORING_OP_WRITEV: case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED: case IORING_OP_WRITE_FIXED:
rw = !(req->rw.ki_flags & IOCB_DIRECT); do_hashed = true;
/* fall-through */
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
case IORING_OP_SENDMSG:
case IORING_OP_RECVMSG:
case IORING_OP_ACCEPT:
case IORING_OP_POLL_ADD:
/*
* We know REQ_F_ISREG is not set on some of these
* opcodes, but this enables us to keep the check in
* just one place.
*/
if (!(req->flags & REQ_F_ISREG))
req->work.flags |= IO_WQ_WORK_UNBOUND;
break; break;
} }
if (io_sqe_needs_user(req->submit.sqe))
req->work.flags |= IO_WQ_WORK_NEEDS_USER;
} }
queue_work(ctx->sqo_wq[rw], &req->work); return do_hashed;
}
static inline void io_queue_async_work(struct io_kiocb *req)
{
bool do_hashed = io_prep_async_work(req);
struct io_ring_ctx *ctx = req->ctx;
trace_io_uring_queue_async_work(ctx, do_hashed, req, &req->work,
req->flags);
if (!do_hashed) {
io_wq_enqueue(ctx->io_wq, &req->work);
} else {
io_wq_enqueue_hashed(ctx->io_wq, &req->work,
file_inode(req->file));
}
} }
static void io_kill_timeout(struct io_kiocb *req) static void io_kill_timeout(struct io_kiocb *req)
...@@ -505,9 +575,9 @@ static void io_kill_timeout(struct io_kiocb *req) ...@@ -505,9 +575,9 @@ static void io_kill_timeout(struct io_kiocb *req)
ret = hrtimer_try_to_cancel(&req->timeout.timer); ret = hrtimer_try_to_cancel(&req->timeout.timer);
if (ret != -1) { if (ret != -1) {
atomic_inc(&req->ctx->cq_timeouts); atomic_inc(&req->ctx->cq_timeouts);
list_del(&req->list); list_del_init(&req->list);
io_cqring_fill_event(req->ctx, req->user_data, 0); io_cqring_fill_event(req, 0);
__io_free_req(req); io_put_req(req);
} }
} }
...@@ -537,7 +607,7 @@ static void io_commit_cqring(struct io_ring_ctx *ctx) ...@@ -537,7 +607,7 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
continue; continue;
} }
req->flags |= REQ_F_IO_DRAINED; req->flags |= REQ_F_IO_DRAINED;
io_queue_async_work(ctx, req); io_queue_async_work(req);
} }
} }
...@@ -559,50 +629,124 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx) ...@@ -559,50 +629,124 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
return &rings->cqes[tail & ctx->cq_mask]; return &rings->cqes[tail & ctx->cq_mask];
} }
static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data, static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
long res) {
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
if (waitqueue_active(&ctx->sqo_wait))
wake_up(&ctx->sqo_wait);
if (ctx->cq_ev_fd)
eventfd_signal(ctx->cq_ev_fd, 1);
}
static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
{
struct io_rings *rings = ctx->rings;
struct io_uring_cqe *cqe;
struct io_kiocb *req;
unsigned long flags;
LIST_HEAD(list);
if (!force) {
if (list_empty_careful(&ctx->cq_overflow_list))
return;
if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
rings->cq_ring_entries))
return;
}
spin_lock_irqsave(&ctx->completion_lock, flags);
/* if force is set, the ring is going away. always drop after that */
if (force)
ctx->cq_overflow_flushed = true;
while (!list_empty(&ctx->cq_overflow_list)) {
cqe = io_get_cqring(ctx);
if (!cqe && !force)
break;
req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
list);
list_move(&req->list, &list);
if (cqe) {
WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, req->result);
WRITE_ONCE(cqe->flags, 0);
} else {
WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow));
}
}
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
while (!list_empty(&list)) {
req = list_first_entry(&list, struct io_kiocb, list);
list_del(&req->list);
io_put_req(req);
}
}
static void io_cqring_fill_event(struct io_kiocb *req, long res)
{ {
struct io_ring_ctx *ctx = req->ctx;
struct io_uring_cqe *cqe; struct io_uring_cqe *cqe;
trace_io_uring_complete(ctx, req->user_data, res);
/* /*
* If we can't get a cq entry, userspace overflowed the * If we can't get a cq entry, userspace overflowed the
* submission (by quite a lot). Increment the overflow count in * submission (by quite a lot). Increment the overflow count in
* the ring. * the ring.
*/ */
cqe = io_get_cqring(ctx); cqe = io_get_cqring(ctx);
if (cqe) { if (likely(cqe)) {
WRITE_ONCE(cqe->user_data, ki_user_data); WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, 0); WRITE_ONCE(cqe->flags, 0);
} else { } else if (ctx->cq_overflow_flushed) {
WRITE_ONCE(ctx->rings->cq_overflow, WRITE_ONCE(ctx->rings->cq_overflow,
atomic_inc_return(&ctx->cached_cq_overflow)); atomic_inc_return(&ctx->cached_cq_overflow));
} else {
refcount_inc(&req->refs);
req->result = res;
list_add_tail(&req->list, &ctx->cq_overflow_list);
} }
} }
static void io_cqring_ev_posted(struct io_ring_ctx *ctx) static void io_cqring_add_event(struct io_kiocb *req, long res)
{
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
if (waitqueue_active(&ctx->sqo_wait))
wake_up(&ctx->sqo_wait);
if (ctx->cq_ev_fd)
eventfd_signal(ctx->cq_ev_fd, 1);
}
static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
long res)
{ {
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags); spin_lock_irqsave(&ctx->completion_lock, flags);
io_cqring_fill_event(ctx, user_data, res); io_cqring_fill_event(req, res);
io_commit_cqring(ctx); io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
} }
static inline bool io_is_fallback_req(struct io_kiocb *req)
{
return req == (struct io_kiocb *)
((unsigned long) req->ctx->fallback_req & ~1UL);
}
static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
{
struct io_kiocb *req;
req = ctx->fallback_req;
if (!test_and_set_bit_lock(0, (unsigned long *) ctx->fallback_req))
return req;
return NULL;
}
static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
struct io_submit_state *state) struct io_submit_state *state)
{ {
...@@ -615,7 +759,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, ...@@ -615,7 +759,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
if (!state) { if (!state) {
req = kmem_cache_alloc(req_cachep, gfp); req = kmem_cache_alloc(req_cachep, gfp);
if (unlikely(!req)) if (unlikely(!req))
goto out; goto fallback;
} else if (!state->free_reqs) { } else if (!state->free_reqs) {
size_t sz; size_t sz;
int ret; int ret;
...@@ -630,7 +774,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, ...@@ -630,7 +774,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
if (unlikely(ret <= 0)) { if (unlikely(ret <= 0)) {
state->reqs[0] = kmem_cache_alloc(req_cachep, gfp); state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
if (!state->reqs[0]) if (!state->reqs[0])
goto out; goto fallback;
ret = 1; ret = 1;
} }
state->free_reqs = ret - 1; state->free_reqs = ret - 1;
...@@ -642,14 +786,19 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx, ...@@ -642,14 +786,19 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
state->cur_req++; state->cur_req++;
} }
got_it:
req->file = NULL; req->file = NULL;
req->ctx = ctx; req->ctx = ctx;
req->flags = 0; req->flags = 0;
/* one is dropped after submission, the other at completion */ /* one is dropped after submission, the other at completion */
refcount_set(&req->refs, 2); refcount_set(&req->refs, 2);
req->result = 0; req->result = 0;
INIT_IO_WORK(&req->work, io_wq_submit_work);
return req; return req;
out: fallback:
req = io_get_fallback_req(ctx);
if (req)
goto got_it;
percpu_ref_put(&ctx->refs); percpu_ref_put(&ctx->refs);
return NULL; return NULL;
} }
...@@ -665,15 +814,48 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr) ...@@ -665,15 +814,48 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
static void __io_free_req(struct io_kiocb *req) static void __io_free_req(struct io_kiocb *req)
{ {
struct io_ring_ctx *ctx = req->ctx;
if (req->file && !(req->flags & REQ_F_FIXED_FILE)) if (req->file && !(req->flags & REQ_F_FIXED_FILE))
fput(req->file); fput(req->file);
percpu_ref_put(&req->ctx->refs); if (req->flags & REQ_F_INFLIGHT) {
unsigned long flags;
spin_lock_irqsave(&ctx->inflight_lock, flags);
list_del(&req->inflight_entry);
if (waitqueue_active(&ctx->inflight_wait))
wake_up(&ctx->inflight_wait);
spin_unlock_irqrestore(&ctx->inflight_lock, flags);
}
percpu_ref_put(&ctx->refs);
if (likely(!io_is_fallback_req(req)))
kmem_cache_free(req_cachep, req); kmem_cache_free(req_cachep, req);
else
clear_bit_unlock(0, (unsigned long *) ctx->fallback_req);
}
static bool io_link_cancel_timeout(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
int ret;
ret = hrtimer_try_to_cancel(&req->timeout.timer);
if (ret != -1) {
io_cqring_fill_event(req, -ECANCELED);
io_commit_cqring(ctx);
req->flags &= ~REQ_F_LINK;
io_put_req(req);
return true;
}
return false;
} }
static void io_req_link_next(struct io_kiocb *req) static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{ {
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt; struct io_kiocb *nxt;
bool wake_ev = false;
/* /*
* The list should never be empty when we are called here. But could * The list should never be empty when we are called here. But could
...@@ -681,18 +863,35 @@ static void io_req_link_next(struct io_kiocb *req) ...@@ -681,18 +863,35 @@ static void io_req_link_next(struct io_kiocb *req)
* safe side. * safe side.
*/ */
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list); nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
if (nxt) { while (nxt) {
list_del(&nxt->list); list_del_init(&nxt->list);
if (!list_empty(&req->link_list)) { if (!list_empty(&req->link_list)) {
INIT_LIST_HEAD(&nxt->link_list); INIT_LIST_HEAD(&nxt->link_list);
list_splice(&req->link_list, &nxt->link_list); list_splice(&req->link_list, &nxt->link_list);
nxt->flags |= REQ_F_LINK; nxt->flags |= REQ_F_LINK;
} }
nxt->flags |= REQ_F_LINK_DONE; /*
INIT_WORK(&nxt->work, io_sq_wq_submit_work); * If we're in async work, we can continue processing the chain
io_queue_async_work(req->ctx, nxt); * in this context instead of having to queue up new async work.
*/
if (req->flags & REQ_F_LINK_TIMEOUT) {
wake_ev = io_link_cancel_timeout(nxt);
/* we dropped this link, get next */
nxt = list_first_entry_or_null(&req->link_list,
struct io_kiocb, list);
} else if (nxtptr && io_wq_current_is_worker()) {
*nxtptr = nxt;
break;
} else {
io_queue_async_work(nxt);
break;
} }
}
if (wake_ev)
io_cqring_ev_posted(ctx);
} }
/* /*
...@@ -700,43 +899,118 @@ static void io_req_link_next(struct io_kiocb *req) ...@@ -700,43 +899,118 @@ static void io_req_link_next(struct io_kiocb *req)
*/ */
static void io_fail_links(struct io_kiocb *req) static void io_fail_links(struct io_kiocb *req)
{ {
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *link; struct io_kiocb *link;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
while (!list_empty(&req->link_list)) { while (!list_empty(&req->link_list)) {
link = list_first_entry(&req->link_list, struct io_kiocb, list); link = list_first_entry(&req->link_list, struct io_kiocb, list);
list_del(&link->list); list_del_init(&link->list);
trace_io_uring_fail_link(req, link);
io_cqring_add_event(req->ctx, link->user_data, -ECANCELED); if ((req->flags & REQ_F_LINK_TIMEOUT) &&
__io_free_req(link); link->submit.sqe->opcode == IORING_OP_LINK_TIMEOUT) {
io_link_cancel_timeout(link);
} else {
io_cqring_fill_event(link, -ECANCELED);
io_double_put_req(link);
}
} }
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
} }
static void io_free_req(struct io_kiocb *req) static void io_free_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt)
{ {
if (likely(!(req->flags & REQ_F_LINK))) {
__io_free_req(req);
return;
}
/* /*
* If LINK is set, we have dependent requests in this chain. If we * If LINK is set, we have dependent requests in this chain. If we
* didn't fail this request, queue the first one up, moving any other * didn't fail this request, queue the first one up, moving any other
* dependencies to the next request. In case of failure, fail the rest * dependencies to the next request. In case of failure, fail the rest
* of the chain. * of the chain.
*/ */
if (req->flags & REQ_F_LINK) { if (req->flags & REQ_F_FAIL_LINK) {
if (req->flags & REQ_F_FAIL_LINK)
io_fail_links(req); io_fail_links(req);
else } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) ==
io_req_link_next(req); REQ_F_LINK_TIMEOUT) {
struct io_ring_ctx *ctx = req->ctx;
unsigned long flags;
/*
* If this is a timeout link, we could be racing with the
* timeout timer. Grab the completion lock for this case to
* protect against that.
*/
spin_lock_irqsave(&ctx->completion_lock, flags);
io_req_link_next(req, nxt);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
} else {
io_req_link_next(req, nxt);
} }
__io_free_req(req); __io_free_req(req);
} }
static void io_free_req(struct io_kiocb *req)
{
io_free_req_find_next(req, NULL);
}
/*
* Drop reference to request, return next in chain (if there is one) if this
* was the last reference to this request.
*/
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
struct io_kiocb *nxt = NULL;
if (refcount_dec_and_test(&req->refs))
io_free_req_find_next(req, &nxt);
if (nxt) {
if (nxtptr)
*nxtptr = nxt;
else
io_queue_async_work(nxt);
}
}
static void io_put_req(struct io_kiocb *req) static void io_put_req(struct io_kiocb *req)
{ {
if (refcount_dec_and_test(&req->refs)) if (refcount_dec_and_test(&req->refs))
io_free_req(req); io_free_req(req);
} }
-static unsigned io_cqring_events(struct io_rings *rings)
+static void io_double_put_req(struct io_kiocb *req)
+{
+   /* drop both submit and complete references */
+   if (refcount_sub_and_test(2, &req->refs))
+       __io_free_req(req);
+}
+
+static unsigned io_cqring_events(struct io_ring_ctx *ctx, bool noflush)
 {
+   struct io_rings *rings = ctx->rings;
+
+   /*
+    * noflush == true is from the waitqueue handler, just ensure we wake
+    * up the task, and the next invocation will flush the entries. We
+    * cannot safely do it from here.
+    */
+   if (noflush && !list_empty(&ctx->cq_overflow_list))
+       return -1U;
+
+   io_cqring_overflow_flush(ctx, false);
+
    /* See comment at the top of this file */
    smp_rmb();
    return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
...@@ -765,7 +1039,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, ...@@ -765,7 +1039,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
req = list_first_entry(done, struct io_kiocb, list); req = list_first_entry(done, struct io_kiocb, list);
list_del(&req->list); list_del(&req->list);
io_cqring_fill_event(ctx, req->user_data, req->result); io_cqring_fill_event(req, req->result);
(*nr_events)++; (*nr_events)++;
if (refcount_dec_and_test(&req->refs)) { if (refcount_dec_and_test(&req->refs)) {
...@@ -774,8 +1048,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, ...@@ -774,8 +1048,8 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
* completions for those, only batch free for fixed * completions for those, only batch free for fixed
* file and non-linked commands. * file and non-linked commands.
*/ */
if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) == if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
REQ_F_FIXED_FILE) { REQ_F_FIXED_FILE) && !io_is_fallback_req(req)) {
reqs[to_free++] = req; reqs[to_free++] = req;
if (to_free == ARRAY_SIZE(reqs)) if (to_free == ARRAY_SIZE(reqs))
io_free_req_many(ctx, reqs, &to_free); io_free_req_many(ctx, reqs, &to_free);
...@@ -892,7 +1166,7 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, ...@@ -892,7 +1166,7 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
* If we do, we can potentially be spinning for commands that * If we do, we can potentially be spinning for commands that
* already triggered a CQE (eg in error). * already triggered a CQE (eg in error).
*/ */
if (io_cqring_events(ctx->rings)) if (io_cqring_events(ctx, false))
break; break;
/* /*
...@@ -952,7 +1226,7 @@ static void kiocb_end_write(struct io_kiocb *req) ...@@ -952,7 +1226,7 @@ static void kiocb_end_write(struct io_kiocb *req)
file_end_write(req->file); file_end_write(req->file);
} }
-static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw_common(struct kiocb *kiocb, long res)
 {
    struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
...@@ -961,17 +1235,35 @@ static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
    if ((req->flags & REQ_F_LINK) && res != req->result)
        req->flags |= REQ_F_FAIL_LINK;
-   io_cqring_add_event(req->ctx, req->user_data, res);
-   io_put_req(req);
+   io_cqring_add_event(req, res);
 }

-static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
+static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
    struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

-   if (kiocb->ki_flags & IOCB_WRITE)
-       kiocb_end_write(req);
+   io_complete_rw_common(kiocb, res);
+   io_put_req(req);
+}
+
+static struct io_kiocb *__io_complete_rw(struct kiocb *kiocb, long res)
+{
+   struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+   struct io_kiocb *nxt = NULL;
+
+   io_complete_rw_common(kiocb, res);
+   io_put_req_find_next(req, &nxt);
+   return nxt;
+}
+
+static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
+{
+   struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
+
+   if (kiocb->ki_flags & IOCB_WRITE)
+       kiocb_end_write(req);

    if ((req->flags & REQ_F_LINK) && res != req->result)
        req->flags |= REQ_F_FAIL_LINK;
    req->result = res;
...@@ -1072,10 +1364,9 @@ static bool io_file_supports_async(struct file *file) ...@@ -1072,10 +1364,9 @@ static bool io_file_supports_async(struct file *file)
return false; return false;
} }
static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s, static int io_prep_rw(struct io_kiocb *req, bool force_nonblock)
bool force_nonblock)
{ {
const struct io_uring_sqe *sqe = s->sqe; const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct kiocb *kiocb = &req->rw; struct kiocb *kiocb = &req->rw;
unsigned ioprio; unsigned ioprio;
...@@ -1159,6 +1450,15 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) ...@@ -1159,6 +1450,15 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
} }
} }
static void kiocb_done(struct kiocb *kiocb, ssize_t ret, struct io_kiocb **nxt,
bool in_async)
{
if (in_async && ret >= 0 && nxt && kiocb->ki_complete == io_complete_rw)
*nxt = __io_complete_rw(kiocb, ret);
else
io_rw_done(kiocb, ret);
}
static int io_import_fixed(struct io_ring_ctx *ctx, int rw, static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
const struct io_uring_sqe *sqe, const struct io_uring_sqe *sqe,
struct iov_iter *iter) struct iov_iter *iter)
...@@ -1270,65 +1570,6 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw, ...@@ -1270,65 +1570,6 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter); return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
} }
static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
{
if (al->file == kiocb->ki_filp) {
off_t start, end;
/*
* Allow merging if we're anywhere in the range of the same
* page. Generally this happens for sub-page reads or writes,
* and it's beneficial to allow the first worker to bring the
* page in and the piggy backed work can then work on the
* cached page.
*/
start = al->io_start & PAGE_MASK;
end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
return true;
}
al->file = NULL;
return false;
}
/*
* Make a note of the last file/offset/direction we punted to async
* context. We'll use this information to see if we can piggy back a
* sequential request onto the previous one, if it still hasn't been
* completed by the async worker.
*/
static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
{
struct async_list *async_list = &req->ctx->pending_async[rw];
struct kiocb *kiocb = &req->rw;
struct file *filp = kiocb->ki_filp;
if (io_should_merge(async_list, kiocb)) {
unsigned long max_bytes;
/* Use 8x RA size as a decent limiter for both reads/writes */
max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
if (!max_bytes)
max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
/* If max len are exceeded, reset the state */
if (async_list->io_len + len <= max_bytes) {
req->flags |= REQ_F_SEQ_PREV;
async_list->io_len += len;
} else {
async_list->file = NULL;
}
}
/* New file? Reset state. */
if (async_list->file != filp) {
async_list->io_start = kiocb->ki_pos;
async_list->io_len = len;
async_list->file = filp;
}
}
/* /*
* For files that don't have ->read_iter() and ->write_iter(), handle them * For files that don't have ->read_iter() and ->write_iter(), handle them
* by looping over ->read() or ->write() manually. * by looping over ->read() or ->write() manually.
...@@ -1374,7 +1615,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, ...@@ -1374,7 +1615,7 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
return ret; return ret;
} }
static int io_read(struct io_kiocb *req, const struct sqe_submit *s, static int io_read(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock) bool force_nonblock)
{ {
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
...@@ -1384,7 +1625,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1384,7 +1625,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count; size_t iov_count;
ssize_t read_size, ret; ssize_t read_size, ret;
ret = io_prep_rw(req, s, force_nonblock); ret = io_prep_rw(req, force_nonblock);
if (ret) if (ret)
return ret; return ret;
file = kiocb->ki_filp; file = kiocb->ki_filp;
...@@ -1392,7 +1633,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1392,7 +1633,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_READ))) if (unlikely(!(file->f_mode & FMODE_READ)))
return -EBADF; return -EBADF;
ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter); ret = io_import_iovec(req->ctx, READ, &req->submit, &iovec, &iter);
if (ret < 0) if (ret < 0)
return ret; return ret;
...@@ -1423,23 +1664,16 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1423,23 +1664,16 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
ret2 > 0 && ret2 < read_size) ret2 > 0 && ret2 < read_size)
ret2 = -EAGAIN; ret2 = -EAGAIN;
/* Catch -EAGAIN return for forced non-blocking submission */ /* Catch -EAGAIN return for forced non-blocking submission */
if (!force_nonblock || ret2 != -EAGAIN) { if (!force_nonblock || ret2 != -EAGAIN)
io_rw_done(kiocb, ret2); kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
} else { else
/*
* If ->needs_lock is true, we're already in async
* context.
*/
if (!s->needs_lock)
io_async_list_note(READ, req, iov_count);
ret = -EAGAIN; ret = -EAGAIN;
} }
}
kfree(iovec); kfree(iovec);
return ret; return ret;
} }
static int io_write(struct io_kiocb *req, const struct sqe_submit *s, static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
bool force_nonblock) bool force_nonblock)
{ {
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
...@@ -1449,7 +1683,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1449,7 +1683,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
size_t iov_count; size_t iov_count;
ssize_t ret; ssize_t ret;
ret = io_prep_rw(req, s, force_nonblock); ret = io_prep_rw(req, force_nonblock);
if (ret) if (ret)
return ret; return ret;
...@@ -1457,7 +1691,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1457,7 +1691,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
if (unlikely(!(file->f_mode & FMODE_WRITE))) if (unlikely(!(file->f_mode & FMODE_WRITE)))
return -EBADF; return -EBADF;
ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter); ret = io_import_iovec(req->ctx, WRITE, &req->submit, &iovec, &iter);
if (ret < 0) if (ret < 0)
return ret; return ret;
...@@ -1467,12 +1701,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1467,12 +1701,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
iov_count = iov_iter_count(&iter); iov_count = iov_iter_count(&iter);
ret = -EAGAIN; ret = -EAGAIN;
if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) { if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT))
/* If ->needs_lock is true, we're already in async context. */
if (!s->needs_lock)
io_async_list_note(WRITE, req, iov_count);
goto out_free; goto out_free;
}
ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count); ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
if (!ret) { if (!ret) {
...@@ -1497,18 +1727,11 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1497,18 +1727,11 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
ret2 = call_write_iter(file, kiocb, &iter); ret2 = call_write_iter(file, kiocb, &iter);
else else
ret2 = loop_rw_iter(WRITE, file, kiocb, &iter); ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
if (!force_nonblock || ret2 != -EAGAIN) { if (!force_nonblock || ret2 != -EAGAIN)
io_rw_done(kiocb, ret2); kiocb_done(kiocb, ret2, nxt, req->submit.in_async);
} else { else
/*
* If ->needs_lock is true, we're already in async
* context.
*/
if (!s->needs_lock)
io_async_list_note(WRITE, req, iov_count);
ret = -EAGAIN; ret = -EAGAIN;
} }
}
out_free: out_free:
kfree(iovec); kfree(iovec);
return ret; return ret;
...@@ -1517,15 +1740,14 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s, ...@@ -1517,15 +1740,14 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
/* /*
* IORING_OP_NOP just posts a completion event, nothing else. * IORING_OP_NOP just posts a completion event, nothing else.
*/ */
static int io_nop(struct io_kiocb *req, u64 user_data) static int io_nop(struct io_kiocb *req)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
long err = 0;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL; return -EINVAL;
io_cqring_add_event(ctx, user_data, err); io_cqring_add_event(req, 0);
io_put_req(req); io_put_req(req);
return 0; return 0;
} }
...@@ -1546,7 +1768,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -1546,7 +1768,7 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
} }
static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
bool force_nonblock) struct io_kiocb **nxt, bool force_nonblock)
{ {
loff_t sqe_off = READ_ONCE(sqe->off); loff_t sqe_off = READ_ONCE(sqe->off);
loff_t sqe_len = READ_ONCE(sqe->len); loff_t sqe_len = READ_ONCE(sqe->len);
...@@ -1572,8 +1794,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -1572,8 +1794,8 @@ static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (ret < 0 && (req->flags & REQ_F_LINK)) if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK; req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req->ctx, sqe->user_data, ret); io_cqring_add_event(req, ret);
io_put_req(req); io_put_req_find_next(req, nxt);
return 0; return 0;
} }
...@@ -1595,6 +1817,7 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -1595,6 +1817,7 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static int io_sync_file_range(struct io_kiocb *req, static int io_sync_file_range(struct io_kiocb *req,
const struct io_uring_sqe *sqe, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt,
bool force_nonblock) bool force_nonblock)
{ {
loff_t sqe_off; loff_t sqe_off;
...@@ -1618,14 +1841,14 @@ static int io_sync_file_range(struct io_kiocb *req, ...@@ -1618,14 +1841,14 @@ static int io_sync_file_range(struct io_kiocb *req,
if (ret < 0 && (req->flags & REQ_F_LINK)) if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK; req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req->ctx, sqe->user_data, ret); io_cqring_add_event(req, ret);
io_put_req(req); io_put_req_find_next(req, nxt);
return 0; return 0;
} }
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
bool force_nonblock, struct io_kiocb **nxt, bool force_nonblock,
long (*fn)(struct socket *, struct user_msghdr __user *, long (*fn)(struct socket *, struct user_msghdr __user *,
unsigned int)) unsigned int))
{ {
...@@ -1654,32 +1877,80 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, ...@@ -1654,32 +1877,80 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
return ret; return ret;
} }
io_cqring_add_event(req->ctx, sqe->user_data, ret); io_cqring_add_event(req, ret);
io_put_req(req); if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_put_req_find_next(req, nxt);
return 0; return 0;
} }
#endif #endif
static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
bool force_nonblock) struct io_kiocb **nxt, bool force_nonblock)
{ {
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock); return io_send_recvmsg(req, sqe, nxt, force_nonblock,
__sys_sendmsg_sock);
#else #else
return -EOPNOTSUPP; return -EOPNOTSUPP;
#endif #endif
} }
static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
bool force_nonblock) struct io_kiocb **nxt, bool force_nonblock)
{ {
#if defined(CONFIG_NET) #if defined(CONFIG_NET)
return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock); return io_send_recvmsg(req, sqe, nxt, force_nonblock,
__sys_recvmsg_sock);
#else #else
return -EOPNOTSUPP; return -EOPNOTSUPP;
#endif #endif
} }
static int io_accept(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt, bool force_nonblock)
{
#if defined(CONFIG_NET)
struct sockaddr __user *addr;
int __user *addr_len;
unsigned file_flags;
int flags, ret;
if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
return -EINVAL;
addr = (struct sockaddr __user *) (unsigned long) READ_ONCE(sqe->addr);
addr_len = (int __user *) (unsigned long) READ_ONCE(sqe->addr2);
flags = READ_ONCE(sqe->accept_flags);
file_flags = force_nonblock ? O_NONBLOCK : 0;
ret = __sys_accept4_file(req->file, file_flags, addr, addr_len, flags);
if (ret == -EAGAIN && force_nonblock) {
req->work.flags |= IO_WQ_WORK_NEEDS_FILES;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
io_put_req_find_next(req, nxt);
return 0;
#else
return -EOPNOTSUPP;
#endif
}
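
For orientation only, a minimal userspace counterpart of the accept path above, assuming liburing is available and new enough to provide io_uring_prep_accept() (liburing and that helper are not part of this patch). The prep helper fills the same SQE fields io_accept() reads: addr, addr2 and accept_flags; the user_data tag and fd names here are illustrative.

/* Sketch (assumes liburing): accept one connection through IORING_OP_ACCEPT. */
#include <liburing.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int accept_one(int listen_fd)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct sockaddr_in peer;
	socklen_t peer_len = sizeof(peer);
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	sqe = io_uring_get_sqe(&ring);
	/* fills sqe->addr, sqe->addr2 and sqe->accept_flags, as read by io_accept() */
	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&peer, &peer_len, 0);
	sqe->user_data = 1;		/* arbitrary tag, usable for cancellation/tracing */

	io_uring_submit(&ring);
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		ret = cqe->res;		/* new fd, or -errno (-EINTR rather than -ERESTARTSYS) */
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return ret;
}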
static inline void io_poll_remove_req(struct io_kiocb *req)
{
if (!RB_EMPTY_NODE(&req->rb_node)) {
rb_erase(&req->rb_node, &req->ctx->cancel_tree);
RB_CLEAR_NODE(&req->rb_node);
}
}
static void io_poll_remove_one(struct io_kiocb *req) static void io_poll_remove_one(struct io_kiocb *req)
{ {
struct io_poll_iocb *poll = &req->poll; struct io_poll_iocb *poll = &req->poll;
...@@ -1688,25 +1959,47 @@ static void io_poll_remove_one(struct io_kiocb *req) ...@@ -1688,25 +1959,47 @@ static void io_poll_remove_one(struct io_kiocb *req)
WRITE_ONCE(poll->canceled, true); WRITE_ONCE(poll->canceled, true);
if (!list_empty(&poll->wait.entry)) { if (!list_empty(&poll->wait.entry)) {
list_del_init(&poll->wait.entry); list_del_init(&poll->wait.entry);
io_queue_async_work(req->ctx, req); io_queue_async_work(req);
} }
spin_unlock(&poll->head->lock); spin_unlock(&poll->head->lock);
io_poll_remove_req(req);
list_del_init(&req->list);
} }
static void io_poll_remove_all(struct io_ring_ctx *ctx) static void io_poll_remove_all(struct io_ring_ctx *ctx)
{ {
struct rb_node *node;
struct io_kiocb *req; struct io_kiocb *req;
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
while (!list_empty(&ctx->cancel_list)) { while ((node = rb_first(&ctx->cancel_tree)) != NULL) {
req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list); req = rb_entry(node, struct io_kiocb, rb_node);
io_poll_remove_one(req); io_poll_remove_one(req);
} }
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
} }
static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
{
struct rb_node *p, *parent = NULL;
struct io_kiocb *req;
p = ctx->cancel_tree.rb_node;
while (p) {
parent = p;
req = rb_entry(parent, struct io_kiocb, rb_node);
if (sqe_addr < req->user_data) {
p = p->rb_left;
} else if (sqe_addr > req->user_data) {
p = p->rb_right;
} else {
io_poll_remove_one(req);
return 0;
}
}
return -ENOENT;
}
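
A small userspace sketch of what feeds this lookup. It assumes liburing with io_uring_prep_poll_add()/io_uring_prep_poll_remove(); neither helper is part of this patch, and the poll_remove prototype has varied between liburing versions (void * vs __u64 user_data). Either way, the POLL_REMOVE SQE carries the original user_data in sqe->addr, which is the key io_poll_cancel() searches the cancel_tree for.

/* Sketch (assumes liburing): arm a poll keyed by a tag, then cancel it by that tag. */
#include <liburing.h>
#include <poll.h>

static int poll_then_cancel(struct io_uring *ring, int fd, unsigned long tag)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_poll_add(sqe, fd, POLLIN);
	sqe->user_data = tag;			/* becomes the rb-tree key on the kernel side */

	sqe = io_uring_get_sqe(ring);
	/* older liburing takes a void * here; sqe->addr ends up holding the tag either way */
	io_uring_prep_poll_remove(sqe, (void *)tag);
	sqe->user_data = tag + 1;

	/* two CQEs come back: the poll (-ECANCELED if removed in time) and the remove result */
	return io_uring_submit(ring);
}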
/* /*
* Find a running poll command that matches one specified in sqe->addr, * Find a running poll command that matches one specified in sqe->addr,
* and remove it if found. * and remove it if found.
...@@ -1714,8 +2007,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx) ...@@ -1714,8 +2007,7 @@ static void io_poll_remove_all(struct io_ring_ctx *ctx)
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *poll_req, *next; int ret;
int ret = -ENOENT;
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL; return -EINVAL;
...@@ -1724,36 +2016,38 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -1724,36 +2016,38 @@ static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EINVAL; return -EINVAL;
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) { ret = io_poll_cancel(ctx, READ_ONCE(sqe->addr));
if (READ_ONCE(sqe->addr) == poll_req->user_data) {
io_poll_remove_one(poll_req);
ret = 0;
break;
}
}
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
io_cqring_add_event(req->ctx, sqe->user_data, ret); io_cqring_add_event(req, ret);
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_put_req(req); io_put_req(req);
return 0; return 0;
} }
static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req, static void io_poll_complete(struct io_kiocb *req, __poll_t mask)
__poll_t mask)
{ {
struct io_ring_ctx *ctx = req->ctx;
req->poll.done = true; req->poll.done = true;
io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask)); io_cqring_fill_event(req, mangle_poll(mask));
io_commit_cqring(ctx); io_commit_cqring(ctx);
} }
static void io_poll_complete_work(struct work_struct *work) static void io_poll_complete_work(struct io_wq_work **workptr)
{ {
struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_poll_iocb *poll = &req->poll; struct io_poll_iocb *poll = &req->poll;
struct poll_table_struct pt = { ._key = poll->events }; struct poll_table_struct pt = { ._key = poll->events };
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *nxt = NULL;
__poll_t mask = 0; __poll_t mask = 0;
if (work->flags & IO_WQ_WORK_CANCEL)
WRITE_ONCE(poll->canceled, true);
if (!READ_ONCE(poll->canceled)) if (!READ_ONCE(poll->canceled))
mask = vfs_poll(poll->file, &pt) & poll->events; mask = vfs_poll(poll->file, &pt) & poll->events;
...@@ -1770,12 +2064,15 @@ static void io_poll_complete_work(struct work_struct *work) ...@@ -1770,12 +2064,15 @@ static void io_poll_complete_work(struct work_struct *work)
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
return; return;
} }
list_del_init(&req->list); io_poll_remove_req(req);
io_poll_complete(ctx, req, mask); io_poll_complete(req, mask);
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
io_put_req(req);
io_put_req_find_next(req, &nxt);
if (nxt)
*workptr = &nxt->work;
} }
static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
...@@ -1794,15 +2091,22 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, ...@@ -1794,15 +2091,22 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
list_del_init(&poll->wait.entry); list_del_init(&poll->wait.entry);
/*
* Run completion inline if we can. We're using trylock here because
* we are violating the completion_lock -> poll wq lock ordering.
* If we have a link timeout, we're going to need the completion_lock
* for finalizing the request, so mark us as having grabbed that already.
*/
if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) { if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
list_del(&req->list); io_poll_remove_req(req);
io_poll_complete(ctx, req, mask); io_poll_complete(req, mask);
req->flags |= REQ_F_COMP_LOCKED;
io_put_req(req);
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
io_put_req(req);
} else { } else {
io_queue_async_work(ctx, req); io_queue_async_work(req);
} }
return 1; return 1;
...@@ -1829,7 +2133,27 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, ...@@ -1829,7 +2133,27 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
add_wait_queue(head, &pt->req->poll.wait); add_wait_queue(head, &pt->req->poll.wait);
} }
static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) static void io_poll_req_insert(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
struct rb_node **p = &ctx->cancel_tree.rb_node;
struct rb_node *parent = NULL;
struct io_kiocb *tmp;
while (*p) {
parent = *p;
tmp = rb_entry(parent, struct io_kiocb, rb_node);
if (req->user_data < tmp->user_data)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&req->rb_node, parent, p);
rb_insert_color(&req->rb_node, &ctx->cancel_tree);
}
static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt)
{ {
struct io_poll_iocb *poll = &req->poll; struct io_poll_iocb *poll = &req->poll;
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
...@@ -1846,9 +2170,10 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -1846,9 +2170,10 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EBADF; return -EBADF;
req->submit.sqe = NULL; req->submit.sqe = NULL;
INIT_WORK(&req->work, io_poll_complete_work); INIT_IO_WORK(&req->work, io_poll_complete_work);
events = READ_ONCE(sqe->poll_events); events = READ_ONCE(sqe->poll_events);
poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
RB_CLEAR_NODE(&req->rb_node);
poll->head = NULL; poll->head = NULL;
poll->done = false; poll->done = false;
...@@ -1881,18 +2206,18 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -1881,18 +2206,18 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
else if (cancel) else if (cancel)
WRITE_ONCE(poll->canceled, true); WRITE_ONCE(poll->canceled, true);
else if (!poll->done) /* actually waiting for an event */ else if (!poll->done) /* actually waiting for an event */
list_add_tail(&req->list, &ctx->cancel_list); io_poll_req_insert(req);
spin_unlock(&poll->head->lock); spin_unlock(&poll->head->lock);
} }
if (mask) { /* no async, we'd stolen it */ if (mask) { /* no async, we'd stolen it */
ipt.error = 0; ipt.error = 0;
io_poll_complete(ctx, req, mask); io_poll_complete(req, mask);
} }
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
if (mask) { if (mask) {
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
io_put_req(req); io_put_req_find_next(req, nxt);
} }
return ipt.error; return ipt.error;
} }
...@@ -1900,7 +2225,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -1900,7 +2225,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{ {
struct io_ring_ctx *ctx; struct io_ring_ctx *ctx;
struct io_kiocb *req, *prev; struct io_kiocb *req;
unsigned long flags; unsigned long flags;
req = container_of(timer, struct io_kiocb, timeout.timer); req = container_of(timer, struct io_kiocb, timeout.timer);
...@@ -1908,44 +2233,119 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) ...@@ -1908,44 +2233,119 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
atomic_inc(&ctx->cq_timeouts); atomic_inc(&ctx->cq_timeouts);
spin_lock_irqsave(&ctx->completion_lock, flags); spin_lock_irqsave(&ctx->completion_lock, flags);
/*
* We could be racing with timeout deletion. If the list is empty,
* then timeout lookup already found it and will be handling it.
*/
if (!list_empty(&req->list)) {
struct io_kiocb *prev;
/* /*
* Adjust the reqs sequence before the current one because it * will consume a slot in the cq_ring and the cq_tail
* will consume a slot in the cq_ring and the cq_tail pointer * will be increased, otherwise other timeout reqs may
* will be increased, otherwise other timeout reqs may return in * pointer will be increased, otherwise other timeout reqs may
* advance without waiting for enough wait_nr. * return in advance without waiting for enough wait_nr.
*/ */
prev = req; prev = req;
list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list) list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
prev->sequence++; prev->sequence++;
list_del(&req->list); list_del_init(&req->list);
}
io_cqring_fill_event(ctx, req->user_data, -ETIME); io_cqring_fill_event(req, -ETIME);
io_commit_cqring(ctx); io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags); spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx); io_cqring_ev_posted(ctx);
if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
io_put_req(req); io_put_req(req);
return HRTIMER_NORESTART; return HRTIMER_NORESTART;
} }
static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
{
struct io_kiocb *req;
int ret = -ENOENT;
list_for_each_entry(req, &ctx->timeout_list, list) {
if (user_data == req->user_data) {
list_del_init(&req->list);
ret = 0;
break;
}
}
if (ret == -ENOENT)
return ret;
ret = hrtimer_try_to_cancel(&req->timeout.timer);
if (ret == -1)
return -EALREADY;
io_cqring_fill_event(req, -ECANCELED);
io_put_req(req);
return 0;
}
/*
* Remove or update an existing timeout command
*/
static int io_timeout_remove(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_ring_ctx *ctx = req->ctx;
unsigned flags;
int ret;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len)
return -EINVAL;
flags = READ_ONCE(sqe->timeout_flags);
if (flags)
return -EINVAL;
spin_lock_irq(&ctx->completion_lock);
ret = io_timeout_cancel(ctx, READ_ONCE(sqe->addr));
io_cqring_fill_event(req, ret);
io_commit_cqring(ctx);
spin_unlock_irq(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
if (ret < 0 && req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
io_put_req(req);
return 0;
}
static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
unsigned count; unsigned count;
struct io_ring_ctx *ctx = req->ctx; struct io_ring_ctx *ctx = req->ctx;
struct list_head *entry; struct list_head *entry;
enum hrtimer_mode mode;
struct timespec64 ts; struct timespec64 ts;
unsigned span = 0; unsigned span = 0;
unsigned flags;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL; return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags || if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->len != 1)
sqe->len != 1) return -EINVAL;
flags = READ_ONCE(sqe->timeout_flags);
if (flags & ~IORING_TIMEOUT_ABS)
return -EINVAL; return -EINVAL;
if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr))) if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT; return -EFAULT;
if (flags & IORING_TIMEOUT_ABS)
mode = HRTIMER_MODE_ABS;
else
mode = HRTIMER_MODE_REL;
hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, mode);
req->flags |= REQ_F_TIMEOUT; req->flags |= REQ_F_TIMEOUT;
/* /*
...@@ -2006,21 +2406,92 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) ...@@ -2006,21 +2406,92 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->sequence -= span; req->sequence -= span;
add: add:
list_add(&req->list, entry); list_add(&req->list, entry);
req->timeout.timer.function = io_timeout_fn;
hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), mode);
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
return 0;
}
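
As a point of reference for the IORING_TIMEOUT_ABS handling above, a minimal userspace sketch of arming an absolute timeout. It assumes liburing with io_uring_prep_timeout() (not part of this patch); the deadline is taken on CLOCK_MONOTONIC to match the hrtimer_init() call above, and the tag value is illustrative.

/* Sketch (assumes liburing): arm a pure timer SQE with an absolute deadline. */
#include <liburing.h>
#include <errno.h>
#include <time.h>

static int arm_abs_timeout(struct io_uring *ring, unsigned delay_sec)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct timespec now;
	struct __kernel_timespec ts;

	if (!sqe)
		return -EBUSY;

	/* IORING_TIMEOUT_ABS deadlines are compared against CLOCK_MONOTONIC */
	clock_gettime(CLOCK_MONOTONIC, &now);
	ts.tv_sec = now.tv_sec + delay_sec;
	ts.tv_nsec = now.tv_nsec;

	/* count == 0: complete only when the timer fires (CQE res == -ETIME) */
	io_uring_prep_timeout(sqe, &ts, 0, IORING_TIMEOUT_ABS);
	sqe->user_data = 0x7457;	/* tag; IORING_OP_TIMEOUT_REMOVE cancels by this value */

	return io_uring_submit(ring);
}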
hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); static bool io_cancel_cb(struct io_wq_work *work, void *data)
req->timeout.timer.function = io_timeout_fn; {
hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts), struct io_kiocb *req = container_of(work, struct io_kiocb, work);
HRTIMER_MODE_REL);
return req->user_data == (unsigned long) data;
}
static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
{
enum io_wq_cancel cancel_ret;
int ret = 0;
cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
switch (cancel_ret) {
case IO_WQ_CANCEL_OK:
ret = 0;
break;
case IO_WQ_CANCEL_RUNNING:
ret = -EALREADY;
break;
case IO_WQ_CANCEL_NOTFOUND:
ret = -ENOENT;
break;
}
return ret;
}
static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
struct io_kiocb *req, __u64 sqe_addr,
struct io_kiocb **nxt)
{
unsigned long flags;
int ret;
ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
if (ret != -ENOENT) {
spin_lock_irqsave(&ctx->completion_lock, flags);
goto done;
}
spin_lock_irqsave(&ctx->completion_lock, flags);
ret = io_timeout_cancel(ctx, sqe_addr);
if (ret != -ENOENT)
goto done;
ret = io_poll_cancel(ctx, sqe_addr);
done:
io_cqring_fill_event(req, ret);
io_commit_cqring(ctx);
spin_unlock_irqrestore(&ctx->completion_lock, flags);
io_cqring_ev_posted(ctx);
if (ret < 0 && (req->flags & REQ_F_LINK))
req->flags |= REQ_F_FAIL_LINK;
io_put_req_find_next(req, nxt);
}
static int io_async_cancel(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt)
{
struct io_ring_ctx *ctx = req->ctx;
if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->flags || sqe->ioprio || sqe->off || sqe->len ||
sqe->cancel_flags)
return -EINVAL;
io_async_find_and_cancel(ctx, req, READ_ONCE(sqe->addr), NULL);
return 0; return 0;
} }
static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, static int io_req_defer(struct io_kiocb *req)
const struct io_uring_sqe *sqe)
{ {
const struct io_uring_sqe *sqe = req->submit.sqe;
struct io_uring_sqe *sqe_copy; struct io_uring_sqe *sqe_copy;
struct io_ring_ctx *ctx = req->ctx;
if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) /* Still need defer if there is pending req in defer list. */
if (!req_need_defer(req) && list_empty(&ctx->defer_list))
return 0; return 0;
sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL); sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
...@@ -2028,7 +2499,7 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2028,7 +2499,7 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EAGAIN; return -EAGAIN;
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) { if (!req_need_defer(req) && list_empty(&ctx->defer_list)) {
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
kfree(sqe_copy); kfree(sqe_copy);
return 0; return 0;
...@@ -2037,64 +2508,70 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2037,64 +2508,70 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
memcpy(sqe_copy, sqe, sizeof(*sqe_copy)); memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
req->submit.sqe = sqe_copy; req->submit.sqe = sqe_copy;
INIT_WORK(&req->work, io_sq_wq_submit_work); trace_io_uring_defer(ctx, req, false);
list_add_tail(&req->list, &ctx->defer_list); list_add_tail(&req->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
return -EIOCBQUEUED; return -EIOCBQUEUED;
} }
static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, static int __io_submit_sqe(struct io_kiocb *req, struct io_kiocb **nxt,
const struct sqe_submit *s, bool force_nonblock) bool force_nonblock)
{ {
int ret, opcode; int ret, opcode;
struct sqe_submit *s = &req->submit;
req->user_data = READ_ONCE(s->sqe->user_data); struct io_ring_ctx *ctx = req->ctx;
if (unlikely(s->index >= ctx->sq_entries))
return -EINVAL;
opcode = READ_ONCE(s->sqe->opcode); opcode = READ_ONCE(s->sqe->opcode);
switch (opcode) { switch (opcode) {
case IORING_OP_NOP: case IORING_OP_NOP:
ret = io_nop(req, req->user_data); ret = io_nop(req);
break; break;
case IORING_OP_READV: case IORING_OP_READV:
if (unlikely(s->sqe->buf_index)) if (unlikely(s->sqe->buf_index))
return -EINVAL; return -EINVAL;
ret = io_read(req, s, force_nonblock); ret = io_read(req, nxt, force_nonblock);
break; break;
case IORING_OP_WRITEV: case IORING_OP_WRITEV:
if (unlikely(s->sqe->buf_index)) if (unlikely(s->sqe->buf_index))
return -EINVAL; return -EINVAL;
ret = io_write(req, s, force_nonblock); ret = io_write(req, nxt, force_nonblock);
break; break;
case IORING_OP_READ_FIXED: case IORING_OP_READ_FIXED:
ret = io_read(req, s, force_nonblock); ret = io_read(req, nxt, force_nonblock);
break; break;
case IORING_OP_WRITE_FIXED: case IORING_OP_WRITE_FIXED:
ret = io_write(req, s, force_nonblock); ret = io_write(req, nxt, force_nonblock);
break; break;
case IORING_OP_FSYNC: case IORING_OP_FSYNC:
ret = io_fsync(req, s->sqe, force_nonblock); ret = io_fsync(req, s->sqe, nxt, force_nonblock);
break; break;
case IORING_OP_POLL_ADD: case IORING_OP_POLL_ADD:
ret = io_poll_add(req, s->sqe); ret = io_poll_add(req, s->sqe, nxt);
break; break;
case IORING_OP_POLL_REMOVE: case IORING_OP_POLL_REMOVE:
ret = io_poll_remove(req, s->sqe); ret = io_poll_remove(req, s->sqe);
break; break;
case IORING_OP_SYNC_FILE_RANGE: case IORING_OP_SYNC_FILE_RANGE:
ret = io_sync_file_range(req, s->sqe, force_nonblock); ret = io_sync_file_range(req, s->sqe, nxt, force_nonblock);
break; break;
case IORING_OP_SENDMSG: case IORING_OP_SENDMSG:
ret = io_sendmsg(req, s->sqe, force_nonblock); ret = io_sendmsg(req, s->sqe, nxt, force_nonblock);
break; break;
case IORING_OP_RECVMSG: case IORING_OP_RECVMSG:
ret = io_recvmsg(req, s->sqe, force_nonblock); ret = io_recvmsg(req, s->sqe, nxt, force_nonblock);
break; break;
case IORING_OP_TIMEOUT: case IORING_OP_TIMEOUT:
ret = io_timeout(req, s->sqe); ret = io_timeout(req, s->sqe);
break; break;
case IORING_OP_TIMEOUT_REMOVE:
ret = io_timeout_remove(req, s->sqe);
break;
case IORING_OP_ACCEPT:
ret = io_accept(req, s->sqe, nxt, force_nonblock);
break;
case IORING_OP_ASYNC_CANCEL:
ret = io_async_cancel(req, s->sqe, nxt);
break;
default: default:
ret = -EINVAL; ret = -EINVAL;
break; break;
...@@ -2108,81 +2585,40 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2108,81 +2585,40 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return -EAGAIN; return -EAGAIN;
/* workqueue context doesn't hold uring_lock, grab it now */ /* workqueue context doesn't hold uring_lock, grab it now */
if (s->needs_lock) if (s->in_async)
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
io_iopoll_req_issued(req); io_iopoll_req_issued(req);
if (s->needs_lock) if (s->in_async)
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
} }
return 0; return 0;
} }
static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx, static void io_wq_submit_work(struct io_wq_work **workptr)
const struct io_uring_sqe *sqe)
{
switch (sqe->opcode) {
case IORING_OP_READV:
case IORING_OP_READ_FIXED:
return &ctx->pending_async[READ];
case IORING_OP_WRITEV:
case IORING_OP_WRITE_FIXED:
return &ctx->pending_async[WRITE];
default:
return NULL;
}
}
static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
{
u8 opcode = READ_ONCE(sqe->opcode);
return !(opcode == IORING_OP_READ_FIXED ||
opcode == IORING_OP_WRITE_FIXED);
}
static void io_sq_wq_submit_work(struct work_struct *work)
{ {
struct io_wq_work *work = *workptr;
struct io_kiocb *req = container_of(work, struct io_kiocb, work); struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_ring_ctx *ctx = req->ctx;
struct mm_struct *cur_mm = NULL;
struct async_list *async_list;
LIST_HEAD(req_list);
mm_segment_t old_fs;
int ret;
async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
restart:
do {
struct sqe_submit *s = &req->submit; struct sqe_submit *s = &req->submit;
const struct io_uring_sqe *sqe = s->sqe; const struct io_uring_sqe *sqe = s->sqe;
unsigned int flags = req->flags; struct io_kiocb *nxt = NULL;
int ret = 0;
/* Ensure we clear previously set non-block flag */ /* Ensure we clear previously set non-block flag */
req->rw.ki_flags &= ~IOCB_NOWAIT; req->rw.ki_flags &= ~IOCB_NOWAIT;
ret = 0; if (work->flags & IO_WQ_WORK_CANCEL)
if (io_sqe_needs_user(sqe) && !cur_mm) { ret = -ECANCELED;
if (!mmget_not_zero(ctx->sqo_mm)) {
ret = -EFAULT;
} else {
cur_mm = ctx->sqo_mm;
use_mm(cur_mm);
old_fs = get_fs();
set_fs(USER_DS);
}
}
if (!ret) { if (!ret) {
s->has_user = cur_mm != NULL; s->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
s->needs_lock = true; s->in_async = true;
do { do {
ret = __io_submit_sqe(ctx, req, s, false); ret = __io_submit_sqe(req, &nxt, false);
/* /*
* We can get EAGAIN for polled IO even though * We can get EAGAIN for polled IO even though we're
* we're forcing a sync submission from here, * forcing a sync submission from here, since we can't
* since we can't wait for request slots on the * wait for request slots on the block side.
* block side.
*/ */
if (ret != -EAGAIN) if (ret != -EAGAIN)
break; break;
...@@ -2194,103 +2630,22 @@ static void io_sq_wq_submit_work(struct work_struct *work) ...@@ -2194,103 +2630,22 @@ static void io_sq_wq_submit_work(struct work_struct *work)
io_put_req(req); io_put_req(req);
if (ret) { if (ret) {
io_cqring_add_event(ctx, sqe->user_data, ret); if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK;
io_cqring_add_event(req, ret);
io_put_req(req); io_put_req(req);
} }
/* async context always use a copy of the sqe */ /* async context always use a copy of the sqe */
kfree(sqe); kfree(sqe);
/* req from defer and link list needn't decrease async cnt */ /* if a dependent link is ready, pass it back */
if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE)) if (!ret && nxt) {
goto out; io_prep_async_work(nxt);
*workptr = &nxt->work;
if (!async_list)
break;
if (!list_empty(&req_list)) {
req = list_first_entry(&req_list, struct io_kiocb,
list);
list_del(&req->list);
continue;
}
if (list_empty(&async_list->list))
break;
req = NULL;
spin_lock(&async_list->lock);
if (list_empty(&async_list->list)) {
spin_unlock(&async_list->lock);
break;
}
list_splice_init(&async_list->list, &req_list);
spin_unlock(&async_list->lock);
req = list_first_entry(&req_list, struct io_kiocb, list);
list_del(&req->list);
} while (req);
/*
* Rare case of racing with a submitter. If we find the count has
* dropped to zero AND we have pending work items, then restart
* the processing. This is a tiny race window.
*/
if (async_list) {
ret = atomic_dec_return(&async_list->cnt);
while (!ret && !list_empty(&async_list->list)) {
spin_lock(&async_list->lock);
atomic_inc(&async_list->cnt);
list_splice_init(&async_list->list, &req_list);
spin_unlock(&async_list->lock);
if (!list_empty(&req_list)) {
req = list_first_entry(&req_list,
struct io_kiocb, list);
list_del(&req->list);
goto restart;
}
ret = atomic_dec_return(&async_list->cnt);
}
}
out:
if (cur_mm) {
set_fs(old_fs);
unuse_mm(cur_mm);
mmput(cur_mm);
} }
} }
/*
* See if we can piggy back onto previously submitted work, that is still
* running. We currently only allow this if the new request is sequential
* to the previous one we punted.
*/
static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
{
bool ret;
if (!list)
return false;
if (!(req->flags & REQ_F_SEQ_PREV))
return false;
if (!atomic_read(&list->cnt))
return false;
ret = true;
spin_lock(&list->lock);
list_add_tail(&req->list, &list->list);
/*
* Ensure we see a simultaneous modification from io_sq_wq_submit_work()
*/
smp_mb();
if (!atomic_read(&list->cnt)) {
list_del_init(&req->list);
ret = false;
}
spin_unlock(&list->lock);
return ret;
}
static bool io_op_needs_file(const struct io_uring_sqe *sqe) static bool io_op_needs_file(const struct io_uring_sqe *sqe)
{ {
int op = READ_ONCE(sqe->opcode); int op = READ_ONCE(sqe->opcode);
...@@ -2299,15 +2654,28 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe) ...@@ -2299,15 +2654,28 @@ static bool io_op_needs_file(const struct io_uring_sqe *sqe)
case IORING_OP_NOP: case IORING_OP_NOP:
case IORING_OP_POLL_REMOVE: case IORING_OP_POLL_REMOVE:
case IORING_OP_TIMEOUT: case IORING_OP_TIMEOUT:
case IORING_OP_TIMEOUT_REMOVE:
case IORING_OP_ASYNC_CANCEL:
case IORING_OP_LINK_TIMEOUT:
return false; return false;
default: default:
return true; return true;
} }
} }
static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
struct io_submit_state *state, struct io_kiocb *req) int index)
{ {
struct fixed_file_table *table;
table = &ctx->file_table[index >> IORING_FILE_TABLE_SHIFT];
return table->files[index & IORING_FILE_TABLE_MASK];
}
static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req)
{
struct sqe_submit *s = &req->submit;
struct io_ring_ctx *ctx = req->ctx;
unsigned flags; unsigned flags;
int fd; int fd;
...@@ -2327,14 +2695,18 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, ...@@ -2327,14 +2695,18 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
return 0; return 0;
if (flags & IOSQE_FIXED_FILE) { if (flags & IOSQE_FIXED_FILE) {
if (unlikely(!ctx->user_files || if (unlikely(!ctx->file_table ||
(unsigned) fd >= ctx->nr_user_files)) (unsigned) fd >= ctx->nr_user_files))
return -EBADF; return -EBADF;
req->file = ctx->user_files[fd]; fd = array_index_nospec(fd, ctx->nr_user_files);
req->file = io_file_from_index(ctx, fd);
if (!req->file)
return -EBADF;
req->flags |= REQ_F_FIXED_FILE; req->flags |= REQ_F_FIXED_FILE;
} else { } else {
if (s->needs_fixed_file) if (s->needs_fixed_file)
return -EBADF; return -EBADF;
trace_io_uring_file_get(ctx, fd);
req->file = io_file_get(state, fd); req->file = io_file_get(state, fd);
if (unlikely(!req->file)) if (unlikely(!req->file))
return -EBADF; return -EBADF;
...@@ -2343,12 +2715,146 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s, ...@@ -2343,12 +2715,146 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
return 0; return 0;
} }
static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, static int io_grab_files(struct io_kiocb *req)
struct sqe_submit *s)
{ {
int ret = -EBADF;
struct io_ring_ctx *ctx = req->ctx;
rcu_read_lock();
spin_lock_irq(&ctx->inflight_lock);
/*
* We use the f_ops->flush() handler to ensure that we can flush
* out work accessing these files if the fd is closed. Check if
* the fd has changed since we started down this path, and disallow
* this operation if it has.
*/
if (fcheck(req->submit.ring_fd) == req->submit.ring_file) {
list_add(&req->inflight_entry, &ctx->inflight_list);
req->flags |= REQ_F_INFLIGHT;
req->work.files = current->files;
ret = 0;
}
spin_unlock_irq(&ctx->inflight_lock);
rcu_read_unlock();
return ret;
}
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
struct io_kiocb *req = container_of(timer, struct io_kiocb,
timeout.timer);
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *prev = NULL;
unsigned long flags;
spin_lock_irqsave(&ctx->completion_lock, flags);
/*
* We don't expect the list to be empty, that will only happen if we
* race with the completion of the linked work.
*/
if (!list_empty(&req->list)) {
prev = list_entry(req->list.prev, struct io_kiocb, link_list);
if (refcount_inc_not_zero(&prev->refs))
list_del_init(&req->list);
else
prev = NULL;
}
spin_unlock_irqrestore(&ctx->completion_lock, flags);
if (prev) {
io_async_find_and_cancel(ctx, req, prev->user_data, NULL);
io_put_req(prev);
} else {
io_cqring_add_event(req, -ETIME);
io_put_req(req);
}
return HRTIMER_NORESTART;
}
static void io_queue_linked_timeout(struct io_kiocb *req, struct timespec64 *ts,
enum hrtimer_mode *mode)
{
struct io_ring_ctx *ctx = req->ctx;
/*
* If the list is now empty, then our linked request finished before
* we got a chance to set up the timer
*/
spin_lock_irq(&ctx->completion_lock);
if (!list_empty(&req->list)) {
req->timeout.timer.function = io_link_timeout_fn;
hrtimer_start(&req->timeout.timer, timespec64_to_ktime(*ts),
*mode);
}
spin_unlock_irq(&ctx->completion_lock);
/* drop submission reference */
io_put_req(req);
}
static int io_validate_link_timeout(const struct io_uring_sqe *sqe,
struct timespec64 *ts)
{
if (sqe->ioprio || sqe->buf_index || sqe->len != 1 || sqe->off)
return -EINVAL;
if (sqe->timeout_flags & ~IORING_TIMEOUT_ABS)
return -EINVAL;
if (get_timespec64(ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
return 0;
}
static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req,
struct timespec64 *ts,
enum hrtimer_mode *mode)
{
struct io_kiocb *nxt;
int ret; int ret;
ret = __io_submit_sqe(ctx, req, s, true); if (!(req->flags & REQ_F_LINK))
return NULL;
nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
if (!nxt || nxt->submit.sqe->opcode != IORING_OP_LINK_TIMEOUT)
return NULL;
ret = io_validate_link_timeout(nxt->submit.sqe, ts);
if (ret) {
list_del_init(&nxt->list);
io_cqring_add_event(nxt, ret);
io_double_put_req(nxt);
return ERR_PTR(-ECANCELED);
}
if (nxt->submit.sqe->timeout_flags & IORING_TIMEOUT_ABS)
*mode = HRTIMER_MODE_ABS;
else
*mode = HRTIMER_MODE_REL;
req->flags |= REQ_F_LINK_TIMEOUT;
hrtimer_init(&nxt->timeout.timer, CLOCK_MONOTONIC, *mode);
return nxt;
}
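
To show how the linked-timeout pair is expected to arrive from userspace (purely illustrative; liburing and io_uring_prep_link_timeout() are assumptions, not part of this patch): the timeout SQE must immediately follow the request it bounds, mirroring the list_first_entry_or_null() lookup above.

/* Sketch (assumes liburing): bound a readv with a one-second linked timeout. */
#include <liburing.h>
#include <sys/uio.h>

static int readv_with_deadline(struct io_uring *ring, int fd, struct iovec *iov)
{
	struct io_uring_sqe *sqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_readv(sqe, fd, iov, 1, 0);
	sqe->flags |= IOSQE_IO_LINK;		/* REQ_F_LINK on the kernel side */
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);	/* relative; IORING_TIMEOUT_ABS for absolute */
	sqe->user_data = 2;

	/* expect two CQEs: the readv (or -ECANCELED if the timer won) and the timeout itself */
	return io_uring_submit(ring);
}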
static int __io_queue_sqe(struct io_kiocb *req)
{
enum hrtimer_mode mode;
struct io_kiocb *nxt;
struct timespec64 ts;
int ret;
nxt = io_prep_linked_timeout(req, &ts, &mode);
if (IS_ERR(nxt)) {
ret = PTR_ERR(nxt);
nxt = NULL;
goto err;
}
ret = __io_submit_sqe(req, NULL, true);
/* /*
* We async punt it if the file wasn't marked NOWAIT, or if the file * We async punt it if the file wasn't marked NOWAIT, or if the file
...@@ -2356,36 +2862,47 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2356,36 +2862,47 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
*/ */
if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
(req->flags & REQ_F_MUST_PUNT))) { (req->flags & REQ_F_MUST_PUNT))) {
struct sqe_submit *s = &req->submit;
struct io_uring_sqe *sqe_copy; struct io_uring_sqe *sqe_copy;
sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
if (sqe_copy) { if (sqe_copy) {
struct async_list *list;
s->sqe = sqe_copy; s->sqe = sqe_copy;
memcpy(&req->submit, s, sizeof(*s)); if (req->work.flags & IO_WQ_WORK_NEEDS_FILES) {
list = io_async_list_from_sqe(ctx, s->sqe); ret = io_grab_files(req);
if (!io_add_to_prev_work(list, req)) { if (ret) {
if (list) kfree(sqe_copy);
atomic_inc(&list->cnt); goto err;
INIT_WORK(&req->work, io_sq_wq_submit_work); }
io_queue_async_work(ctx, req);
} }
/* /*
* Queued up for async execution, worker will release * Queued up for async execution, worker will release
* submit reference when the iocb is actually submitted. * submit reference when the iocb is actually submitted.
*/ */
io_queue_async_work(req);
if (nxt)
io_queue_linked_timeout(nxt, &ts, &mode);
return 0; return 0;
} }
} }
err:
/* drop submission reference */ /* drop submission reference */
io_put_req(req); io_put_req(req);
if (nxt) {
if (!ret)
io_queue_linked_timeout(nxt, &ts, &mode);
else
io_put_req(nxt);
}
/* and drop final reference, if we failed */ /* and drop final reference, if we failed */
if (ret) { if (ret) {
io_cqring_add_event(ctx, req->user_data, ret); io_cqring_add_event(req, ret);
if (req->flags & REQ_F_LINK) if (req->flags & REQ_F_LINK)
req->flags |= REQ_F_FAIL_LINK; req->flags |= REQ_F_FAIL_LINK;
io_put_req(req); io_put_req(req);
...@@ -2394,31 +2911,30 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2394,31 +2911,30 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return ret; return ret;
} }
static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, static int io_queue_sqe(struct io_kiocb *req)
struct sqe_submit *s)
{ {
int ret; int ret;
ret = io_req_defer(ctx, req, s->sqe); ret = io_req_defer(req);
if (ret) { if (ret) {
if (ret != -EIOCBQUEUED) { if (ret != -EIOCBQUEUED) {
io_free_req(req); io_cqring_add_event(req, ret);
io_cqring_add_event(ctx, s->sqe->user_data, ret); io_double_put_req(req);
} }
return 0; return 0;
} }
return __io_queue_sqe(ctx, req, s); return __io_queue_sqe(req);
} }
static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req, static int io_queue_link_head(struct io_kiocb *req, struct io_kiocb *shadow)
struct sqe_submit *s, struct io_kiocb *shadow)
{ {
int ret; int ret;
int need_submit = false; int need_submit = false;
struct io_ring_ctx *ctx = req->ctx;
if (!shadow) if (!shadow)
return io_queue_sqe(ctx, req, s); return io_queue_sqe(req);
/* /*
* Mark the first IO in link list as DRAIN, let all the following * Mark the first IO in link list as DRAIN, let all the following
...@@ -2426,12 +2942,12 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2426,12 +2942,12 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
* list. * list.
*/ */
req->flags |= REQ_F_IO_DRAIN; req->flags |= REQ_F_IO_DRAIN;
ret = io_req_defer(ctx, req, s->sqe); ret = io_req_defer(req);
if (ret) { if (ret) {
if (ret != -EIOCBQUEUED) { if (ret != -EIOCBQUEUED) {
io_free_req(req); io_cqring_add_event(req, ret);
io_double_put_req(req);
__io_free_req(shadow); __io_free_req(shadow);
io_cqring_add_event(ctx, s->sqe->user_data, ret);
return 0; return 0;
} }
} else { } else {
...@@ -2444,47 +2960,42 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req, ...@@ -2444,47 +2960,42 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
/* Insert shadow req to defer_list, blocking next IOs */ /* Insert shadow req to defer_list, blocking next IOs */
spin_lock_irq(&ctx->completion_lock); spin_lock_irq(&ctx->completion_lock);
trace_io_uring_defer(ctx, shadow, true);
list_add_tail(&shadow->list, &ctx->defer_list); list_add_tail(&shadow->list, &ctx->defer_list);
spin_unlock_irq(&ctx->completion_lock); spin_unlock_irq(&ctx->completion_lock);
if (need_submit) if (need_submit)
return __io_queue_sqe(ctx, req, s); return __io_queue_sqe(req);
return 0; return 0;
} }
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK) #define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s, static void io_submit_sqe(struct io_kiocb *req, struct io_submit_state *state,
struct io_submit_state *state, struct io_kiocb **link) struct io_kiocb **link)
{ {
struct io_uring_sqe *sqe_copy; struct io_uring_sqe *sqe_copy;
struct io_kiocb *req; struct sqe_submit *s = &req->submit;
struct io_ring_ctx *ctx = req->ctx;
int ret; int ret;
req->user_data = s->sqe->user_data;
/* enforce forwards compatibility on users */ /* enforce forwards compatibility on users */
if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) { if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
ret = -EINVAL; ret = -EINVAL;
goto err; goto err_req;
}
req = io_get_req(ctx, state);
if (unlikely(!req)) {
ret = -EAGAIN;
goto err;
} }
ret = io_req_set_file(ctx, s, state, req); ret = io_req_set_file(state, req);
if (unlikely(ret)) { if (unlikely(ret)) {
err_req: err_req:
io_free_req(req); io_cqring_add_event(req, ret);
err: io_double_put_req(req);
io_cqring_add_event(ctx, s->sqe->user_data, ret);
return; return;
} }
req->user_data = s->sqe->user_data;
/* /*
* If we already have a head request, queue this one for async * If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but * submittal once the head completes. If we don't have a head but
...@@ -2502,16 +3013,19 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s, ...@@ -2502,16 +3013,19 @@ static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
} }
s->sqe = sqe_copy; s->sqe = sqe_copy;
memcpy(&req->submit, s, sizeof(*s)); trace_io_uring_link(ctx, req, prev);
list_add_tail(&req->list, &prev->link_list); list_add_tail(&req->list, &prev->link_list);
} else if (s->sqe->flags & IOSQE_IO_LINK) { } else if (s->sqe->flags & IOSQE_IO_LINK) {
req->flags |= REQ_F_LINK; req->flags |= REQ_F_LINK;
memcpy(&req->submit, s, sizeof(*s));
INIT_LIST_HEAD(&req->link_list); INIT_LIST_HEAD(&req->link_list);
*link = req; *link = req;
} else if (READ_ONCE(s->sqe->opcode) == IORING_OP_LINK_TIMEOUT) {
/* Only valid as a linked SQE */
ret = -EINVAL;
goto err_req;
} else { } else {
io_queue_sqe(ctx, req, s); io_queue_sqe(req);
} }
} }
...@@ -2582,7 +3096,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s) ...@@ -2582,7 +3096,7 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
head = READ_ONCE(sq_array[head & ctx->sq_mask]); head = READ_ONCE(sq_array[head & ctx->sq_mask]);
if (head < ctx->sq_entries) { if (head < ctx->sq_entries) {
s->index = head; s->ring_file = NULL;
s->sqe = &ctx->sq_sqes[head]; s->sqe = &ctx->sq_sqes[head];
s->sequence = ctx->cached_sq_head; s->sequence = ctx->cached_sq_head;
ctx->cached_sq_head++; ctx->cached_sq_head++;
...@@ -2597,13 +3111,19 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s) ...@@ -2597,13 +3111,19 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
} }
static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
bool has_user, bool mm_fault) struct file *ring_file, int ring_fd,
struct mm_struct **mm, bool async)
{ {
struct io_submit_state state, *statep = NULL; struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL; struct io_kiocb *link = NULL;
struct io_kiocb *shadow_req = NULL; struct io_kiocb *shadow_req = NULL;
bool prev_was_link = false;
int i, submitted = 0; int i, submitted = 0;
bool mm_fault = false;
if (!list_empty(&ctx->cq_overflow_list)) {
io_cqring_overflow_flush(ctx, false);
return -EBUSY;
}
if (nr > IO_PLUG_THRESHOLD) { if (nr > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, ctx, nr); io_submit_state_start(&state, ctx, nr);
...@@ -2611,23 +3131,31 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, ...@@ -2611,23 +3131,31 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
} }
for (i = 0; i < nr; i++) { for (i = 0; i < nr; i++) {
struct sqe_submit s; struct io_kiocb *req;
unsigned int sqe_flags;
if (!io_get_sqring(ctx, &s)) req = io_get_req(ctx, statep);
if (unlikely(!req)) {
if (!submitted)
submitted = -EAGAIN;
break;
}
if (!io_get_sqring(ctx, &req->submit)) {
__io_free_req(req);
break; break;
}
/* if (io_sqe_needs_user(req->submit.sqe) && !*mm) {
* If previous wasn't linked and we have a linked command, mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
* that's the end of the chain. Submit the previous link. if (!mm_fault) {
*/ use_mm(ctx->sqo_mm);
if (!prev_was_link && link) { *mm = ctx->sqo_mm;
io_queue_link_head(ctx, link, &link->submit, shadow_req); }
link = NULL;
shadow_req = NULL;
} }
prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) { sqe_flags = req->submit.sqe->flags;
if (link && (sqe_flags & IOSQE_IO_DRAIN)) {
if (!shadow_req) { if (!shadow_req) {
shadow_req = io_get_req(ctx, NULL); shadow_req = io_get_req(ctx, NULL);
if (unlikely(!shadow_req)) if (unlikely(!shadow_req))
...@@ -2635,27 +3163,39 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, ...@@ -2635,27 +3163,39 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN); shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
refcount_dec(&shadow_req->refs); refcount_dec(&shadow_req->refs);
} }
shadow_req->sequence = s.sequence; shadow_req->sequence = req->submit.sequence;
} }
out: out:
if (unlikely(mm_fault)) { req->submit.ring_file = ring_file;
io_cqring_add_event(ctx, s.sqe->user_data, req->submit.ring_fd = ring_fd;
-EFAULT); req->submit.has_user = *mm != NULL;
} else { req->submit.in_async = async;
s.has_user = has_user; req->submit.needs_fixed_file = async;
s.needs_lock = true; trace_io_uring_submit_sqe(ctx, req->submit.sqe->user_data,
s.needs_fixed_file = true; true, async);
io_submit_sqe(ctx, &s, statep, &link); io_submit_sqe(req, statep, &link);
submitted++; submitted++;
/*
* If previous wasn't linked and we have a linked command,
* that's the end of the chain. Submit the previous link.
*/
if (!(sqe_flags & IOSQE_IO_LINK) && link) {
io_queue_link_head(link, shadow_req);
link = NULL;
shadow_req = NULL;
} }
} }
if (link) if (link)
io_queue_link_head(ctx, link, &link->submit, shadow_req); io_queue_link_head(link, shadow_req);
if (statep) if (statep)
io_submit_state_end(&state); io_submit_state_end(&state);
/* Commit SQ ring head once we've consumed and submitted all SQEs */
io_commit_sqring(ctx);
return submitted; return submitted;
} }
...@@ -2667,15 +3207,15 @@ static int io_sq_thread(void *data) ...@@ -2667,15 +3207,15 @@ static int io_sq_thread(void *data)
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
unsigned inflight; unsigned inflight;
unsigned long timeout; unsigned long timeout;
int ret;
complete(&ctx->sqo_thread_started); complete(&ctx->completions[1]);
old_fs = get_fs(); old_fs = get_fs();
set_fs(USER_DS); set_fs(USER_DS);
timeout = inflight = 0; ret = timeout = inflight = 0;
while (!kthread_should_park()) { while (!kthread_should_park()) {
bool mm_fault = false;
unsigned int to_submit; unsigned int to_submit;
if (inflight) { if (inflight) {
...@@ -2710,13 +3250,21 @@ static int io_sq_thread(void *data) ...@@ -2710,13 +3250,21 @@ static int io_sq_thread(void *data)
} }
to_submit = io_sqring_entries(ctx); to_submit = io_sqring_entries(ctx);
if (!to_submit) {
/*
* If submit got -EBUSY, flag us as needing the application
* to enter the kernel to reap and flush events.
*/
if (!to_submit || ret == -EBUSY) {
/* /*
* We're polling. If we're within the defined idle * We're polling. If we're within the defined idle
* period, then let us spin without work before going * period, then let us spin without work before going
* to sleep. * to sleep. The exception is if we got EBUSY doing
* more IO, we should wait for the application to
* reap events and wake us up.
*/ */
if (inflight || !time_after(jiffies, timeout)) { if (inflight ||
(!time_after(jiffies, timeout) && ret != -EBUSY)) {
cond_resched(); cond_resched();
continue; continue;
} }
...@@ -2742,7 +3290,7 @@ static int io_sq_thread(void *data) ...@@ -2742,7 +3290,7 @@ static int io_sq_thread(void *data)
smp_mb(); smp_mb();
to_submit = io_sqring_entries(ctx); to_submit = io_sqring_entries(ctx);
if (!to_submit) { if (!to_submit || ret == -EBUSY) {
if (kthread_should_park()) { if (kthread_should_park()) {
finish_wait(&ctx->sqo_wait, &wait); finish_wait(&ctx->sqo_wait, &wait);
break; break;
...@@ -2760,21 +3308,10 @@ static int io_sq_thread(void *data) ...@@ -2760,21 +3308,10 @@ static int io_sq_thread(void *data)
ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
} }
/* Unless all new commands are FIXED regions, grab mm */
if (!cur_mm) {
mm_fault = !mmget_not_zero(ctx->sqo_mm);
if (!mm_fault) {
use_mm(ctx->sqo_mm);
cur_mm = ctx->sqo_mm;
}
}
to_submit = min(to_submit, ctx->sq_entries); to_submit = min(to_submit, ctx->sq_entries);
inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL, ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
mm_fault); if (ret > 0)
inflight += ret;
/* Commit SQ ring head once we've consumed all SQEs */
io_commit_sqring(ctx);
} }
set_fs(old_fs); set_fs(old_fs);
...@@ -2788,65 +3325,6 @@ static int io_sq_thread(void *data) ...@@ -2788,65 +3325,6 @@ static int io_sq_thread(void *data)
return 0; return 0;
} }
static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
struct io_kiocb *shadow_req = NULL;
bool prev_was_link = false;
int i, submit = 0;
if (to_submit > IO_PLUG_THRESHOLD) {
io_submit_state_start(&state, ctx, to_submit);
statep = &state;
}
for (i = 0; i < to_submit; i++) {
struct sqe_submit s;
if (!io_get_sqring(ctx, &s))
break;
/*
* If previous wasn't linked and we have a linked command,
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
io_queue_link_head(ctx, link, &link->submit, shadow_req);
link = NULL;
shadow_req = NULL;
}
prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
if (!shadow_req) {
shadow_req = io_get_req(ctx, NULL);
if (unlikely(!shadow_req))
goto out;
shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
refcount_dec(&shadow_req->refs);
}
shadow_req->sequence = s.sequence;
}
out:
s.has_user = true;
s.needs_lock = false;
s.needs_fixed_file = false;
submit++;
io_submit_sqe(ctx, &s, statep, &link);
}
if (link)
io_queue_link_head(ctx, link, &link->submit, shadow_req);
if (statep)
io_submit_state_end(statep);
io_commit_sqring(ctx);
return submit;
}
struct io_wait_queue { struct io_wait_queue {
struct wait_queue_entry wq; struct wait_queue_entry wq;
struct io_ring_ctx *ctx; struct io_ring_ctx *ctx;
...@@ -2854,7 +3332,7 @@ struct io_wait_queue { ...@@ -2854,7 +3332,7 @@ struct io_wait_queue {
unsigned nr_timeouts; unsigned nr_timeouts;
}; };
static inline bool io_should_wake(struct io_wait_queue *iowq) static inline bool io_should_wake(struct io_wait_queue *iowq, bool noflush)
{ {
struct io_ring_ctx *ctx = iowq->ctx; struct io_ring_ctx *ctx = iowq->ctx;
...@@ -2863,7 +3341,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq) ...@@ -2863,7 +3341,7 @@ static inline bool io_should_wake(struct io_wait_queue *iowq)
* started waiting. For timeouts, we always want to return to userspace, * started waiting. For timeouts, we always want to return to userspace,
* regardless of event count. * regardless of event count.
*/ */
return io_cqring_events(ctx->rings) >= iowq->to_wait || return io_cqring_events(ctx, noflush) >= iowq->to_wait ||
atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts; atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
} }
...@@ -2873,7 +3351,8 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode, ...@@ -2873,7 +3351,8 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
wq); wq);
if (!io_should_wake(iowq)) /* use noflush == true, as we can't safely rely on locking context */
if (!io_should_wake(iowq, true))
return -1; return -1;
return autoremove_wake_function(curr, mode, wake_flags, key); return autoremove_wake_function(curr, mode, wake_flags, key);
...@@ -2896,9 +3375,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, ...@@ -2896,9 +3375,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
.to_wait = min_events, .to_wait = min_events,
}; };
struct io_rings *rings = ctx->rings; struct io_rings *rings = ctx->rings;
int ret; int ret = 0;
if (io_cqring_events(rings) >= min_events) if (io_cqring_events(ctx, false) >= min_events)
return 0; return 0;
if (sig) { if (sig) {
...@@ -2914,24 +3393,22 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, ...@@ -2914,24 +3393,22 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret; return ret;
} }
ret = 0;
iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
trace_io_uring_cqring_wait(ctx, min_events);
do { do {
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
TASK_INTERRUPTIBLE); TASK_INTERRUPTIBLE);
if (io_should_wake(&iowq)) if (io_should_wake(&iowq, false))
break; break;
schedule(); schedule();
if (signal_pending(current)) { if (signal_pending(current)) {
ret = -ERESTARTSYS; ret = -EINTR;
break; break;
} }
} while (1); } while (1);
finish_wait(&ctx->wait, &iowq.wq); finish_wait(&ctx->wait, &iowq.wq);
restore_saved_sigmask_unless(ret == -ERESTARTSYS); restore_saved_sigmask_unless(ret == -EINTR);
if (ret == -ERESTARTSYS)
ret = -EINTR;
return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0; return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
} }
...@@ -2949,19 +3426,29 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx) ...@@ -2949,19 +3426,29 @@ static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
#else #else
int i; int i;
for (i = 0; i < ctx->nr_user_files; i++) for (i = 0; i < ctx->nr_user_files; i++) {
fput(ctx->user_files[i]); struct file *file;
file = io_file_from_index(ctx, i);
if (file)
fput(file);
}
#endif #endif
} }
static int io_sqe_files_unregister(struct io_ring_ctx *ctx) static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{ {
if (!ctx->user_files) unsigned nr_tables, i;
if (!ctx->file_table)
return -ENXIO; return -ENXIO;
__io_sqe_files_unregister(ctx); __io_sqe_files_unregister(ctx);
kfree(ctx->user_files); nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
ctx->user_files = NULL; for (i = 0; i < nr_tables; i++)
kfree(ctx->file_table[i].files);
kfree(ctx->file_table);
ctx->file_table = NULL;
ctx->nr_user_files = 0; ctx->nr_user_files = 0;
return 0; return 0;
} }
...@@ -2969,7 +3456,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx) ...@@ -2969,7 +3456,7 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
static void io_sq_thread_stop(struct io_ring_ctx *ctx) static void io_sq_thread_stop(struct io_ring_ctx *ctx)
{ {
if (ctx->sqo_thread) { if (ctx->sqo_thread) {
wait_for_completion(&ctx->sqo_thread_started); wait_for_completion(&ctx->completions[1]);
/* /*
* The park is a bit of a work-around, without it we get * The park is a bit of a work-around, without it we get
* warning spews on shutdown with SQPOLL set and affinity * warning spews on shutdown with SQPOLL set and affinity
...@@ -2983,15 +3470,11 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx) ...@@ -2983,15 +3470,11 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
static void io_finish_async(struct io_ring_ctx *ctx) static void io_finish_async(struct io_ring_ctx *ctx)
{ {
int i;
io_sq_thread_stop(ctx); io_sq_thread_stop(ctx);
for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) { if (ctx->io_wq) {
if (ctx->sqo_wq[i]) { io_wq_destroy(ctx->io_wq);
destroy_workqueue(ctx->sqo_wq[i]); ctx->io_wq = NULL;
ctx->sqo_wq[i] = NULL;
}
} }
} }
...@@ -2999,11 +3482,9 @@ static void io_finish_async(struct io_ring_ctx *ctx) ...@@ -2999,11 +3482,9 @@ static void io_finish_async(struct io_ring_ctx *ctx)
static void io_destruct_skb(struct sk_buff *skb) static void io_destruct_skb(struct sk_buff *skb)
{ {
struct io_ring_ctx *ctx = skb->sk->sk_user_data; struct io_ring_ctx *ctx = skb->sk->sk_user_data;
int i;
for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) if (ctx->io_wq)
if (ctx->sqo_wq[i]) io_wq_flush(ctx->io_wq);
flush_workqueue(ctx->sqo_wq[i]);
unix_destruct_scm(skb); unix_destruct_scm(skb);
} }
...@@ -3018,7 +3499,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) ...@@ -3018,7 +3499,7 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
struct sock *sk = ctx->ring_sock->sk; struct sock *sk = ctx->ring_sock->sk;
struct scm_fp_list *fpl; struct scm_fp_list *fpl;
struct sk_buff *skb; struct sk_buff *skb;
int i; int i, nr_files;
if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) { if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
unsigned long inflight = ctx->user->unix_inflight + nr; unsigned long inflight = ctx->user->unix_inflight + nr;
...@@ -3038,21 +3519,33 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset) ...@@ -3038,21 +3519,33 @@ static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
} }
skb->sk = sk; skb->sk = sk;
skb->destructor = io_destruct_skb;
nr_files = 0;
fpl->user = get_uid(ctx->user); fpl->user = get_uid(ctx->user);
for (i = 0; i < nr; i++) { for (i = 0; i < nr; i++) {
fpl->fp[i] = get_file(ctx->user_files[i + offset]); struct file *file = io_file_from_index(ctx, i + offset);
unix_inflight(fpl->user, fpl->fp[i]);
if (!file)
continue;
fpl->fp[nr_files] = get_file(file);
unix_inflight(fpl->user, fpl->fp[nr_files]);
nr_files++;
} }
fpl->max = fpl->count = nr; if (nr_files) {
fpl->max = SCM_MAX_FD;
fpl->count = nr_files;
UNIXCB(skb).fp = fpl; UNIXCB(skb).fp = fpl;
skb->destructor = io_destruct_skb;
refcount_add(skb->truesize, &sk->sk_wmem_alloc); refcount_add(skb->truesize, &sk->sk_wmem_alloc);
skb_queue_head(&sk->sk_receive_queue, skb); skb_queue_head(&sk->sk_receive_queue, skb);
for (i = 0; i < nr; i++) for (i = 0; i < nr_files; i++)
fput(fpl->fp[i]); fput(fpl->fp[i]);
} else {
kfree_skb(skb);
kfree(fpl);
}
return 0; return 0;
} }
...@@ -3083,7 +3576,10 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx) ...@@ -3083,7 +3576,10 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
return 0; return 0;
while (total < ctx->nr_user_files) { while (total < ctx->nr_user_files) {
fput(ctx->user_files[total]); struct file *file = io_file_from_index(ctx, total);
if (file)
fput(file);
total++; total++;
} }
...@@ -3096,33 +3592,79 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx) ...@@ -3096,33 +3592,79 @@ static int io_sqe_files_scm(struct io_ring_ctx *ctx)
} }
#endif #endif
static int io_sqe_alloc_file_tables(struct io_ring_ctx *ctx, unsigned nr_tables,
unsigned nr_files)
{
int i;
for (i = 0; i < nr_tables; i++) {
struct fixed_file_table *table = &ctx->file_table[i];
unsigned this_files;
this_files = min(nr_files, IORING_MAX_FILES_TABLE);
table->files = kcalloc(this_files, sizeof(struct file *),
GFP_KERNEL);
if (!table->files)
break;
nr_files -= this_files;
}
if (i == nr_tables)
return 0;
for (i = 0; i < nr_tables; i++) {
struct fixed_file_table *table = &ctx->file_table[i];
kfree(table->files);
}
return 1;
}
static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args) unsigned nr_args)
{ {
__s32 __user *fds = (__s32 __user *) arg; __s32 __user *fds = (__s32 __user *) arg;
unsigned nr_tables;
int fd, ret = 0; int fd, ret = 0;
unsigned i; unsigned i;
if (ctx->user_files) if (ctx->file_table)
return -EBUSY; return -EBUSY;
if (!nr_args) if (!nr_args)
return -EINVAL; return -EINVAL;
if (nr_args > IORING_MAX_FIXED_FILES) if (nr_args > IORING_MAX_FIXED_FILES)
return -EMFILE; return -EMFILE;
ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL); nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
if (!ctx->user_files) ctx->file_table = kcalloc(nr_tables, sizeof(struct fixed_file_table),
GFP_KERNEL);
if (!ctx->file_table)
return -ENOMEM; return -ENOMEM;
for (i = 0; i < nr_args; i++) { if (io_sqe_alloc_file_tables(ctx, nr_tables, nr_args)) {
kfree(ctx->file_table);
ctx->file_table = NULL;
return -ENOMEM;
}
for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
struct fixed_file_table *table;
unsigned index;
ret = -EFAULT; ret = -EFAULT;
if (copy_from_user(&fd, &fds[i], sizeof(fd))) if (copy_from_user(&fd, &fds[i], sizeof(fd)))
break; break;
/* allow sparse sets */
if (fd == -1) {
ret = 0;
continue;
}
ctx->user_files[i] = fget(fd); table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
index = i & IORING_FILE_TABLE_MASK;
table->files[index] = fget(fd);
ret = -EBADF; ret = -EBADF;
if (!ctx->user_files[i]) if (!table->files[index])
break; break;
/* /*
* Don't allow io_uring instances to be registered. If UNIX * Don't allow io_uring instances to be registered. If UNIX
...@@ -3131,20 +3673,26 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, ...@@ -3131,20 +3673,26 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
* handle it just fine, but there's still no point in allowing * handle it just fine, but there's still no point in allowing
* a ring fd as it doesn't support regular read/write anyway. * a ring fd as it doesn't support regular read/write anyway.
*/ */
if (ctx->user_files[i]->f_op == &io_uring_fops) { if (table->files[index]->f_op == &io_uring_fops) {
fput(ctx->user_files[i]); fput(table->files[index]);
break; break;
} }
ctx->nr_user_files++;
ret = 0; ret = 0;
} }
if (ret) { if (ret) {
for (i = 0; i < ctx->nr_user_files; i++) for (i = 0; i < ctx->nr_user_files; i++) {
fput(ctx->user_files[i]); struct file *file;
kfree(ctx->user_files); file = io_file_from_index(ctx, i);
ctx->user_files = NULL; if (file)
fput(file);
}
for (i = 0; i < nr_tables; i++)
kfree(ctx->file_table[i].files);
kfree(ctx->file_table);
ctx->file_table = NULL;
ctx->nr_user_files = 0; ctx->nr_user_files = 0;
return ret; return ret;
} }
...@@ -3156,9 +3704,201 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg, ...@@ -3156,9 +3704,201 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
return ret; return ret;
} }
static void io_sqe_file_unregister(struct io_ring_ctx *ctx, int index)
{
#if defined(CONFIG_UNIX)
struct file *file = io_file_from_index(ctx, index);
struct sock *sock = ctx->ring_sock->sk;
struct sk_buff_head list, *head = &sock->sk_receive_queue;
struct sk_buff *skb;
int i;
__skb_queue_head_init(&list);
/*
* Find the skb that holds this file in its SCM_RIGHTS. When found,
* remove this entry and rearrange the file array.
*/
skb = skb_dequeue(head);
while (skb) {
struct scm_fp_list *fp;
fp = UNIXCB(skb).fp;
for (i = 0; i < fp->count; i++) {
int left;
if (fp->fp[i] != file)
continue;
unix_notinflight(fp->user, fp->fp[i]);
left = fp->count - 1 - i;
if (left) {
memmove(&fp->fp[i], &fp->fp[i + 1],
left * sizeof(struct file *));
}
fp->count--;
if (!fp->count) {
kfree_skb(skb);
skb = NULL;
} else {
__skb_queue_tail(&list, skb);
}
fput(file);
file = NULL;
break;
}
if (!file)
break;
__skb_queue_tail(&list, skb);
skb = skb_dequeue(head);
}
if (skb_peek(&list)) {
spin_lock_irq(&head->lock);
while ((skb = __skb_dequeue(&list)) != NULL)
__skb_queue_tail(head, skb);
spin_unlock_irq(&head->lock);
}
#else
fput(io_file_from_index(ctx, index));
#endif
}
static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
int index)
{
#if defined(CONFIG_UNIX)
struct sock *sock = ctx->ring_sock->sk;
struct sk_buff_head *head = &sock->sk_receive_queue;
struct sk_buff *skb;
/*
* See if we can merge this file into an existing skb SCM_RIGHTS
* file set. If there's no room, fall back to allocating a new skb
* and filling it in.
*/
spin_lock_irq(&head->lock);
skb = skb_peek(head);
if (skb) {
struct scm_fp_list *fpl = UNIXCB(skb).fp;
if (fpl->count < SCM_MAX_FD) {
__skb_unlink(skb, head);
spin_unlock_irq(&head->lock);
fpl->fp[fpl->count] = get_file(file);
unix_inflight(fpl->user, fpl->fp[fpl->count]);
fpl->count++;
spin_lock_irq(&head->lock);
__skb_queue_head(head, skb);
} else {
skb = NULL;
}
}
spin_unlock_irq(&head->lock);
if (skb) {
fput(file);
return 0;
}
return __io_sqe_files_scm(ctx, 1, index);
#else
return 0;
#endif
}
static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
unsigned nr_args)
{
struct io_uring_files_update up;
__s32 __user *fds;
int fd, i, err;
__u32 done;
if (!ctx->file_table)
return -ENXIO;
if (!nr_args)
return -EINVAL;
if (copy_from_user(&up, arg, sizeof(up)))
return -EFAULT;
if (check_add_overflow(up.offset, nr_args, &done))
return -EOVERFLOW;
if (done > ctx->nr_user_files)
return -EINVAL;
done = 0;
fds = (__s32 __user *) up.fds;
while (nr_args) {
struct fixed_file_table *table;
unsigned index;
err = 0;
if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
err = -EFAULT;
break;
}
i = array_index_nospec(up.offset, ctx->nr_user_files);
table = &ctx->file_table[i >> IORING_FILE_TABLE_SHIFT];
index = i & IORING_FILE_TABLE_MASK;
if (table->files[index]) {
io_sqe_file_unregister(ctx, i);
table->files[index] = NULL;
}
if (fd != -1) {
struct file *file;
file = fget(fd);
if (!file) {
err = -EBADF;
break;
}
/*
* Don't allow io_uring instances to be registered. If
* UNIX isn't enabled, then this causes a reference
* cycle and this instance can never get freed. If UNIX
* is enabled we'll handle it just fine, but there's
* still no point in allowing a ring fd as it doesn't
* support regular read/write anyway.
*/
if (file->f_op == &io_uring_fops) {
fput(file);
err = -EBADF;
break;
}
table->files[index] = file;
err = io_sqe_file_register(ctx, file, i);
if (err)
break;
}
nr_args--;
done++;
up.offset++;
}
return done ? done : err;
}
static void io_put_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
io_put_req(req);
}
static void io_get_work(struct io_wq_work *work)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
refcount_inc(&req->refs);
}
static int io_sq_offload_start(struct io_ring_ctx *ctx, static int io_sq_offload_start(struct io_ring_ctx *ctx,
struct io_uring_params *p) struct io_uring_params *p)
{ {
unsigned concurrency;
int ret; int ret;
init_waitqueue_head(&ctx->sqo_wait); init_waitqueue_head(&ctx->sqo_wait);
...@@ -3202,26 +3942,13 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, ...@@ -3202,26 +3942,13 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
goto err; goto err;
} }
/* Do QD, or 2 * CPUS, whatever is smallest */ /* Do QD, or 4 * CPUS, whatever is smallest */
ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq", concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
WQ_UNBOUND | WQ_FREEZABLE, ctx->io_wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user,
min(ctx->sq_entries - 1, 2 * num_online_cpus())); io_get_work, io_put_work);
if (!ctx->sqo_wq[0]) { if (IS_ERR(ctx->io_wq)) {
ret = -ENOMEM; ret = PTR_ERR(ctx->io_wq);
goto err; ctx->io_wq = NULL;
}
/*
* This is for buffered writes, where we want to limit the parallelism
* due to file locking in file systems. As "normal" buffered writes
* should parellelize on writeout quite nicely, limit us to having 2
* pending. This avoids massive contention on the inode when doing
* buffered async writes.
*/
ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
WQ_UNBOUND | WQ_FREEZABLE, 2);
if (!ctx->sqo_wq[1]) {
ret = -ENOMEM;
goto err; goto err;
} }
...@@ -3567,6 +4294,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx) ...@@ -3567,6 +4294,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_unaccount_mem(ctx->user, io_unaccount_mem(ctx->user,
ring_pages(ctx->sq_entries, ctx->cq_entries)); ring_pages(ctx->sq_entries, ctx->cq_entries));
free_uid(ctx->user); free_uid(ctx->user);
kfree(ctx->completions);
kmem_cache_free(req_cachep, ctx->fallback_req);
kfree(ctx); kfree(ctx);
} }
...@@ -3605,8 +4334,15 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) ...@@ -3605,8 +4334,15 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_kill_timeouts(ctx); io_kill_timeouts(ctx);
io_poll_remove_all(ctx); io_poll_remove_all(ctx);
if (ctx->io_wq)
io_wq_cancel_all(ctx->io_wq);
io_iopoll_reap_events(ctx); io_iopoll_reap_events(ctx);
wait_for_completion(&ctx->ctx_done); /* if we failed setting up the ctx, we might not have any rings */
if (ctx->rings)
io_cqring_overflow_flush(ctx, true);
wait_for_completion(&ctx->completions[0]);
io_ring_ctx_free(ctx); io_ring_ctx_free(ctx);
} }
...@@ -3619,6 +4355,53 @@ static int io_uring_release(struct inode *inode, struct file *file) ...@@ -3619,6 +4355,53 @@ static int io_uring_release(struct inode *inode, struct file *file)
return 0; return 0;
} }
static void io_uring_cancel_files(struct io_ring_ctx *ctx,
struct files_struct *files)
{
struct io_kiocb *req;
DEFINE_WAIT(wait);
while (!list_empty_careful(&ctx->inflight_list)) {
struct io_kiocb *cancel_req = NULL;
spin_lock_irq(&ctx->inflight_lock);
list_for_each_entry(req, &ctx->inflight_list, inflight_entry) {
if (req->work.files != files)
continue;
/* req is being completed, ignore */
if (!refcount_inc_not_zero(&req->refs))
continue;
cancel_req = req;
break;
}
if (cancel_req)
prepare_to_wait(&ctx->inflight_wait, &wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&ctx->inflight_lock);
/* We need to keep going until we don't find a matching req */
if (!cancel_req)
break;
io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
io_put_req(cancel_req);
schedule();
}
finish_wait(&ctx->inflight_wait, &wait);
}
static int io_uring_flush(struct file *file, void *data)
{
struct io_ring_ctx *ctx = file->private_data;
io_uring_cancel_files(ctx, data);
if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
io_cqring_overflow_flush(ctx, true);
io_wq_cancel_all(ctx->io_wq);
}
return 0;
}
static int io_uring_mmap(struct file *file, struct vm_area_struct *vma) static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
{ {
loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT; loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
...@@ -3680,14 +4463,20 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ...@@ -3680,14 +4463,20 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
*/ */
ret = 0; ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) { if (ctx->flags & IORING_SETUP_SQPOLL) {
if (!list_empty_careful(&ctx->cq_overflow_list))
io_cqring_overflow_flush(ctx, false);
if (flags & IORING_ENTER_SQ_WAKEUP) if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sqo_wait); wake_up(&ctx->sqo_wait);
submitted = to_submit; submitted = to_submit;
} else if (to_submit) { } else if (to_submit) {
to_submit = min(to_submit, ctx->sq_entries); struct mm_struct *cur_mm;
to_submit = min(to_submit, ctx->sq_entries);
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
submitted = io_ring_submit(ctx, to_submit); /* already have mm, so io_submit_sqes() won't try to grab it */
cur_mm = ctx->sqo_mm;
submitted = io_submit_sqes(ctx, to_submit, f.file, fd,
&cur_mm, false);
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
} }
if (flags & IORING_ENTER_GETEVENTS) { if (flags & IORING_ENTER_GETEVENTS) {
...@@ -3710,6 +4499,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ...@@ -3710,6 +4499,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
static const struct file_operations io_uring_fops = { static const struct file_operations io_uring_fops = {
.release = io_uring_release, .release = io_uring_release,
.flush = io_uring_flush,
.mmap = io_uring_mmap, .mmap = io_uring_mmap,
.poll = io_uring_poll, .poll = io_uring_poll,
.fasync = io_uring_fasync, .fasync = io_uring_fasync,
...@@ -3809,10 +4599,23 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) ...@@ -3809,10 +4599,23 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
* Use twice as many entries for the CQ ring. It's possible for the * Use twice as many entries for the CQ ring. It's possible for the
* application to drive a higher depth than the size of the SQ ring, * application to drive a higher depth than the size of the SQ ring,
* since the sqes are only used at submission time. This allows for * since the sqes are only used at submission time. This allows for
* some flexibility in overcommitting a bit. * some flexibility in overcommitting a bit. If the application has
* set IORING_SETUP_CQSIZE, it will have passed in the desired number
* of CQ ring entries manually.
*/ */
p->sq_entries = roundup_pow_of_two(entries); p->sq_entries = roundup_pow_of_two(entries);
if (p->flags & IORING_SETUP_CQSIZE) {
/*
* If IORING_SETUP_CQSIZE is set, we do the same roundup
* to a power-of-two, if it isn't already. We do NOT impose
* any cq vs sq ring sizing.
*/
if (p->cq_entries < p->sq_entries || p->cq_entries > IORING_MAX_CQ_ENTRIES)
return -EINVAL;
p->cq_entries = roundup_pow_of_two(p->cq_entries);
} else {
p->cq_entries = 2 * p->sq_entries; p->cq_entries = 2 * p->sq_entries;
}
user = get_uid(current_user()); user = get_uid(current_user());
account_mem = !capable(CAP_IPC_LOCK); account_mem = !capable(CAP_IPC_LOCK);
...@@ -3871,7 +4674,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p) ...@@ -3871,7 +4674,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
if (ret < 0) if (ret < 0)
goto err; goto err;
p->features = IORING_FEAT_SINGLE_MMAP; p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP;
trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
return ret; return ret;
err: err:
io_ring_ctx_wait_and_kill(ctx); io_ring_ctx_wait_and_kill(ctx);
...@@ -3897,7 +4701,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) ...@@ -3897,7 +4701,7 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
} }
if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
IORING_SETUP_SQ_AFF)) IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE))
return -EINVAL; return -EINVAL;
ret = io_uring_create(entries, &p); ret = io_uring_create(entries, &p);
...@@ -3941,7 +4745,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ...@@ -3941,7 +4745,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
* no new references will come in after we've killed the percpu ref. * no new references will come in after we've killed the percpu ref.
*/ */
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
wait_for_completion(&ctx->ctx_done); wait_for_completion(&ctx->completions[0]);
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
switch (opcode) { switch (opcode) {
...@@ -3963,6 +4767,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ...@@ -3963,6 +4767,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break; break;
ret = io_sqe_files_unregister(ctx); ret = io_sqe_files_unregister(ctx);
break; break;
case IORING_REGISTER_FILES_UPDATE:
ret = io_sqe_files_update(ctx, arg, nr_args);
break;
case IORING_REGISTER_EVENTFD: case IORING_REGISTER_EVENTFD:
ret = -EINVAL; ret = -EINVAL;
if (nr_args != 1) if (nr_args != 1)
...@@ -3981,7 +4788,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode, ...@@ -3981,7 +4788,7 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
} }
/* bring the ctx back to life */ /* bring the ctx back to life */
reinit_completion(&ctx->ctx_done); reinit_completion(&ctx->completions[0]);
percpu_ref_reinit(&ctx->refs); percpu_ref_reinit(&ctx->refs);
return ret; return ret;
} }
...@@ -4006,6 +4813,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, ...@@ -4006,6 +4813,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
mutex_lock(&ctx->uring_lock); mutex_lock(&ctx->uring_lock);
ret = __io_uring_register(ctx, opcode, arg, nr_args); ret = __io_uring_register(ctx, opcode, arg, nr_args);
mutex_unlock(&ctx->uring_lock); mutex_unlock(&ctx->uring_lock);
trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
ctx->cq_ev_fd != NULL, ret);
out_fput: out_fput:
fdput(f); fdput(f);
return ret; return ret;
......
@@ -1027,6 +1027,7 @@ header-test- += trace/events/fsi_master_gpio.h
header-test- += trace/events/huge_memory.h
header-test- += trace/events/ib_mad.h
header-test- += trace/events/ib_umad.h
header-test- += trace/events/io_uring.h
header-test- += trace/events/iscsi.h
header-test- += trace/events/jbd2.h
header-test- += trace/events/kvm.h
......
@@ -1468,6 +1468,7 @@ extern struct pid *cad_pid;
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
#define PF_IO_WORKER 0x20000000 /* Task is an IO worker */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
......
@@ -392,6 +392,9 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
extern int __sys_accept4_file(struct file *file, unsigned file_flags,
struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
......
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM io_uring
#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IO_URING_H
#include <linux/tracepoint.h>
struct io_wq_work;
/**
* io_uring_create - called after a new io_uring context was prepared
*
* @fd: corresponding file descriptor
* @ctx: pointer to a ring context structure
* @sq_entries: actual SQ size
* @cq_entries: actual CQ size
* @flags: SQ ring flags, provided to io_uring_setup(2)
*
* Allows to trace io_uring creation and provide pointer to a context, that can
* be used later to find correlated events.
*/
TRACE_EVENT(io_uring_create,
TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),
TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
TP_STRUCT__entry (
__field( int, fd )
__field( void *, ctx )
__field( u32, sq_entries )
__field( u32, cq_entries )
__field( u32, flags )
),
TP_fast_assign(
__entry->fd = fd;
__entry->ctx = ctx;
__entry->sq_entries = sq_entries;
__entry->cq_entries = cq_entries;
__entry->flags = flags;
),
TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
__entry->ctx, __entry->fd, __entry->sq_entries,
__entry->cq_entries, __entry->flags)
);
/**
* io_uring_register - called after a buffer/file/eventfd was successfully
* registered for a ring
*
* @ctx: pointer to a ring context structure
* @opcode: describes which operation to perform
* @nr_user_files: number of registered files
* @nr_user_bufs: number of registered buffers
* @cq_ev_fd: whether an eventfd is registered or not
* @ret: return code
*
* Allows to trace fixed files/buffers/eventfds, that could be registered to
* avoid an overhead of getting references to them for every operation. This
* event, together with io_uring_file_get, can provide a full picture of how
* much overhead one can reduce via fixing.
*/
TRACE_EVENT(io_uring_register,
TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
unsigned nr_bufs, bool eventfd, long ret),
TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
TP_STRUCT__entry (
__field( void *, ctx )
__field( unsigned, opcode )
__field( unsigned, nr_files )
__field( unsigned, nr_bufs )
__field( bool, eventfd )
__field( long, ret )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->opcode = opcode;
__entry->nr_files = nr_files;
__entry->nr_bufs = nr_bufs;
__entry->eventfd = eventfd;
__entry->ret = ret;
),
TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
"eventfd %d, ret %ld",
__entry->ctx, __entry->opcode, __entry->nr_files,
__entry->nr_bufs, __entry->eventfd, __entry->ret)
);
/**
* io_uring_file_get - called before getting references to an SQE file
*
* @ctx: pointer to a ring context structure
* @fd: SQE file descriptor
*
* Allows to trace out how often an SQE file reference is obtained, which can
* help figuring out if it makes sense to use fixed files, or check that fixed
* files are used correctly.
*/
TRACE_EVENT(io_uring_file_get,
TP_PROTO(void *ctx, int fd),
TP_ARGS(ctx, fd),
TP_STRUCT__entry (
__field( void *, ctx )
__field( int, fd )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->fd = fd;
),
TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
);
/**
* io_uring_queue_async_work - called before submitting a new async work
*
* @ctx: pointer to a ring context structure
* @hashed: type of workqueue, hashed or normal
* @req: pointer to a submitted request
* @work: pointer to a submitted io_wq_work
*
* Allows to trace asynchronous work submission.
*/
TRACE_EVENT(io_uring_queue_async_work,
TP_PROTO(void *ctx, int rw, void * req, struct io_wq_work *work,
unsigned int flags),
TP_ARGS(ctx, rw, req, work, flags),
TP_STRUCT__entry (
__field( void *, ctx )
__field( int, rw )
__field( void *, req )
__field( struct io_wq_work *, work )
__field( unsigned int, flags )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->rw = rw;
__entry->req = req;
__entry->work = work;
__entry->flags = flags;
),
TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
__entry->ctx, __entry->req, __entry->flags,
__entry->rw ? "hashed" : "normal", __entry->work)
);
/**
* io_uring_defer_list - called before the io_uring work added into defer_list
*
* @ctx: pointer to a ring context structure
* @req: pointer to a deferred request
* @shadow: whether request is shadow or not
*
* Allows to track deferred requests, to get an insight about what requests are
* not started immediately.
*/
TRACE_EVENT(io_uring_defer,
TP_PROTO(void *ctx, void *req, bool shadow),
TP_ARGS(ctx, req, shadow),
TP_STRUCT__entry (
__field( void *, ctx )
__field( void *, req )
__field( bool, shadow )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
__entry->shadow = shadow;
),
TP_printk("ring %p, request %p%s", __entry->ctx, __entry->req,
__entry->shadow ? ", shadow": "")
);
/**
* io_uring_link - called before the io_uring request added into link_list of
* another request
*
* @ctx: pointer to a ring context structure
* @req: pointer to a linked request
* @target_req: pointer to a previous request, that would contain @req
*
* Allows to track linked requests, to understand dependencies between requests
* and how does it influence their execution flow.
*/
TRACE_EVENT(io_uring_link,
TP_PROTO(void *ctx, void *req, void *target_req),
TP_ARGS(ctx, req, target_req),
TP_STRUCT__entry (
__field( void *, ctx )
__field( void *, req )
__field( void *, target_req )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->req = req;
__entry->target_req = target_req;
),
TP_printk("ring %p, request %p linked after %p",
__entry->ctx, __entry->req, __entry->target_req)
);
/**
* io_uring_cqring_wait - called before start waiting for an available CQE
*
* @ctx: pointer to a ring context structure
* @min_events: minimal number of events to wait for
*
* Allows to track waiting for CQE, so that we can e.g. troubleshoot
* situations, when an application wants to wait for an event, that never
* comes.
*/
TRACE_EVENT(io_uring_cqring_wait,
TP_PROTO(void *ctx, int min_events),
TP_ARGS(ctx, min_events),
TP_STRUCT__entry (
__field( void *, ctx )
__field( int, min_events )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->min_events = min_events;
),
TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
);
/**
* io_uring_fail_link - called before failing a linked request
*
* @req: request, which links were cancelled
* @link: cancelled link
*
* Allows to track linked requests cancellation, to see not only that some work
* was cancelled, but also which request was the reason.
*/
TRACE_EVENT(io_uring_fail_link,
TP_PROTO(void *req, void *link),
TP_ARGS(req, link),
TP_STRUCT__entry (
__field( void *, req )
__field( void *, link )
),
TP_fast_assign(
__entry->req = req;
__entry->link = link;
),
TP_printk("request %p, link %p", __entry->req, __entry->link)
);
/**
* io_uring_complete - called when completing an SQE
*
* @ctx: pointer to a ring context structure
* @user_data: user data associated with the request
* @res: result of the request
*
*/
TRACE_EVENT(io_uring_complete,
TP_PROTO(void *ctx, u64 user_data, long res),
TP_ARGS(ctx, user_data, res),
TP_STRUCT__entry (
__field( void *, ctx )
__field( u64, user_data )
__field( long, res )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->user_data = user_data;
__entry->res = res;
),
TP_printk("ring %p, user_data 0x%llx, result %ld",
__entry->ctx, (unsigned long long)__entry->user_data,
__entry->res)
);
/**
* io_uring_submit_sqe - called before submitting one SQE
*
* @ctx: pointer to a ring context structure
* @user_data: user data associated with the request
* @force_nonblock: whether the submission context must not block
* @sq_thread: true if sq_thread has submitted this SQE
*
* Allows to track SQE submitting, to understand what was the source of it, SQ
* thread or io_uring_enter call.
*/
TRACE_EVENT(io_uring_submit_sqe,
TP_PROTO(void *ctx, u64 user_data, bool force_nonblock, bool sq_thread),
TP_ARGS(ctx, user_data, force_nonblock, sq_thread),
TP_STRUCT__entry (
__field( void *, ctx )
__field( u64, user_data )
__field( bool, force_nonblock )
__field( bool, sq_thread )
),
TP_fast_assign(
__entry->ctx = ctx;
__entry->user_data = user_data;
__entry->force_nonblock = force_nonblock;
__entry->sq_thread = sq_thread;
),
TP_printk("ring %p, user data 0x%llx, non block %d, sq_thread %d",
__entry->ctx, (unsigned long long) __entry->user_data,
__entry->force_nonblock, __entry->sq_thread)
);
#endif /* _TRACE_IO_URING_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
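The kernel-doc blocks above describe the new tracepoints but not how they get wired up. As a hedged sketch of the standard tracepoint pattern (the corresponding include hunk in fs/io_uring.c is not part of the excerpt above, and the function name below is purely illustrative), exactly one compilation unit defines CREATE_TRACE_POINTS before pulling in the header, after which the trace_io_uring_*() calls seen in the hunks above become enableable events:

```c
/*
 * Sketch: instantiating and emitting the io_uring tracepoints.
 * CREATE_TRACE_POINTS may be defined in only one .c file per trace header;
 * every other user just includes <trace/events/io_uring.h> directly.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

static void example_trace_ring_setup(int fd, void *ctx,
				     struct io_uring_params *p)
{
	/* Mirrors the call added at the end of io_uring_create() above;
	 * compiles to a patched-out no-op unless the event is enabled. */
	trace_io_uring_create(fd, ctx, p->sq_entries, p->cq_entries, p->flags);
}
```

Once enabled (for example via the io_uring group under the tracefs events directory), the events are emitted with the TP_printk formats shown above.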
@@ -19,7 +19,10 @@ struct io_uring_sqe {
__u8 flags; /* IOSQE_ flags */
__u16 ioprio; /* ioprio for the request */
__s32 fd; /* file descriptor to do IO on */
union {
__u64 off; /* offset into file */
__u64 addr2;
};
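The off/addr2 union just above gives opcodes with no file offset a second 64-bit pointer; the accept support added in this series uses it to carry the addrlen pointer, with accept_flags (further down in this struct) carrying the accept4(2) flags. A hedged userspace sketch, assuming a liburing build that provides io_uring_prep_accept() (liburing is not part of this diff) and trimming error handling:

```c
/* Sketch: queue an IORING_OP_ACCEPT on a listening socket.  The peer/peer_len
 * storage must stay valid until the completion is reaped; cqe->res is the
 * accepted file descriptor (or a negative errno). */
#include <liburing.h>
#include <sys/socket.h>

int queue_accept(struct io_uring *ring, int listen_fd,
		 struct sockaddr_storage *peer, socklen_t *peer_len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;	/* SQ ring full; submit and retry */

	*peer_len = sizeof(*peer);
	/* Maps onto sqe->fd, sqe->addr (sockaddr pointer), sqe->addr2
	 * (addrlen pointer) and sqe->accept_flags. */
	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)peer,
			     peer_len, 0);
	sqe->user_data = (unsigned long)peer;	/* any tag the app likes */

	return io_uring_submit(ring);
}
```

On the kernel side this lands in __sys_accept4_file(), added in the net/socket.c hunks further down, so the accept can first be attempted non-blocking and then retried from an io-wq worker.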
__u64 addr; /* pointer to buffer or iovecs */
__u32 len; /* buffer size or number of iovecs */
union {
@@ -29,6 +32,8 @@ struct io_uring_sqe {
__u32 sync_range_flags;
__u32 msg_flags;
__u32 timeout_flags;
__u32 accept_flags;
__u32 cancel_flags;
};
__u64 user_data; /* data to be passed back at completion time */
union {
@@ -50,6 +55,7 @@ struct io_uring_sqe {
#define IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */
#define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */
#define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */
#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
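IORING_SETUP_CQSIZE lets the application size the CQ ring independently of the SQ ring; the io_uring_create() hunk above rounds cq_entries up to a power of two and requires it to be at least sq_entries. A hedged sketch using liburing's io_uring_queue_init_params() (liburing is not part of this diff):

```c
/* Sketch: small SQ ring, much larger CQ ring for a networked workload. */
#include <liburing.h>
#include <stdio.h>
#include <string.h>

int setup_big_cq(struct io_uring *ring)
{
	struct io_uring_params p;
	int ret;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_CQSIZE;
	p.cq_entries = 4096;	/* rounded up to a power of two by the kernel */

	/* 64 SQ entries is plenty when completions, not submissions, dominate */
	ret = io_uring_queue_init_params(64, ring, &p);
	if (ret < 0) {
		fprintf(stderr, "io_uring_queue_init_params: %d\n", ret);
		return ret;
	}
	/* p.cq_entries now reports the actual (rounded) CQ ring size */
	return 0;
}
```

On kernels predating this flag, setup fails with EINVAL, so a portable caller would retry without IORING_SETUP_CQSIZE.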
#define IORING_OP_NOP 0
#define IORING_OP_READV 1
@@ -63,12 +69,21 @@ struct io_uring_sqe {
#define IORING_OP_SENDMSG 9
#define IORING_OP_RECVMSG 10
#define IORING_OP_TIMEOUT 11
#define IORING_OP_TIMEOUT_REMOVE 12
#define IORING_OP_ACCEPT 13
#define IORING_OP_ASYNC_CANCEL 14
#define IORING_OP_LINK_TIMEOUT 15
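IORING_OP_LINK_TIMEOUT is only valid as the SQE immediately following a request flagged with IOSQE_IO_LINK (io_submit_sqe() above rejects it stand-alone with -EINVAL); if the linked request has not completed when the timer fires, it is cancelled. A hedged sketch with liburing prep helpers (liburing is not part of this diff); the timespec must outlive the submission:

```c
/* Sketch: a readv that is aborted if it has not completed within one second.
 * The read SQE carries IOSQE_IO_LINK; the LINK_TIMEOUT SQE must follow it
 * immediately in the submission ring. */
#include <liburing.h>
#include <sys/uio.h>

int read_with_timeout(struct io_uring *ring, int fd, struct iovec *iov,
		      struct __kernel_timespec *ts)
{
	struct io_uring_sqe *sqe;

	ts->tv_sec = 1;
	ts->tv_nsec = 0;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	io_uring_prep_readv(sqe, fd, iov, 1, 0);
	sqe->flags |= IOSQE_IO_LINK;		/* chain the next SQE to this one */
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -1;
	io_uring_prep_link_timeout(sqe, ts, 0);	/* relative, 1 second */
	sqe->user_data = 2;

	return io_uring_submit(ring);		/* 2 on success */
}
```

If the read finishes first, the timeout CQE typically reports -ECANCELED; if the timer fires first, the read is cancelled and the timeout CQE reports -ETIME.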
/*
* sqe->fsync_flags
*/
#define IORING_FSYNC_DATASYNC (1U << 0)
/*
* sqe->timeout_flags
*/
#define IORING_TIMEOUT_ABS (1U << 0)
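IORING_TIMEOUT_ABS makes IORING_OP_TIMEOUT treat the passed timespec as an absolute CLOCK_MONOTONIC deadline rather than a relative delay. A hedged sketch along the same lines as the one above (liburing assumed, not part of this diff; reading off/count == 0 as a pure timer is the author's assumption):

```c
/* Sketch: wake the ring no later than an absolute deadline.  The timespec
 * must stay valid until the timeout CQE is reaped. */
#include <liburing.h>
#include <time.h>

int arm_absolute_timeout(struct io_uring *ring, struct __kernel_timespec *ts)
{
	struct timespec now;
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;

	clock_gettime(CLOCK_MONOTONIC, &now);
	ts->tv_sec = now.tv_sec + 5;	/* deadline: now + 5s, expressed absolutely */
	ts->tv_nsec = now.tv_nsec;

	/* count == 0: no completion-count trigger, fire on the deadline only
	 * (assumption; consult the io_uring man pages for exact semantics) */
	io_uring_prep_timeout(sqe, ts, 0, IORING_TIMEOUT_ABS);
	sqe->user_data = 0xdead;	/* arbitrary marker for the timeout CQE */
	return io_uring_submit(ring);
}
```

When the deadline passes, the timeout CQE completes with -ETIME.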
/*
* IO completion data structure (Completion Queue Entry)
*/
@@ -140,6 +155,7 @@ struct io_uring_params {
* io_uring_params->features flags
*/
#define IORING_FEAT_SINGLE_MMAP (1U << 0)
#define IORING_FEAT_NODROP (1U << 1)
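IORING_FEAT_NODROP advertises the overflow handling added in this series: when the CQ ring is full, completions are parked on cq_overflow_list instead of being dropped, and submission reports -EBUSY until the application reaps CQEs (see the -EBUSY handling in io_submit_sqes() and io_sq_thread() above). A hedged sketch of one userspace reaction; liburing is assumed and the helper names below are the author's own:

```c
/* Sketch: feature probe plus a submit loop that backs off on -EBUSY. */
#include <liburing.h>
#include <errno.h>
#include <stdbool.h>

static bool ring_has_nodrop(const struct io_uring_params *p)
{
	/* p is the params struct filled in by io_uring_queue_init_params() */
	return p->features & IORING_FEAT_NODROP;
}

int submit_with_backpressure(struct io_uring *ring)
{
	int ret;

	do {
		ret = io_uring_submit(ring);
		if (ret == -EBUSY) {
			/* CQ side is backed up: reap at least one completion,
			 * then retry the submit. */
			struct io_uring_cqe *cqe;

			if (io_uring_wait_cqe(ring, &cqe) == 0)
				io_uring_cqe_seen(ring, cqe);
		}
	} while (ret == -EBUSY);

	return ret;
}
```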
/*
* io_uring_register(2) opcodes and arguments
@@ -150,5 +166,11 @@ struct io_uring_params {
#define IORING_UNREGISTER_FILES 3
#define IORING_REGISTER_EVENTFD 4
#define IORING_UNREGISTER_EVENTFD 5
#define IORING_REGISTER_FILES_UPDATE 6
struct io_uring_files_update {
__u32 offset;
__s32 *fds;
};
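Together with sparse registered file sets (an fd of -1 leaves a slot empty, as handled in io_sqe_files_register() above), IORING_REGISTER_FILES_UPDATE lets individual slots be repopulated without a full unregister/register cycle; io_uring_register(2) takes this struct as the argument and the number of fds as nr_args. A hedged sketch, assuming liburing's io_uring_register_files()/io_uring_register_files_update() wrappers (not part of this diff):

```c
/* Sketch: reserve 8 fixed-file slots up front, fill slot 3 later.
 * Requests can then reference the slot index with IOSQE_FIXED_FILE. */
#include <liburing.h>

int reserve_and_update(struct io_uring *ring, int real_fd)
{
	int fds[8];
	int ret, i;

	for (i = 0; i < 8; i++)
		fds[i] = -1;		/* sparse: every slot starts empty */

	ret = io_uring_register_files(ring, fds, 8);
	if (ret < 0)
		return ret;

	/* Later: drop real_fd into slot 3.  Under the hood this issues
	 * IORING_REGISTER_FILES_UPDATE with offset == 3 and one fd. */
	fds[0] = real_fd;
	ret = io_uring_register_files_update(ring, 3, fds, 1);
	return ret;			/* number of slots updated on success */
}
```

Sparse registration with -1 entries requires a kernel with this series; older kernels reject such sets at registration time.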
#endif
@@ -1548,6 +1548,7 @@ config AIO
config IO_URING
bool "Enable IO uring support" if EXPERT
select ANON_INODES
select IO_WQ
default y
help
This option enables support for the io_uring interface, enabling
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include "../workqueue_internal.h" #include "../workqueue_internal.h"
#include "../../fs/io-wq.h"
#include "../smpboot.h" #include "../smpboot.h"
#include "pelt.h" #include "pelt.h"
...@@ -4112,9 +4113,12 @@ static inline void sched_submit_work(struct task_struct *tsk) ...@@ -4112,9 +4113,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
* we disable preemption to avoid it calling schedule() again * we disable preemption to avoid it calling schedule() again
* in the possible wakeup of a kworker. * in the possible wakeup of a kworker.
*/ */
if (tsk->flags & PF_WQ_WORKER) { if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
preempt_disable(); preempt_disable();
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk); wq_worker_sleeping(tsk);
else
io_wq_worker_sleeping(tsk);
preempt_enable_no_resched(); preempt_enable_no_resched();
} }
...@@ -4131,8 +4135,12 @@ static inline void sched_submit_work(struct task_struct *tsk) ...@@ -4131,8 +4135,12 @@ static inline void sched_submit_work(struct task_struct *tsk)
static void sched_update_worker(struct task_struct *tsk) static void sched_update_worker(struct task_struct *tsk)
{ {
if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
if (tsk->flags & PF_WQ_WORKER) if (tsk->flags & PF_WQ_WORKER)
wq_worker_running(tsk); wq_worker_running(tsk);
else
io_wq_worker_running(tsk);
}
} }
asmlinkage __visible void __sched schedule(void) asmlinkage __visible void __sched schedule(void)
......
...@@ -1691,24 +1691,13 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog) ...@@ -1691,24 +1691,13 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
return __sys_listen(fd, backlog); return __sys_listen(fd, backlog);
} }
/* int __sys_accept4_file(struct file *file, unsigned file_flags,
* For accept, we attempt to create a new socket, set up the link struct sockaddr __user *upeer_sockaddr,
* with the client, wake up the client, then return the new
* connected fd. We collect the address of the connector in kernel
* space and move it to user at the very end. This is unclean because
* we open the socket then return an error.
*
* 1003.1g adds the ability to recvmsg() to query connection pending
* status to recvmsg. We need to add that support in a way thats
* clean when we restructure accept also.
*/
int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags) int __user *upeer_addrlen, int flags)
{ {
struct socket *sock, *newsock; struct socket *sock, *newsock;
struct file *newfile; struct file *newfile;
int err, len, newfd, fput_needed; int err, len, newfd;
struct sockaddr_storage address; struct sockaddr_storage address;
if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
...@@ -1717,14 +1706,14 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, ...@@ -1717,14 +1706,14 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
sock = sockfd_lookup_light(fd, &err, &fput_needed); sock = sock_from_file(file, &err);
if (!sock) if (!sock)
goto out; goto out;
err = -ENFILE; err = -ENFILE;
newsock = sock_alloc(); newsock = sock_alloc();
if (!newsock) if (!newsock)
goto out_put; goto out;
newsock->type = sock->type; newsock->type = sock->type;
newsock->ops = sock->ops; newsock->ops = sock->ops;
...@@ -1739,20 +1728,21 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, ...@@ -1739,20 +1728,21 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
if (unlikely(newfd < 0)) { if (unlikely(newfd < 0)) {
err = newfd; err = newfd;
sock_release(newsock); sock_release(newsock);
goto out_put; goto out;
} }
newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name); newfile = sock_alloc_file(newsock, flags, sock->sk->sk_prot_creator->name);
if (IS_ERR(newfile)) { if (IS_ERR(newfile)) {
err = PTR_ERR(newfile); err = PTR_ERR(newfile);
put_unused_fd(newfd); put_unused_fd(newfd);
goto out_put; goto out;
} }
err = security_socket_accept(sock, newsock); err = security_socket_accept(sock, newsock);
if (err) if (err)
goto out_fd; goto out_fd;
err = sock->ops->accept(sock, newsock, sock->file->f_flags, false); err = sock->ops->accept(sock, newsock, sock->file->f_flags | file_flags,
false);
if (err < 0) if (err < 0)
goto out_fd; goto out_fd;
...@@ -1773,15 +1763,42 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, ...@@ -1773,15 +1763,42 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
fd_install(newfd, newfile); fd_install(newfd, newfile);
err = newfd; err = newfd;
out_put:
fput_light(sock->file, fput_needed);
out: out:
return err; return err;
out_fd: out_fd:
fput(newfile); fput(newfile);
put_unused_fd(newfd); put_unused_fd(newfd);
goto out_put; goto out;
}
/*
* For accept, we attempt to create a new socket, set up the link
* with the client, wake up the client, then return the new
* connected fd. We collect the address of the connector in kernel
* space and move it to user at the very end. This is unclean because
* we open the socket then return an error.
*
* 1003.1g adds the ability to recvmsg() to query connection pending
* status to recvmsg. We need to add that support in a way thats
* clean when we restructure accept also.
*/
int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags)
{
int ret = -EBADF;
struct fd f;
f = fdget(fd);
if (f.file) {
ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
upeer_addrlen, flags);
if (f.flags)
fput(f.file);
}
return ret;
} }
SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
......