Commit c98c70ed authored by Linus Torvalds

Merge tag 'io_uring-6.1-2022-10-13' of git://git.kernel.dk/linux

Pull more io_uring updates from Jens Axboe:
 "A collection of fixes that ended up either being later than the
  initial pull, or dependent on multiple branches (6.0-late being one of
  them) and hence deferred purposely. This contains:

   - Cleanup fixes for the single submitter late 6.0 change, which we
     pushed to 6.1 to keep the 6.0 changes small (Dylan, Pavel)

   - Fix for IORING_OP_CONNECT not handling -EINPROGRESS correctly (me)

   - Ensure that the zc sendmsg variant gets audited correctly (me)

   - Regression fix from this merge window where kiocb_end_write()
     doesn't always get called, which can cause issues with fs freezing
     (me)

   - Registered files SCM handling fix (Pavel)

   - Regression fix for big sqe dumping in fdinfo (Pavel)

   - Registered buffers accounting fix (Pavel)

   - Remove leftover notification structures, we killed them off late in
     6.0 (Pavel)

   - Minor optimizations (Pavel)

   - Cosmetic variable shadowing fix (Stefan)"

* tag 'io_uring-6.1-2022-10-13' of git://git.kernel.dk/linux:
  io_uring/rw: ensure kiocb_end_write() is always called
  io_uring: fix fdinfo sqe offsets calculation
  io_uring: local variable rw shadows outer variable in io_write
  io_uring/opdef: remove 'audit_skip' from SENDMSG_ZC
  io_uring: optimise locking for local tw with submit_wait
  io_uring: remove redundant memory barrier in io_req_local_work_add
  io_uring/net: handle -EINPROGRESS correct for IORING_OP_CONNECT
  io_uring: remove notif leftovers
  io_uring: correct pinned_vm accounting
  io_uring/af_unix: defer registered files gc to io_uring release
  io_uring: limit registration w/ SINGLE_ISSUER
  io_uring: remove io_register_submitter
  io_uring: simplify __io_uring_add_tctx_node
parents 6d84c258 2ec33a6c
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -34,9 +34,6 @@ struct io_file_table {
 	unsigned int alloc_hint;
 };
 
-struct io_notif;
-struct io_notif_slot;
-
 struct io_hash_bucket {
 	spinlock_t		lock;
 	struct hlist_head	list;
@@ -242,8 +239,6 @@ struct io_ring_ctx {
 	unsigned		nr_user_files;
 	unsigned		nr_user_bufs;
 	struct io_mapped_ubuf	**user_bufs;
-	struct io_notif_slot	*notif_slots;
-	unsigned		nr_notif_slots;
 
 	struct io_submit_state	submit_state;
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -803,6 +803,7 @@ typedef unsigned char *sk_buff_data_t;
 *	@csum_level: indicates the number of consecutive checksums found in
 *		the packet minus one that have been verified as
 *		CHECKSUM_UNNECESSARY (max 3)
+*	@scm_io_uring: SKB holds io_uring registered files
 *	@dst_pending_confirm: need to confirm neighbour
 *	@decrypted: Decrypted SKB
 *	@slow_gro: state present at GRO time, slower prepare step required
@@ -982,6 +983,7 @@ struct sk_buff {
 #endif
 	__u8			slow_gro:1;
 	__u8			csum_not_inet:1;
+	__u8			scm_io_uring:1;
 
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
--- a/io_uring/fdinfo.c
+++ b/io_uring/fdinfo.c
@@ -94,7 +94,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
 		sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
 		if (sq_idx > sq_mask)
 			continue;
-		sqe = &ctx->sq_sqes[sq_idx << 1];
+		sqe = &ctx->sq_sqes[sq_idx << sq_shift];
 		seq_printf(m, "%5u: opcode:%s, fd:%d, flags:%x, off:%llu, "
 			      "addr:0x%llx, rw_flags:0x%x, buf_index:%d "
 			      "user_data:%llu",
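For context, a minimal sketch (ours, not part of this merge; the helper name nth_sqe() is hypothetical) of the indexing rule the fdinfo fix restores: with IORING_SETUP_SQE128 each slot is two struct io_uring_sqe wide, so the shift must come from the ring flags rather than being hardcoded to 1.

#include <linux/io_uring.h>
#include <stddef.h>

static const struct io_uring_sqe *nth_sqe(const struct io_uring_sqe *sqes,
					  unsigned int ring_flags,
					  unsigned int sq_idx)
{
	/* 1 iff big SQEs are enabled; 0 for the normal 64-byte layout */
	unsigned int sq_shift = (ring_flags & IORING_SETUP_SQE128) ? 1 : 0;

	return &sqes[sq_idx << sq_shift];
}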
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1106,6 +1106,8 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	if (!llist_add(&req->io_task_work.node, &ctx->work_llist))
 		return;
 
+	/* need it for the following io_cqring_wake() */
+	smp_mb__after_atomic();
 	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
 		io_move_task_work_from_local(ctx);
@@ -1117,8 +1119,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
 	if (ctx->has_evfd)
 		io_eventfd_signal(ctx);
-
-	io_cqring_wake(ctx);
+	__io_cqring_wake(ctx);
 }
 
 static inline void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
@@ -2585,12 +2586,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 {
 	io_sq_thread_finish(ctx);
-	if (ctx->mm_account) {
-		mmdrop(ctx->mm_account);
-		ctx->mm_account = NULL;
-	}
-
 	io_rsrc_refs_drop(ctx);
 	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
 	io_wait_rsrc_data(ctx->buf_data);
@@ -2631,8 +2626,11 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	}
 #endif
 	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
-	WARN_ON_ONCE(ctx->notif_slots || ctx->nr_notif_slots);
+
+	if (ctx->mm_account) {
+		mmdrop(ctx->mm_account);
+		ctx->mm_account = NULL;
+	}
 
 	io_mem_free(ctx->rings);
 	io_mem_free(ctx->sq_sqes);
@@ -3229,8 +3227,16 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
 			mutex_unlock(&ctx->uring_lock);
 			goto out;
 		}
-		if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
-			goto iopoll_locked;
+		if (flags & IORING_ENTER_GETEVENTS) {
+			if (ctx->syscall_iopoll)
+				goto iopoll_locked;
+			/*
+			 * Ignore errors, we'll soon call io_cqring_wait() and
+			 * it should handle ownership problems if any.
+			 */
+			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
+				(void)io_run_local_work_locked(ctx);
+		}
 		mutex_unlock(&ctx->uring_lock);
 	}
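For context, a hedged sketch (ours, assuming liburing headers; make_deferred_ring() is a hypothetical helper) of the setup this hunk serves: IORING_SETUP_DEFER_TASKRUN, which requires IORING_SETUP_SINGLE_ISSUER, defers completion work until the submitter enters the kernel, and the hunk above flushes that deferred local work on io_uring_enter(GETEVENTS) before sleeping.

#include <liburing.h>

int make_deferred_ring(struct io_uring *ring)
{
	struct io_uring_params p = {
		.flags = IORING_SETUP_SINGLE_ISSUER |
			 IORING_SETUP_DEFER_TASKRUN,
	};

	/* completions now only progress when this same task waits,
	 * e.g. via io_uring_submit_and_wait() or io_uring_wait_cqe() */
	return io_uring_queue_init_params(8, ring, &p);
}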
@@ -3355,7 +3361,7 @@ static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
 	if (fd < 0)
 		return fd;
 
-	ret = __io_uring_add_tctx_node(ctx, false);
+	ret = __io_uring_add_tctx_node(ctx);
 	if (ret) {
 		put_unused_fd(fd);
 		return ret;
@@ -3890,6 +3896,9 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
 		return -ENXIO;
 
+	if (ctx->submitter_task && ctx->submitter_task != current)
+		return -EEXIST;
+
 	if (ctx->restricted) {
 		if (opcode >= IORING_REGISTER_LAST)
 			return -EINVAL;
--- a/io_uring/io_uring.h
+++ b/io_uring/io_uring.h
@@ -203,17 +203,24 @@ static inline void io_commit_cqring(struct io_ring_ctx *ctx)
 	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
 }
 
-static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+/* requires smp_mb() prior, see wq_has_sleeper() */
+static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
 {
 	/*
 	 * wake_up_all() may seem excessive, but io_wake_function() and
 	 * io_should_wake() handle the termination of the loop and only
 	 * wake as many waiters as we need to.
 	 */
-	if (wq_has_sleeper(&ctx->cq_wait))
+	if (waitqueue_active(&ctx->cq_wait))
 		wake_up_all(&ctx->cq_wait);
 }
 
+static inline void io_cqring_wake(struct io_ring_ctx *ctx)
+{
+	smp_mb();
+	__io_cqring_wake(ctx);
+}
+
 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
 {
 	struct io_rings *r = ctx->rings;
@@ -268,6 +275,13 @@ static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
 	return ret;
 }
 
+static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
+{
+	if (llist_empty(&ctx->work_llist))
+		return 0;
+	return __io_run_local_work(ctx, true);
+}
+
 static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
 {
 	if (!*locked) {
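The split above exists so that io_req_local_work_add(), which already issues smp_mb__after_atomic() after its llist_add(), can skip the full barrier that wq_has_sleeper() bundles in. Below is a userspace analogue of that pairing (ours, illustrative only; the names are not the kernel's), using C11 atomics: each side publishes its own store, fences, then checks the other side's flag, so at least one of them is guaranteed to observe the other and the wakeup cannot be lost.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int work_pending;
static atomic_int waiters;

/* analogue of io_req_local_work_add() + __io_cqring_wake() */
static bool post_work(void)
{
	atomic_store_explicit(&work_pending, 1, memory_order_relaxed);
	/* pairs with the fence in wait_for_work();
	 * kernel counterpart: smp_mb__after_atomic() after llist_add() */
	atomic_thread_fence(memory_order_seq_cst);
	/* analogue of waitqueue_active(): plain check, no extra barrier */
	return atomic_load_explicit(&waiters, memory_order_relaxed) != 0;
}

/* analogue of the sleeper side in io_cqring_wait() */
static bool wait_for_work(void)
{
	atomic_fetch_add_explicit(&waiters, 1, memory_order_relaxed);
	/* pairs with the fence in post_work() */
	atomic_thread_fence(memory_order_seq_cst);
	/* if this reads 0, the caller may safely block: the fences ensure
	 * a concurrent post_work() would have seen waiters != 0 */
	return atomic_load_explicit(&work_pending, memory_order_relaxed) != 0;
}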
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -46,6 +46,7 @@ struct io_connect {
 	struct file			*file;
 	struct sockaddr __user		*addr;
 	int				addr_len;
+	bool				in_progress;
 };
 
 struct io_sr_msg {
@@ -1386,6 +1387,7 @@ int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
 	conn->addr_len = READ_ONCE(sqe->addr2);
+	conn->in_progress = false;
 	return 0;
 }
 
@@ -1397,6 +1399,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 	int ret;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
+	if (connect->in_progress) {
+		struct socket *socket;
+
+		ret = -ENOTSOCK;
+		socket = sock_from_file(req->file);
+		if (socket)
+			ret = sock_error(socket->sk);
+		goto out;
+	}
+
 	if (req_has_async_data(req)) {
 		io = req->async_data;
 	} else {
@@ -1413,6 +1425,9 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 	ret = __sys_connect_file(req->file, &io->address,
 					connect->addr_len, file_flags);
 	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
+		if (ret == -EINPROGRESS) {
+			connect->in_progress = true;
+		} else {
 			if (req_has_async_data(req))
 				return -EAGAIN;
 			if (io_alloc_async_data(req)) {
@@ -1420,6 +1435,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
 				goto out;
 			}
 			memcpy(req->async_data, &__io, sizeof(__io));
+		}
 		return -EAGAIN;
 	}
 	if (ret == -ERESTARTSYS)
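What the -EINPROGRESS fix means for userspace, as a hedged sketch (ours, assuming liburing is installed; not part of this merge): a single connect SQE on a TCP socket completes with one CQE carrying the final result, even though the kernel internally sees -EINPROGRESS first and polls for the connection to finish.

#include <liburing.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct sockaddr_in addr;
	int fd;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(80);
	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_connect(sqe, fd, (struct sockaddr *)&addr, sizeof(addr));

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);

	/* 0 on success, or the real socket error (e.g. -ECONNREFUSED) */
	printf("connect result: %d\n", cqe->res);

	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}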
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -510,7 +510,6 @@ const struct io_op_def io_op_defs[] = {
 		.needs_file		= 1,
 		.unbound_nonreg_file	= 1,
 		.pollout		= 1,
-		.audit_skip		= 1,
 		.ioprio			= 1,
 		.manual_alloc		= 1,
 #if defined(CONFIG_NET)
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -855,6 +855,7 @@ int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
 
 			UNIXCB(skb).fp = fpl;
 			skb->sk = sk;
+			skb->scm_io_uring = 1;
 			skb->destructor = unix_destruct_scm;
 			refcount_add(skb->truesize, &sk->sk_wmem_alloc);
 		}
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -234,11 +234,34 @@ static void kiocb_end_write(struct io_kiocb *req)
 	}
 }
 
+/*
+ * Trigger the notifications after having done some IO, and finish the write
+ * accounting, if any.
+ */
+static void io_req_io_end(struct io_kiocb *req)
+{
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+
+	WARN_ON(!in_task());
+
+	if (rw->kiocb.ki_flags & IOCB_WRITE) {
+		kiocb_end_write(req);
+		fsnotify_modify(req->file);
+	} else {
+		fsnotify_access(req->file);
+	}
+}
+
 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
 {
 	if (unlikely(res != req->cqe.res)) {
 		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
 		    io_rw_should_reissue(req)) {
+			/*
+			 * Reissue will start accounting again, finish the
+			 * current cycle.
+			 */
+			io_req_io_end(req);
 			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
 			return true;
 		}
@@ -264,15 +287,7 @@ static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
 
 static void io_req_rw_complete(struct io_kiocb *req, bool *locked)
 {
-	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
-	if (rw->kiocb.ki_flags & IOCB_WRITE) {
-		kiocb_end_write(req);
-		fsnotify_modify(req->file);
-	} else {
-		fsnotify_access(req->file);
-	}
-
+	io_req_io_end(req);
 	io_req_task_complete(req, locked);
 }
 
@@ -317,6 +332,11 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		req->file->f_pos = rw->kiocb.ki_pos;
 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
 		if (!__io_complete_rw_common(req, ret)) {
+			/*
+			 * Safe to call io_req_io_end() here as we're inline
+			 * from the submission path.
+			 */
+			io_req_io_end(req);
 			io_req_set_res(req, final_ret,
 				       io_put_kbuf(req, issue_flags));
 			return IOU_OK;
@@ -916,7 +936,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 			goto copy_iov;
 
 		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
-			struct io_async_rw *rw;
+			struct io_async_rw *io;
 
 			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
 						req->cqe.res, ret2);
@@ -929,9 +949,9 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 			iov_iter_save_state(&s->iter, &s->iter_state);
 			ret = io_setup_async_rw(req, iovec, s, true);
 
-			rw = req->async_data;
-			if (rw)
-				rw->bytes_done += ret2;
+			io = req->async_data;
+			if (io)
+				io->bytes_done += ret2;
 
 			if (kiocb->ki_flags & IOCB_WRITE)
 				kiocb_end_write(req);
--- a/io_uring/tctx.c
+++ b/io_uring/tctx.c
@@ -91,32 +91,12 @@ __cold int io_uring_alloc_task_context(struct task_struct *task,
 	return 0;
 }
 
-static int io_register_submitter(struct io_ring_ctx *ctx)
-{
-	int ret = 0;
-
-	mutex_lock(&ctx->uring_lock);
-	if (!ctx->submitter_task)
-		ctx->submitter_task = get_task_struct(current);
-	else if (ctx->submitter_task != current)
-		ret = -EEXIST;
-	mutex_unlock(&ctx->uring_lock);
-
-	return ret;
-}
-
-int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter)
+int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 {
 	struct io_uring_task *tctx = current->io_uring;
 	struct io_tctx_node *node;
 	int ret;
 
-	if ((ctx->flags & IORING_SETUP_SINGLE_ISSUER) && submitter) {
-		ret = io_register_submitter(ctx);
-		if (ret)
-			return ret;
-	}
-
 	if (unlikely(!tctx)) {
 		ret = io_uring_alloc_task_context(current, ctx);
 		if (unlikely(ret))
@@ -150,8 +130,22 @@ int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 		list_add(&node->ctx_node, &ctx->tctx_list);
 		mutex_unlock(&ctx->uring_lock);
 	}
-	if (submitter)
-		tctx->last = ctx;
+	return 0;
+}
+
+int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
+{
+	int ret;
+
+	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
+	    && ctx->submitter_task != current)
+		return -EEXIST;
+
+	ret = __io_uring_add_tctx_node(ctx);
+	if (ret)
+		return ret;
+
+	current->io_uring->last = ctx;
 	return 0;
 }
 
@@ -259,7 +253,7 @@ int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
 		return -EINVAL;
 
 	mutex_unlock(&ctx->uring_lock);
-	ret = __io_uring_add_tctx_node(ctx, false);
+	ret = __io_uring_add_tctx_node(ctx);
 	mutex_lock(&ctx->uring_lock);
 	if (ret)
 		return ret;
--- a/io_uring/tctx.h
+++ b/io_uring/tctx.h
@@ -9,7 +9,8 @@ struct io_tctx_node {
 int io_uring_alloc_task_context(struct task_struct *task,
 				struct io_ring_ctx *ctx);
 void io_uring_del_tctx_node(unsigned long index);
-int __io_uring_add_tctx_node(struct io_ring_ctx *ctx, bool submitter);
+int __io_uring_add_tctx_node(struct io_ring_ctx *ctx);
+int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx);
 void io_uring_clean_tctx(struct io_uring_task *tctx);
 void io_uring_unreg_ringfd(void);
 
@@ -27,5 +28,6 @@ static inline int io_uring_add_tctx_node(struct io_ring_ctx *ctx)
 
 	if (likely(tctx && tctx->last == ctx))
 		return 0;
-	return __io_uring_add_tctx_node(ctx, true);
+
+	return __io_uring_add_tctx_node_from_submit(ctx);
 }
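A hedged sketch (ours, assuming liburing; not from this merge) of the semantics the SINGLE_ISSUER patches enforce: once a ring is created with IORING_SETUP_SINGLE_ISSUER, registration attempts from any other task should now fail with -EEXIST.

#include <liburing.h>
#include <pthread.h>
#include <stdio.h>

static struct io_uring ring;

static void *other_thread(void *arg)
{
	int fds[1] = { 0 };	/* stdin, just to have a valid file */
	/* expected to fail with -EEXIST after this merge */
	int ret = io_uring_register_files(&ring, fds, 1);

	printf("register from other thread: %d\n", ret);
	return NULL;
}

int main(void)
{
	struct io_uring_params p = { .flags = IORING_SETUP_SINGLE_ISSUER };
	pthread_t t;

	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	pthread_create(&t, NULL, other_thread, NULL);
	pthread_join(t, NULL);

	io_uring_queue_exit(&ring);
	return 0;
}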
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -204,6 +204,7 @@ void wait_for_unix_gc(void)
 /* The external entry point: unix_gc() */
 void unix_gc(void)
 {
+	struct sk_buff *next_skb, *skb;
 	struct unix_sock *u;
 	struct unix_sock *next;
 	struct sk_buff_head hitlist;
@@ -297,11 +298,30 @@ void unix_gc(void)
 
 	spin_unlock(&unix_gc_lock);
 
+	/* We need io_uring to clean its registered files, ignore all io_uring
+	 * originated skbs. It's fine as io_uring doesn't keep references to
+	 * other io_uring instances and so killing all other files in the cycle
+	 * will put all io_uring references forcing it to go through normal
+	 * release path eventually putting registered files.
+	 */
+	skb_queue_walk_safe(&hitlist, skb, next_skb) {
+		if (skb->scm_io_uring) {
+			__skb_unlink(skb, &hitlist);
+			skb_queue_tail(&skb->sk->sk_receive_queue, skb);
+		}
+	}
+
 	/* Here we are. Hitlist is filled. Die. */
 	__skb_queue_purge(&hitlist);
 
 	spin_lock(&unix_gc_lock);
 
+	/* There could be io_uring registered files, just push them back to
+	 * the inflight list
+	 */
+	list_for_each_entry_safe(u, next, &gc_candidates, link)
+		list_move_tail(&u->link, &gc_inflight_list);
+
 	/* All candidates should have been detached by now. */
 	BUG_ON(!list_empty(&gc_candidates));
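For context, a short sketch (ours, assuming liburing) of the "registered files" this gc change is about: the kernel accounts such files against the ring's internal AF_UNIX socket via SCM, which is why unix_gc() must now leave io_uring-originated skbs alone and let the ring's own release path put the files.

#include <liburing.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	int fd, ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	fd = open("/dev/null", O_RDONLY);

	/* pin the file into the ring's registered-file table */
	ret = io_uring_register_files(&ring, &fd, 1);
	if (ret < 0)
		goto out;

	/* closing the original descriptor keeps the registered copy alive;
	 * it is only put when the ring itself is released */
	close(fd);
out:
	io_uring_queue_exit(&ring);
	return 0;
}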