Commit bc212465 authored by David Howells's avatar David Howells Committed by Jakub Kicinski

rxrpc: Fix a race between socket set up and I/O thread creation

In rxrpc_open_socket(), it sets up the socket and then sets up the I/O
thread that will handle it.  This is a problem, however, as there's a gap
between the two phases in which a packet may come into rxrpc_encap_rcv()
from the UDP socket, but we oops when trying to wake the not-yet created I/O
thread.

As a quick fix, just make rxrpc_encap_rcv() discard the packet if there's
no I/O thread yet.

A better, but more intrusive fix would perhaps be to rearrange things such
that the socket creation is done by the I/O thread.

Fixes: a275da62 ("rxrpc: Create a per-local endpoint receive queue and I/O thread")
Signed-off-by: David Howells <dhowells@redhat.com>
cc: yuxuanzhe@outlook.com
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Simon Horman <horms@kernel.org>
cc: linux-afs@lists.infradead.org
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20241001132702.3122709-2-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 9af25dd9
...@@ -1056,7 +1056,7 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why, ...@@ -1056,7 +1056,7 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
int rxrpc_io_thread(void *data); int rxrpc_io_thread(void *data);
static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local) static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
{ {
wake_up_process(local->io_thread); wake_up_process(READ_ONCE(local->io_thread));
} }
static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why) static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
......
...@@ -27,11 +27,17 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb) ...@@ -27,11 +27,17 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
{ {
struct sk_buff_head *rx_queue; struct sk_buff_head *rx_queue;
struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk); struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
struct task_struct *io_thread;
if (unlikely(!local)) { if (unlikely(!local)) {
kfree_skb(skb); kfree_skb(skb);
return 0; return 0;
} }
io_thread = READ_ONCE(local->io_thread);
if (!io_thread) {
kfree_skb(skb);
return 0;
}
if (skb->tstamp == 0) if (skb->tstamp == 0)
skb->tstamp = ktime_get_real(); skb->tstamp = ktime_get_real();
...@@ -47,7 +53,7 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb) ...@@ -47,7 +53,7 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
#endif #endif
skb_queue_tail(rx_queue, skb); skb_queue_tail(rx_queue, skb);
rxrpc_wake_up_io_thread(local); wake_up_process(io_thread);
return 0; return 0;
} }
...@@ -565,7 +571,7 @@ int rxrpc_io_thread(void *data) ...@@ -565,7 +571,7 @@ int rxrpc_io_thread(void *data)
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
rxrpc_see_local(local, rxrpc_local_stop); rxrpc_see_local(local, rxrpc_local_stop);
rxrpc_destroy_local(local); rxrpc_destroy_local(local);
local->io_thread = NULL; WRITE_ONCE(local->io_thread, NULL);
rxrpc_see_local(local, rxrpc_local_stopped); rxrpc_see_local(local, rxrpc_local_stopped);
return 0; return 0;
} }
...@@ -232,7 +232,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) ...@@ -232,7 +232,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
} }
wait_for_completion(&local->io_thread_ready); wait_for_completion(&local->io_thread_ready);
local->io_thread = io_thread; WRITE_ONCE(local->io_thread, io_thread);
_leave(" = 0"); _leave(" = 0");
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment