[PATCH net 1/2] rxrpc: Fix a race between socket set up and I/O thread creation
David Howells
dhowells at redhat.com
Tue Oct 1 06:26:58 PDT 2024
In rxrpc_open_socket(), the socket is set up first and only then is the I/O
thread that will handle it created.  This is a problem, however, as there's
a gap between the two phases in which a packet may come into
rxrpc_encap_rcv() from the UDP socket, whereupon we oops trying to wake the
not-yet-created I/O thread.
As a quick fix, just make rxrpc_encap_rcv() discard the packet if there's
no I/O thread yet.
A better, but more intrusive, fix would perhaps be to rearrange things
such that the socket creation is done by the I/O thread.
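
Roughly, that rearrangement might look like the sketch below.  This is
only an outline of the idea, not a tested patch: rxrpc_setup_udp_socket()
is a placeholder helper, reporting the setup error back to the creator is
omitted, and the real change would have to move a fair amount of
rxrpc_open_socket() around.

	static int rxrpc_io_thread_sketch(void *data)
	{
		struct rxrpc_local *local = data;
		int ret;

		local->io_thread = current;	/* set before the socket can receive */

		/* The UDP socket, and hence the encap_rcv hook, only comes
		 * into existence here, so any packet that reaches
		 * rxrpc_encap_rcv() is guaranteed to find a live I/O thread
		 * to wake.
		 */
		ret = rxrpc_setup_udp_socket(local);	/* placeholder */
		complete(&local->io_thread_ready);
		if (ret < 0)
			return ret;	/* error would need recording for the creator */

		/* ... the existing I/O thread event loop ... */
		return 0;
	}

	static int rxrpc_open_socket_sketch(struct rxrpc_local *local)
	{
		struct task_struct *t;

		t = kthread_run(rxrpc_io_thread_sketch, local, "krxrpcio");
		if (IS_ERR(t))
			return PTR_ERR(t);

		wait_for_completion(&local->io_thread_ready);
		/* ... check whatever error the I/O thread recorded ... */
		return 0;
	}

With that ordering, local->io_thread is always valid by the time the
encap_rcv hook can fire, so the NULL check and the READ_ONCE()/WRITE_ONCE()
pairing in the quick fix would no longer be needed on the receive path.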
Fixes: a275da62e8c1 ("rxrpc: Create a per-local endpoint receive queue and I/O thread")
Signed-off-by: David Howells <dhowells at redhat.com>
cc: yuxuanzhe at outlook.com
cc: Marc Dionne <marc.dionne at auristor.com>
cc: "David S. Miller" <davem at davemloft.net>
cc: Eric Dumazet <edumazet at google.com>
cc: Jakub Kicinski <kuba at kernel.org>
cc: Paolo Abeni <pabeni at redhat.com>
cc: Simon Horman <horms at kernel.org>
cc: linux-afs at lists.infradead.org
cc: netdev at vger.kernel.org
---
net/rxrpc/ar-internal.h | 2 +-
net/rxrpc/io_thread.c | 10 ++++++++--
net/rxrpc/local_object.c | 2 +-
3 files changed, 10 insertions(+), 4 deletions(-)
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 80d682f89b23..d0fd37bdcfe9 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1056,7 +1056,7 @@ bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
int rxrpc_io_thread(void *data);
static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
{
- wake_up_process(local->io_thread);
+ wake_up_process(READ_ONCE(local->io_thread));
}
static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
index 0300baa9afcd..07c74c77d802 100644
--- a/net/rxrpc/io_thread.c
+++ b/net/rxrpc/io_thread.c
@@ -27,11 +27,17 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
{
struct sk_buff_head *rx_queue;
struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
+ struct task_struct *io_thread;
if (unlikely(!local)) {
kfree_skb(skb);
return 0;
}
+ io_thread = READ_ONCE(local->io_thread);
+ if (!io_thread) {
+ kfree_skb(skb);
+ return 0;
+ }
if (skb->tstamp == 0)
skb->tstamp = ktime_get_real();
@@ -47,7 +53,7 @@ int rxrpc_encap_rcv(struct sock *udp_sk, struct sk_buff *skb)
#endif
skb_queue_tail(rx_queue, skb);
- rxrpc_wake_up_io_thread(local);
+ wake_up_process(io_thread);
return 0;
}
@@ -565,7 +571,7 @@ int rxrpc_io_thread(void *data)
__set_current_state(TASK_RUNNING);
rxrpc_see_local(local, rxrpc_local_stop);
rxrpc_destroy_local(local);
- local->io_thread = NULL;
+ WRITE_ONCE(local->io_thread, NULL);
rxrpc_see_local(local, rxrpc_local_stopped);
return 0;
}
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 504453c688d7..f9623ace2201 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -232,7 +232,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
}
wait_for_completion(&local->io_thread_ready);
- local->io_thread = io_thread;
+ WRITE_ONCE(local->io_thread, io_thread);
_leave(" = 0");
return 0;