[SRU][B][PATCH 1/1] afs: Fix lock recursion

Alessio Faina alessio.faina at canonical.com
Mon Oct 20 01:17:23 PDT 2025


From: David Howells <dhowells at redhat.com>

afs_wake_up_async_call() can incur lock recursion.  The problem is that it
is called from AF_RXRPC whilst holding the ->notify_lock, and it tries to
take a ref on the afs_call struct in order to pass it to a work queue.  If
the afs_call is already queued, however, we then have an extraneous ref
that must be put.  Calling afs_put_call() to drop it may call back down
into AF_RXRPC through rxrpc_kernel_shutdown_call(), which might try taking
the ->notify_lock again.

This case isn't very common, however, so just defer the final put to a
workqueue.  The oops looks something like:

  BUG: spinlock recursion on CPU#0, krxrpcio/7001/1646
   lock: 0xffff888141399b30, .magic: dead4ead, .owner: krxrpcio/7001/1646, .owner_cpu: 0
  CPU: 0 UID: 0 PID: 1646 Comm: krxrpcio/7001 Not tainted 6.12.0-rc2-build3+ #4351
  Hardware name: ASUS All Series/H97-PLUS, BIOS 2306 10/09/2014
  Call Trace:
   <TASK>
   dump_stack_lvl+0x47/0x70
   do_raw_spin_lock+0x3c/0x90
   rxrpc_kernel_shutdown_call+0x83/0xb0
   afs_put_call+0xd7/0x180
   rxrpc_notify_socket+0xa0/0x190
   rxrpc_input_split_jumbo+0x198/0x1d0
   rxrpc_input_data+0x14b/0x1e0
   ? rxrpc_input_call_packet+0xc2/0x1f0
   rxrpc_input_call_event+0xad/0x6b0
   rxrpc_input_packet_on_conn+0x1e1/0x210
   rxrpc_input_packet+0x3f2/0x4d0
   rxrpc_io_thread+0x243/0x410
   ? __pfx_rxrpc_io_thread+0x10/0x10
   kthread+0xcf/0xe0
   ? __pfx_kthread+0x10/0x10
   ret_from_fork+0x24/0x40
   ? __pfx_kthread+0x10/0x10
   ret_from_fork_asm+0x1a/0x30
   </TASK>

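The recursion shown in the trace can be modelled outside the kernel.  The
sketch below is a hypothetical userspace illustration, not the actual
AF_RXRPC code: a plain pthread mutex stands in for ->notify_lock, a
notification callback runs with the lock held, and the cleanup path it
falls back to tries to take the same lock again.  An error-checking mutex
reports the recursion rather than hanging:

  /* Hypothetical model of the recursion; build: cc -pthread demo.c */
  #include <errno.h>
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t notify_lock;

  /* Stands in for rxrpc_kernel_shutdown_call(). */
  static void shutdown_call(void)
  {
          if (pthread_mutex_lock(&notify_lock) == EDEADLK)
                  printf("lock recursion on notify_lock\n"); /* kernel BUGs here */
          else
                  pthread_mutex_unlock(&notify_lock);
  }

  /* Stands in for afs_put_call(): cleanup re-enters the locking layer. */
  static void put_call(void)
  {
          shutdown_call();
  }

  /* Stands in for afs_wake_up_async_call(). */
  static void wake_up_async_call(void)
  {
          int already_queued = 1; /* pretend the work item was already queued */

          if (already_queued)
                  put_call(); /* the extraneous ref is put under the lock */
  }

  int main(void)
  {
          pthread_mutexattr_t attr;

          pthread_mutexattr_init(&attr);
          pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
          pthread_mutex_init(&notify_lock, &attr);

          pthread_mutex_lock(&notify_lock);  /* AF_RXRPC holds ->notify_lock... */
          wake_up_async_call();              /* ...while notifying the AFS layer */
          pthread_mutex_unlock(&notify_lock);
          return 0;
  }
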
Signed-off-by: David Howells <dhowells at redhat.com>
Link: https://lore.kernel.org/r/1394602.1729162732@warthog.procyon.org.uk
cc: Marc Dionne <marc.dionne at auristor.com>
cc: linux-afs at lists.infradead.org
cc: linux-fsdevel at vger.kernel.org
Signed-off-by: Christian Brauner <brauner at kernel.org>
(backported from commit 610a79ffea02102899a1373fe226d949944a7ed6)
[alessiofaina: removed the call to rxrpc_kernel_put_peer();
replaced the nonexistent rxrpc_kernel_shutdown_call()/rxrpc_kernel_put_call()
pair with rxrpc_kernel_end_call();
fixed the first parameter of trace_afs_call();
resolved the conflict in afs_deferred_put_call() between
__refcount_dec_and_test() and atomic_dec_return();
replaced the nonexistent afs_unuse_server_notime() with the previously used
afs_put_server()/afs_put_cb_interest()/afs_put_addrlist()]
CVE-2024-53090
Signed-off-by: Alessio Faina <alessio.faina at canonical.com>
---
 fs/afs/internal.h |  2 ++
 fs/afs/rxrpc.c    | 78 ++++++++++++++++++++++++++++++++++-------------
 2 files changed, 58 insertions(+), 22 deletions(-)

diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index f564b09db87b..8552f9897578 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -84,6 +84,7 @@ struct afs_call {
 	wait_queue_head_t	waitq;		/* processes awaiting completion */
 	struct work_struct	async_work;	/* async I/O processor */
 	struct work_struct	work;		/* actual work processor */
+	struct work_struct	free_work;	/* Deferred free processor */
 	struct rxrpc_call	*rxcall;	/* RxRPC call handle */
 	struct key		*key;		/* security for this call */
 	struct afs_net		*net;		/* The network namespace */
@@ -793,6 +794,7 @@ extern void afs_charge_preallocation(struct work_struct *);
 extern void afs_put_call(struct afs_call *);
 extern int afs_queue_call_work(struct afs_call *);
 extern long afs_make_call(struct afs_addr_cursor *, struct afs_call *, gfp_t, bool);
+void afs_deferred_put_call(struct afs_call *call);
 extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
 					    const struct afs_call_type *,
 					    size_t, size_t);
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 8a3ac5816ad0..03cd92cb3820 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -19,6 +19,7 @@
 
 struct workqueue_struct *afs_async_calls;
 
+static void afs_deferred_free_worker(struct work_struct *work);
 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
 static long afs_wait_for_call_to_complete(struct afs_call *, struct afs_addr_cursor *);
 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
@@ -140,6 +141,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
 	call->net = net;
 	atomic_set(&call->usage, 1);
 	INIT_WORK(&call->async_work, afs_process_async_call);
+	INIT_WORK(&call->free_work, afs_deferred_free_worker);
 	init_waitqueue_head(&call->waitq);
 	spin_lock_init(&call->state_lock);
 
@@ -149,6 +151,35 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
 	return call;
 }
 
+static void afs_free_call(struct afs_call *call)
+{
+	struct afs_net *net = call->net;
+	int o;
+
+	ASSERT(!work_pending(&call->async_work));
+	ASSERT(call->type->name != NULL);
+
+	if (call->rxcall) {
+		rxrpc_kernel_end_call(net->socket, call->rxcall);
+		call->rxcall = NULL;
+	}
+	if (call->type->destructor)
+		call->type->destructor(call);
+
+	afs_put_server(call->net, call->cm_server);
+	afs_put_cb_interest(call->net, call->cbi);
+	kfree(call->request);
+
+	o = atomic_read(&net->nr_outstanding_calls);
+	trace_afs_call(call, afs_call_trace_free, 0, o,
+		       __builtin_return_address(0));
+	kfree(call);
+
+	o = atomic_dec_return(&net->nr_outstanding_calls);
+	if (o == 0)
+		wake_up_atomic_t(&net->nr_outstanding_calls);
+}
+
 /*
  * Dispose of a reference on a call.
  */
@@ -160,30 +191,32 @@ void afs_put_call(struct afs_call *call)
 
 	trace_afs_call(call, afs_call_trace_put, n, o,
 		       __builtin_return_address(0));
+	if (n == 0)
+		afs_free_call(call);
+}
 
-	ASSERTCMP(n, >=, 0);
-	if (n == 0) {
-		ASSERT(!work_pending(&call->async_work));
-		ASSERT(call->type->name != NULL);
+static void afs_deferred_free_worker(struct work_struct *work)
+{
+	struct afs_call *call = container_of(work, struct afs_call, free_work);
 
-		if (call->rxcall) {
-			rxrpc_kernel_end_call(net->socket, call->rxcall);
-			call->rxcall = NULL;
-		}
-		if (call->type->destructor)
-			call->type->destructor(call);
+	afs_free_call(call);
+}
 
-		afs_put_server(call->net, call->cm_server);
-		afs_put_cb_interest(call->net, call->cbi);
-		kfree(call->request);
-		kfree(call);
+/*
+ * Dispose of a reference on a call, deferring the cleanup to a workqueue
+ * to avoid lock recursion.
+ */
+void afs_deferred_put_call(struct afs_call *call)
+{
+	struct afs_net *net = call->net;
+	int n, o;
 
-		o = atomic_dec_return(&net->nr_outstanding_calls);
-		trace_afs_call(call, afs_call_trace_free, 0, o,
-			       __builtin_return_address(0));
-		if (o == 0)
-			wake_up_atomic_t(&net->nr_outstanding_calls);
-	}
+	n = atomic_dec_return(&call->usage);
+	o = atomic_read(&net->nr_outstanding_calls);
+	trace_afs_call(call, afs_call_trace_put, n, o,
+		       __builtin_return_address(0));
+	if (n == 0)
+		schedule_work(&call->free_work);
 }
 
 /*
@@ -636,7 +669,8 @@ static void afs_wake_up_call_waiter(struct sock *sk, struct rxrpc_call *rxcall,
 }
 
 /*
- * wake up an asynchronous call
+ * Wake up an asynchronous call.  The caller is holding the call notify
+ * spinlock around this, so we can't call afs_put_call().
  */
 static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 				   unsigned long call_user_ID)
@@ -654,7 +688,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
 			       __builtin_return_address(0));
 
 		if (!queue_work(afs_async_calls, &call->async_work))
-			afs_put_call(call);
+			afs_deferred_put_call(call);
 	}
 }
 
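For reference, the shape of the fix is the classic deferred-release
pattern: the context that must not re-enter the locking layer only drops
its reference and schedules a work item, and the actual teardown runs
later from workqueue context, where calling back into AF_RXRPC is safe.
A minimal userspace sketch of the idea (hypothetical names; a detached
pthread stands in for schedule_work()):

  /* Hypothetical model of the deferred-put pattern; build: cc -pthread put.c */
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct call {
          atomic_int usage;
  };

  /* Stands in for afs_deferred_free_worker(): teardown in worker context. */
  static void *free_worker(void *arg)
  {
          struct call *call = arg;

          printf("teardown in worker context\n"); /* safe to take locks here */
          free(call);
          return NULL;
  }

  /* Stands in for afs_deferred_put_call(). */
  static void deferred_put_call(struct call *call)
  {
          if (atomic_fetch_sub(&call->usage, 1) == 1) { /* refcount hit zero */
                  pthread_t worker;

                  pthread_create(&worker, NULL, free_worker, call);
                  pthread_detach(worker); /* fire-and-forget, like schedule_work() */
          }
  }

  int main(void)
  {
          struct call *call = malloc(sizeof(*call));

          atomic_init(&call->usage, 1);
          deferred_put_call(call);  /* would be called under ->notify_lock */
          pthread_exit(NULL);       /* let the detached worker finish */
  }

The trade-off is an extra trip through the workqueue on the path where the
work item was already queued, which the commit message notes is uncommon.
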
-- 
2.43.0
