[PATCH] nvme-tcp: proper handling of tcp socket closing flows
Grupi, Elad
Elad.Grupi at dell.com
Thu Jan 28 18:07:12 EST 2021
That will work if sk_state_change is called under sk_callback_lock.
In addition, we need to call flush_work(&queue->port->accept_work) in nvmet_tcp_release_queue_work, because accept_work is still using the queue struct after sk_callback_lock is unlocked.
-----Original Message-----
From: Sagi Grimberg <sagi at grimberg.me>
Sent: Friday, 29 January 2021 0:03
To: Grupi, Elad; linux-nvme at lists.infradead.org
Subject: Re: [PATCH] nvme-tcp: proper handling of tcp socket closing flows
[EXTERNAL EMAIL]
> ---
> drivers/nvme/target/tcp.c | 24 ++++++++++++++++++++----
> 1 file changed, 20 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
> index d535080b781f..dac737bac874 100644
> --- a/drivers/nvme/target/tcp.c
> +++ b/drivers/nvme/target/tcp.c
> @@ -647,7 +647,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
> struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
> int ret = 0;
>
> - if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
> + if (!cmd) {
> cmd = nvmet_tcp_fetch_cmd(queue);
> if (unlikely(!cmd))
> return 0;
> @@ -1453,9 +1453,27 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
> sock->sk->sk_state_change = nvmet_tcp_state_change;
> queue->write_space = sock->sk->sk_write_space;
> sock->sk->sk_write_space = nvmet_tcp_write_space;
> +
> + switch (sk->sk_state) {
> + case TCP_FIN_WAIT1:
> + case TCP_CLOSE_WAIT:
> + case TCP_CLOSE:
> + /* FALLTHRU */
> + sock->sk->sk_data_ready = queue->data_ready;
> + sock->sk->sk_state_change = queue->state_change;
> + sock->sk->sk_write_space = queue->write_space;
> + sk->sk_user_data = NULL;
> + queue->state = NVMET_TCP_Q_DISCONNECTING;
> + ret = -ENOTCONN;
> + break;
> + default:
> + queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
> + ret = 0;
> + }
> +
> write_unlock_bh(&sock->sk->sk_callback_lock);
>
> - return 0;
> + return ret;
> }
>
> static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> @@ -1506,8 +1524,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> if (ret)
> goto out_destroy_sq;
>
> - queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
> -
> return 0;
> out_destroy_sq:
> mutex_lock(&nvmet_tcp_queue_mutex);
>
What about this instead?
--
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index c41902f7ce39..6388d18ca7c2 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1494,16 +1494,28 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
ip_sock_set_tos(sock->sk, inet->rcv_tos);
write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = queue;
- queue->data_ready = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = nvmet_tcp_data_ready;
- queue->state_change = sock->sk->sk_state_change;
- sock->sk->sk_state_change = nvmet_tcp_state_change;
- queue->write_space = sock->sk->sk_write_space;
- sock->sk->sk_write_space = nvmet_tcp_write_space;
+ switch (sk->sk_state) {
+ case TCP_FIN_WAIT1:
+ case TCP_CLOSE_WAIT:
+ case TCP_CLOSE:
+ /*
+ * If the socket is already closing, don't even start
+ * consuming it
+ */
+ ret = -ENOTCONN;
+ break;
+ default:
+ sock->sk->sk_user_data = queue;
+ queue->data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = nvmet_tcp_data_ready;
+ queue->state_change = sock->sk->sk_state_change;
+ sock->sk->sk_state_change = nvmet_tcp_state_change;
+ queue->write_space = sock->sk->sk_write_space;
+ sock->sk->sk_write_space = nvmet_tcp_write_space;
+ }
write_unlock_bh(&sock->sk->sk_callback_lock);
- return 0;
+ return ret;
}
--
More information about the Linux-nvme
mailing list