[PATCH 14/18] nvmet-tcp: allocate socket file
Sagi Grimberg
sagi at grimberg.me
Thu Mar 30 09:08:22 PDT 2023
On 3/29/23 16:59, Hannes Reinecke wrote:
> When using the TLS upcall we need to allocate a socket file such
> that the userspace daemon is able to use the socket.
>
> Signed-off-by: Hannes Reinecke <hare at suse.de>
> ---
> drivers/nvme/target/tcp.c | 51 +++++++++++++++++++++++++++++----------
> 1 file changed, 38 insertions(+), 13 deletions(-)
>
> diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
> index 66e8f9fd0ca7..5931971d715f 100644
> --- a/drivers/nvme/target/tcp.c
> +++ b/drivers/nvme/target/tcp.c
> @@ -96,12 +96,14 @@ struct nvmet_tcp_cmd {
>
> enum nvmet_tcp_queue_state {
> NVMET_TCP_Q_CONNECTING,
> + NVMET_TCP_Q_TLS_HANDSHAKE,
> NVMET_TCP_Q_LIVE,
> NVMET_TCP_Q_DISCONNECTING,
> };
>
> struct nvmet_tcp_queue {
> struct socket *sock;
> + struct file *sock_file;
> struct nvmet_tcp_port *port;
> struct work_struct io_work;
> struct nvmet_cq nvme_cq;
> @@ -1406,6 +1408,19 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
> write_unlock_bh(&sock->sk->sk_callback_lock);
> }
>
> +static void nvmet_tcp_close_sock(struct nvmet_tcp_queue *queue)
> +{
> + if (queue->sock_file) {
> + fput(queue->sock_file);
> + queue->sock_file = NULL;
> + queue->sock = NULL;
> + } else {
> + WARN_ON(!queue->sock->ops);
> + sock_release(queue->sock);
> + queue->sock = NULL;
> + }
> +}
> +
> static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
> {
> struct nvmet_tcp_cmd *cmd = queue->cmds;
> @@ -1455,12 +1470,11 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
> nvmet_sq_destroy(&queue->nvme_sq);
> cancel_work_sync(&queue->io_work);
> nvmet_tcp_free_cmd_data_in_buffers(queue);
> - sock_release(queue->sock);
> + nvmet_tcp_close_sock(queue);
> nvmet_tcp_free_cmds(queue);
> if (queue->hdr_digest || queue->data_digest)
> nvmet_tcp_free_crypto(queue);
> ida_free(&nvmet_tcp_queue_ida, queue->idx);
> -
> page = virt_to_head_page(queue->pf_cache.va);
> __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
> kfree(queue);
> @@ -1583,7 +1597,7 @@ static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
> return ret;
> }
>
> -static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> +static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> struct socket *newsock)
> {
> struct nvmet_tcp_queue *queue;
> @@ -1591,7 +1605,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
>
> queue = kzalloc(sizeof(*queue), GFP_KERNEL);
> if (!queue)
> - return -ENOMEM;
> + return;
>
> INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
> INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
> @@ -1599,15 +1613,28 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
> queue->port = port;
> queue->nr_cmds = 0;
> spin_lock_init(&queue->state_lock);
> - queue->state = NVMET_TCP_Q_CONNECTING;
> + if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
> + NVMF_TCP_SECTYPE_TLS13)
> + queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
> + else
> + queue->state = NVMET_TCP_Q_CONNECTING;
> INIT_LIST_HEAD(&queue->free_list);
> init_llist_head(&queue->resp_list);
> INIT_LIST_HEAD(&queue->resp_send_list);
>
> + if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
> + queue->sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
> + if (IS_ERR(queue->sock_file)) {
> + ret = PTR_ERR(queue->sock_file);
> + queue->sock_file = NULL;
> + goto out_free_queue;
> + }
> + }
Why not always allocate a sock_file, like on the host side?
More information about the Linux-nvme
mailing list