kernel panic in nvmet_tcp when receiving malformed admin command

Grupi, Elad Elad.Grupi at dell.com
Thu Jan 14 06:49:23 EST 2021


Hi Sagi

This patch did not solve the below issue; we still see the same kernel panic.
We reproduced the issue by adding validation of the resv1 field in the connect command.
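For reference, the validation is roughly the following (a hypothetical, test-only sketch in nvmet_parse_connect_cmd, not part of the patch under review):

        /* test-only sketch: reject a connect command whose reserved
         * field is non-zero, forcing the connect failure path */
        if (cmd->connect.resv1) {
                req->error_loc = offsetof(struct nvmf_connect_command, resv1);
                return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
        }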

A few comments about your patch:

In nvmet_tcp_unmap_cmds, the connect-failure path needs to use &queue->connect instead of cmd:
+               /* failed in connect */
+               nvmet_tcp_unmap_pdu_iovec(cmd);
+               nvmet_tcp_unmap_data(cmd);
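i.e. the connect-failure branch should presumably read:

+               /* failed in connect */
+               nvmet_tcp_unmap_pdu_iovec(&queue->connect);
+               nvmet_tcp_unmap_data(&queue->connect);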

The same issue exists in nvmet_tcp_uninit_data_in_cmds:
                 /* failed in connect */
-               nvmet_tcp_finish_cmd(&queue->connect);
+               nvmet_req_uninit(&cmd->req);
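i.e. it should presumably uninit the connect command's request:

+               nvmet_req_uninit(&queue->connect.req);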

Elad

-----Original Message-----
From: Linux-nvme <linux-nvme-bounces at lists.infradead.org> On Behalf Of Sagi Grimberg
Sent: Friday, 8 January 2021 22:24
To: Engel, Amit; Anner, Ran; linux-nvme at lists.infradead.org
Cc: Zinger, Eldad; Grupi, Elad
Subject: Re: kernel panic in nvmet_tcp when receiving malformed admin command


>> Hi Sagi, is the patch addressing the below issue already out there?
> 
> No, thanks for reminding me, I'll have a look into this.

Amit, Ran,

Does this solve your issue?
This needs to be split into patches, but it would be good to understand if this works.
--
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index dc1f0f647189..c41902f7ce39 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -204,7 +204,6 @@ nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
         cmd->rbytes_done = cmd->wbytes_done = 0;
         cmd->pdu_len = 0;
         cmd->pdu_recv = 0;
-       cmd->iov = NULL;
         cmd->flags = 0;
         return cmd;
  }
@@ -294,6 +293,7 @@ static void nvmet_tcp_unmap_pdu_iovec(struct nvmet_tcp_cmd *cmd)

         for (i = 0; i < cmd->nr_mapped; i++)
                 kunmap(sg_page(&sg[i]));
+       cmd->nr_mapped = 0;
  }

  static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd)
@@ -341,6 +341,14 @@ static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
                 nvmet_tcp_fatal_error(queue);
  }

+static void nvmet_tcp_unmap_data(struct nvmet_tcp_cmd *cmd)
+{
+       kfree(cmd->iov);
+       cmd->iov = NULL;
+       sgl_free(cmd->req.sg);
+       cmd->req.sg = NULL;
+}
+
  static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
  {
         struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
@@ -375,6 +383,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
         return 0;
  err:
         sgl_free(cmd->req.sg);
+       cmd->req.sg = NULL;
         return NVME_SC_INTERNAL;
  }

@@ -571,17 +580,13 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
         } else {
                 if (queue->nvme_sq.sqhd_disabled) {
                         cmd->queue->snd_cmd = NULL;
+                       nvmet_tcp_unmap_data(cmd);
                         nvmet_tcp_put_cmd(cmd);
                 } else {
                         nvmet_setup_response_pdu(cmd);
                 }
         }

-       if (queue->nvme_sq.sqhd_disabled) {
-               kfree(cmd->iov);
-               sgl_free(cmd->req.sg);
-       }
-
         return 1;

  }
@@ -609,9 +614,8 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
         if (left)
                 return -EAGAIN;

-       kfree(cmd->iov);
-       sgl_free(cmd->req.sg);
         cmd->queue->snd_cmd = NULL;
+       nvmet_tcp_unmap_data(cmd);
         nvmet_tcp_put_cmd(cmd);
         return 1;
  }
@@ -665,6 +669,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)

         if (queue->nvme_sq.sqhd_disabled) {
                 cmd->queue->snd_cmd = NULL;
+               nvmet_tcp_unmap_data(cmd);
                 nvmet_tcp_put_cmd(cmd);
         } else {
                 nvmet_setup_response_pdu(cmd);
@@ -1082,15 +1087,16 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)

         nvmet_tcp_unmap_pdu_iovec(cmd);

-       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
-           cmd->rbytes_done == cmd->req.transfer_len) {
-               if (queue->data_digest) {
-                       nvmet_tcp_prep_recv_ddgst(cmd);
-                       return 0;
-               }
-               cmd->req.execute(&cmd->req);
+       if (queue->data_digest) {
+               nvmet_tcp_prep_recv_ddgst(cmd);
+               return 0;
         }

+       if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
+               nvmet_tcp_finish_cmd(cmd);
+       else if (cmd->rbytes_done == cmd->req.transfer_len)
+               cmd->req.execute(&cmd->req);
+
         nvmet_prepare_receive_pdu(queue);
         return 0;
  }
@@ -1120,14 +1126,16 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
                         queue->idx, cmd->req.cmd->common.command_id,
                         queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
                         le32_to_cpu(cmd->exp_ddgst));
+               nvmet_req_uninit(&cmd->req);
                 nvmet_tcp_finish_cmd(cmd);
                 nvmet_tcp_fatal_error(queue);
                 ret = -EPROTO;
                 goto out;
         }

-       if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
-           cmd->rbytes_done == cmd->req.transfer_len)
+       if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
+               nvmet_tcp_finish_cmd(cmd);
+       else if (cmd->rbytes_done == cmd->req.transfer_len)
                 cmd->req.execute(&cmd->req);
         ret = 0;
  out:
@@ -1139,7 +1147,8 @@ static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
  {
         int result = 0;

-       if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
+       if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR ||
+                    queue->state == NVMET_TCP_Q_DISCONNECTING))
                 return 0;

         if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
@@ -1333,10 +1342,26 @@ static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)

  static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
  {
-       nvmet_req_uninit(&cmd->req);
         nvmet_tcp_unmap_pdu_iovec(cmd);
-       kfree(cmd->iov);
-       sgl_free(cmd->req.sg);
+       nvmet_tcp_unmap_data(cmd);
+       nvmet_tcp_put_cmd(cmd);
+}
+
+static void nvmet_tcp_unmap_cmds(struct nvmet_tcp_queue *queue)
+{
+       struct nvmet_tcp_cmd *cmd = queue->cmds;
+       int i;
+
+       for (i = 0; i < queue->nr_cmds; i++, cmd++) {
+               nvmet_tcp_unmap_pdu_iovec(cmd);
+               nvmet_tcp_unmap_data(cmd);
+       }
+
+       if (!queue->nr_cmds) {
+               /* failed in connect */
+               nvmet_tcp_unmap_pdu_iovec(cmd);
+               nvmet_tcp_unmap_data(cmd);
+       }
  }

  static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
@@ -1346,12 +1371,12 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)

         for (i = 0; i < queue->nr_cmds; i++, cmd++) {
                 if (nvmet_tcp_need_data_in(cmd))
-                       nvmet_tcp_finish_cmd(cmd);
+                       nvmet_req_uninit(&cmd->req);
         }

         if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
                 /* failed in connect */
-               nvmet_tcp_finish_cmd(&queue->connect);
+               nvmet_req_uninit(&cmd->req);
         }
  }

@@ -1370,6 +1395,7 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
         nvmet_tcp_uninit_data_in_cmds(queue);
         nvmet_sq_destroy(&queue->nvme_sq);
         cancel_work_sync(&queue->io_work);
+       nvmet_tcp_unmap_cmds(queue);
         sock_release(queue->sock);
         nvmet_tcp_free_cmds(queue);
         if (queue->hdr_digest || queue->data_digest)
--
