[PATCH] nvmet-tcp: fix data digest calculation for multiple H2CData PDUs

Sagi Grimberg sagi at grimberg.me
Mon Jul 31 06:48:06 PDT 2023


> On receiving an H2CData PDU, the current code calculates the data digest
> for the entire transfer length. This works if the host sends only one
> H2CData PDU per cmd, but if the host sends multiple H2CData PDUs per cmd,
> the target reports a data digest error.
> 
> This issue was fixed in commit fda871c0ba5d ("nvmet-tcp: fix receive data
> digest calculation for multiple h2cdata PDUs").
> 
> Commit ed0691cf5514 ("nvmet-tcp: fix regression in data_digest
> calculation") reverted the fix done in the above commit.
> 
> Fixes: ed0691cf5514 ("nvmet-tcp: fix regression in data_digest calculation")
> Signed-off-by: Rakshana Sridhar <rakshanas at chelsio.com>
> Signed-off-by: Varun Prakash <varun at chelsio.com>
> ---
>   drivers/nvme/target/tcp.c | 31 +++++++++++++++++++++++++++----
>   1 file changed, 27 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
> index 05163751f2e5..e5e3498675af 100644
> --- a/drivers/nvme/target/tcp.c
> +++ b/drivers/nvme/target/tcp.c
> @@ -415,8 +415,8 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
>   	return NVME_SC_INTERNAL;
>   }
>   
> -static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
> -		struct nvmet_tcp_cmd *cmd)
> +static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
> +				 struct nvmet_tcp_cmd *cmd)
>   {
>   	ahash_request_set_crypt(hash, cmd->req.sg,
>   		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
> @@ -447,7 +447,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
>   
>   	if (queue->data_digest) {
>   		pdu->hdr.flags |= NVME_TCP_F_DDGST;
> -		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
> +		nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
>   	}
>   
>   	if (cmd->queue->hdr_digest) {
> @@ -1152,11 +1152,34 @@ static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
>   	return nvmet_tcp_done_recv_pdu(queue);
>   }
>   
> +static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
> +				 struct nvmet_tcp_cmd *cmd)
> +{
> +	struct bio_vec *iov = cmd->iov;
> +	struct scatterlist sg;
> +	u32 data_length = cmd->pdu_len;
> +
> +	sg_init_table(&sg, 1);
> +	crypto_ahash_init(hash);
> +
> +	while (data_length) {
> +		sg_set_page(&sg, iov->bv_page, iov->bv_len, iov->bv_offset);
> +		ahash_request_set_crypt(hash, &sg, NULL, iov->bv_len);
> +		crypto_ahash_update(hash);
> +
> +		data_length -= iov->bv_len;
> +		iov++;
> +	}
> +
> +	ahash_request_set_crypt(hash, NULL, (void *)&cmd->exp_ddgst, 0);
> +	crypto_ahash_final(hash);
> +}
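
For reference, the init/update/final loop above open-codes what
crypto_ahash_digest() already does internally over a scatterlist. A
minimal sketch of the one-shot equivalent (sg and len here stand for
the scatterlist and byte count to hash, not actual struct fields):

--
	/* one shot: init, update over the whole sg list, then final */
	ahash_request_set_crypt(hash, sg, (void *)&cmd->exp_ddgst, len);
	crypto_ahash_digest(hash);
--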

Maybe instead of (re)coding crypto_ahash_digest, we can do a
little hack to pass the request sg from cmd->sg_idx, which is already
set when we build the pdu?

Something like:
--
+static void nvmet_tcp_calc_recv_ddgst(struct ahash_request *hash,
+               struct nvmet_tcp_cmd *cmd)
+{
+       struct scatterlist *sg;
+       u32 sg_offset;
+
+       sg = &cmd->req.sg[cmd->sg_idx];
+
+       /*
+        * Adjust the offset of the first sg element if the bytes received
+        * so far are not page aligned (and adjust it back after the ddgst
+        * calculation). A bit hacky, but this way we don't need to do a
+        * hash update per sg entry.
+        */
+       sg_offset = cmd->rbytes_done % PAGE_SIZE;
+       sg->offset -= sg_offset;
+       __nvmet_tcp_calc_ddgst(hash, sg, (void *)&cmd->exp_ddgst,
+                              cmd->pdu_len);
+       sg->offset += sg_offset;
+}
--
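
The snippet relies on a __nvmet_tcp_calc_ddgst() helper that is not
shown in this mail, presumably the common digest body factored out so
the send and recv paths can share it. A minimal sketch of what such a
helper could look like (hypothetical, named as in the snippet):

--
static void __nvmet_tcp_calc_ddgst(struct ahash_request *hash,
		struct scatterlist *sg, void *ddgst, u32 len)
{
	/* one-shot digest of len bytes from sg; result written to ddgst */
	ahash_request_set_crypt(hash, sg, ddgst, len);
	crypto_ahash_digest(hash);
}
--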

Untested, but I think it should be ok. It's kinda hacky, but it saves
us from doing the hash update entry by entry. Maybe it's too ugly.
