[PATCH v2] nvmet-tcp: fix data digest calculation for multiple H2CData PDUs
Sagi Grimberg
sagi at grimberg.me
Mon Aug 7 04:32:52 PDT 2023
On 8/3/23 13:58, Varun Prakash wrote:
> On receiving an H2CData PDU, the current code calculates the data digest
> over the entire transfer length. This works if the host sends only one
> H2CData PDU per cmd, but if the host sends multiple H2CData PDUs per cmd,
> the target reports a data digest error.
>
> This issue was fixed in commit fda871c0ba5d ("nvmet-tcp: fix receive data
> digest calculation for multiple h2cdata PDUs").
>
> commit bac04454ef9f ("nvmet-tcp: fix kmap leak when data digest in use")
> unmaps the iovec before calculating the data digest, and
> commit 69b85e1f1d1d ("nvmet-tcp: add an helper to free the cmd buffers")
> sets cmd->nr_mapped = 0 after unmapping the iovec; both of these commits
> caused a regression in the data digest calculation.
>
> To fix this regression, commit ed0691cf5514 ("nvmet-tcp: fix regression in
> data_digest calculation") changed the logic to use the sg directly instead
> of the iovec. However, it calculates the data digest over the entire
> transfer length, which caused another regression for the multiple H2CData
> PDUs per cmd case.
>
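To spell out the rule at play here: with DDGST enabled, each H2CData PDU
carries a digest that covers only that PDU's payload, so a command the host
splits across several PDUs needs one digest check per PDU, not a single
digest over the whole transfer. A minimal illustration (not part of the
patch; crc32c() merely stands in for the crypto_ahash("crc32c") transform
the driver uses, seeding/finalization glossed over, helper name and
arguments made up):

#include <linux/crc32c.h>
#include <linux/types.h>

/* Digest attached to a single H2CData PDU: it covers only the pdu_len
 * bytes at pdu_off within the command's data buffer. */
static u32 h2c_pdu_ddgst(const void *cmd_buf, u32 pdu_off, u32 pdu_len)
{
	return crc32c(~0, cmd_buf + pdu_off, pdu_len);
}

So for a command received as two PDUs the target has to verify
h2c_pdu_ddgst(buf, 0, half) and h2c_pdu_ddgst(buf, half, len - half)
separately; one digest over cmd->req.transfer_len only matches when the
host sends a single H2CData PDU for the whole command.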
> Fixes: ed0691cf5514 ("nvmet-tcp: fix regression in data_digest calculation")
> Signed-off-by: Rakshana Sridhar <rakshanas at chelsio.com>
> Signed-off-by: Varun Prakash <varun at chelsio.com>
> ---
>
> v2:
> - used sg instead of iovec for data digest calculation
> - updated commit message
>
> drivers/nvme/target/tcp.c | 42 ++++++++++++++++++++++++++++++++++++---
> 1 file changed, 39 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
> index cd92d7ddf5ed..4d72b4b2b205 100644
> --- a/drivers/nvme/target/tcp.c
> +++ b/drivers/nvme/target/tcp.c
> @@ -415,7 +415,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
> return NVME_SC_INTERNAL;
> }
>
> -static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
> +static void nvmet_tcp_send_ddgst(struct ahash_request *hash,
> struct nvmet_tcp_cmd *cmd)
We should call it nvmet_tcp_calc_send_ddgst
> {
> ahash_request_set_crypt(hash, cmd->req.sg,
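Under the suggested name, the send-side helper is just the existing body,
i.e. something like this sketch (no functional change intended):

static void nvmet_tcp_calc_send_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	/* one digest over the whole command buffer; on the send side the
	 * target covers the full transfer with a single C2HData PDU, so
	 * this is correct there */
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}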
> @@ -447,7 +447,7 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
>
> if (queue->data_digest) {
> pdu->hdr.flags |= NVME_TCP_F_DDGST;
> - nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
> + nvmet_tcp_send_ddgst(queue->snd_hash, cmd);
> }
>
> if (cmd->queue->hdr_digest) {
> @@ -1152,11 +1152,47 @@ static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
> return nvmet_tcp_done_recv_pdu(queue);
> }
>
> +static void nvmet_tcp_recv_ddgst(struct ahash_request *hash,
> + struct nvmet_tcp_cmd *cmd)
We should call it nvmet_tcp_calc_recv_ddgst
> +{
> + struct scatterlist *sg = &cmd->req.sg[cmd->sg_idx];
> + struct bio_vec *iov = cmd->iov;
> + u32 data_length = cmd->pdu_len;
> +
> + crypto_ahash_init(hash);
> +
> + if (iov->bv_offset) {
> + struct scatterlist first_sg;
> +
> + sg_init_table(&first_sg, 1);
> + sg_set_page(&first_sg, sg_page(sg), iov->bv_len,
> + iov->bv_offset);
> +
> + data_length -= iov->bv_len;
> +
> + if (data_length) {
> + ahash_request_set_crypt(hash, &first_sg, NULL,
> + iov->bv_len);
> + crypto_ahash_update(hash);
> + sg = sg_next(sg);
> + } else {
> + ahash_request_set_crypt(hash, &first_sg,
> + (void *)&cmd->exp_ddgst, iov->bv_len);
> + crypto_ahash_finup(hash);
> + return;
> + }
What is the case for data_length == 0? I'm not entirely clear how this can
happen.
> + }
> +
> + ahash_request_set_crypt(hash, sg, (void *)&cmd->exp_ddgst,
> + data_length);
> + crypto_ahash_finup(hash);
> +}
> +
> static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
> {
> struct nvmet_tcp_queue *queue = cmd->queue;
>
> - nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
> + nvmet_tcp_recv_ddgst(queue->rcv_hash, cmd);
> queue->offset = 0;
> queue->left = NVME_TCP_DIGEST_LENGTH;
> queue->rcv_state = NVMET_TCP_RECV_DDGST;