[PATCH 1/2] nvme-tcp: validate R2T PDU in nvme_tcp_handle_r2t()
Sagi Grimberg
sagi at grimberg.me
Mon Nov 22 02:30:06 PST 2021
> If maxh2cdata < r2t_length then driver will form multiple
> H2CData PDUs, validate R2T PDU in nvme_tcp_handle_r2t() to
> reuse nvme_tcp_setup_h2c_data_pdu().
This patch does more than what its commit message documents.
>
> Signed-off-by: Varun Prakash <varun at chelsio.com>
> ---
> drivers/nvme/host/tcp.c | 79 +++++++++++++++++++++++++++----------------------
> 1 file changed, 44 insertions(+), 35 deletions(-)
>
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index 33bc83d..92e07d2 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -44,7 +44,9 @@ struct nvme_tcp_request {
> u32 data_len;
> u32 pdu_len;
> u32 pdu_sent;
> - u16 ttag;
> + u32 h2cdata_left;
> + u32 h2cdata_offset;
> + u16 h2cdata_ttag;
None of these are needed for what this patch is supposed to do - they
belong in the next patch.
Let's limit this patch to just moving the checks to
nvme_tcp_handle_r2t().
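Something along these lines is what I'd expect patch 1 to look like
(untested sketch written against the existing code, so take it with a
grain of salt: ttag stays u16, nvme_tcp_setup_h2c_data_pdu() keeps its
pdu argument and simply becomes void):

static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
		struct nvme_tcp_r2t_pdu *pdu)
{
	struct nvme_tcp_request *req;
	struct request *rq;
	u32 r2t_length = le32_to_cpu(pdu->r2t_length);
	u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);

	rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
	if (!rq) {
		dev_err(queue->ctrl->ctrl.device,
			"got bad r2t.command_id %#x on queue %d\n",
			pdu->command_id, nvme_tcp_queue_id(queue));
		return -ENOENT;
	}
	req = blk_mq_rq_to_pdu(rq);

	/* all R2T validation lives here, so the setup helper cannot fail */
	if (unlikely(!r2t_length)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len is %u, probably a bug...\n",
			rq->tag, r2t_length);
		return -EPROTO;
	}

	if (unlikely(req->data_sent + r2t_length > req->data_len)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d r2t len %u exceeded data len %u (%zu sent)\n",
			rq->tag, r2t_length, req->data_len, req->data_sent);
		return -EPROTO;
	}

	if (unlikely(r2t_offset < req->data_sent)) {
		dev_err(queue->ctrl->ctrl.device,
			"req %d unexpected r2t offset %u (expected %zu)\n",
			rq->tag, r2t_offset, req->data_sent);
		return -EPROTO;
	}

	/* unchanged otherwise: setup the h2c data pdu and queue it */
	nvme_tcp_setup_h2c_data_pdu(req, pdu);

	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;

	nvme_tcp_queue_request(req, false, true);

	return 0;
}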
> __le16 status;
> struct list_head entry;
> struct llist_node lentry;
> @@ -572,8 +574,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
> return ret;
> }
>
> -static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
> - struct nvme_tcp_r2t_pdu *pdu)
> +static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
> {
> struct nvme_tcp_data_pdu *data = req->pdu;
> struct nvme_tcp_queue *queue = req->queue;
> @@ -581,35 +582,11 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
> u8 hdgst = nvme_tcp_hdgst_len(queue);
> u8 ddgst = nvme_tcp_ddgst_len(queue);
>
> - req->pdu_len = le32_to_cpu(pdu->r2t_length);
> + req->pdu_len = req->h2cdata_left;
> req->pdu_sent = 0;
>
> - if (unlikely(!req->pdu_len)) {
> - dev_err(queue->ctrl->ctrl.device,
> - "req %d r2t len is %u, probably a bug...\n",
> - rq->tag, req->pdu_len);
> - return -EPROTO;
> - }
> -
> - if (unlikely(req->data_sent + req->pdu_len > req->data_len)) {
> - dev_err(queue->ctrl->ctrl.device,
> - "req %d r2t len %u exceeded data len %u (%zu sent)\n",
> - rq->tag, req->pdu_len, req->data_len,
> - req->data_sent);
> - return -EPROTO;
> - }
> -
> - if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) {
> - dev_err(queue->ctrl->ctrl.device,
> - "req %d unexpected r2t offset %u (expected %zu)\n",
> - rq->tag, le32_to_cpu(pdu->r2t_offset),
> - req->data_sent);
> - return -EPROTO;
> - }
> -
> memset(data, 0, sizeof(*data));
> data->hdr.type = nvme_tcp_h2c_data;
> - data->hdr.flags = NVME_TCP_F_DATA_LAST;
> if (queue->hdr_digest)
> data->hdr.flags |= NVME_TCP_F_HDGST;
> if (queue->data_digest)
> @@ -618,11 +595,16 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
> data->hdr.pdo = data->hdr.hlen + hdgst;
> data->hdr.plen =
> cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
> - data->ttag = pdu->ttag;
> + data->ttag = req->h2cdata_ttag;
> data->command_id = nvme_cid(rq);
> - data->data_offset = pdu->r2t_offset;
> + data->data_offset = cpu_to_le32(req->h2cdata_offset);
> data->data_length = cpu_to_le32(req->pdu_len);
> - return 0;
> +
> + req->h2cdata_left -= req->pdu_len;
> + req->h2cdata_offset += req->pdu_len;
> +
> + if (!req->h2cdata_left)
> + data->hdr.flags |= NVME_TCP_F_DATA_LAST;
> }
>
> static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
> @@ -630,7 +612,8 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
> {
> struct nvme_tcp_request *req;
> struct request *rq;
> - int ret;
> + u32 r2t_length = le32_to_cpu(pdu->r2t_length);
> + u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
>
> rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
> if (!rq) {
> @@ -641,10 +624,33 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
> }
> req = blk_mq_rq_to_pdu(rq);
>
> - ret = nvme_tcp_setup_h2c_data_pdu(req, pdu);
> - if (unlikely(ret))
> - return ret;
> + if (unlikely(!r2t_length)) {
> + dev_err(queue->ctrl->ctrl.device,
> + "req %d r2t len is %u, probably a bug...\n",
> + rq->tag, r2t_length);
> + return -EPROTO;
> + }
> +
> + if (unlikely(req->data_sent + r2t_length > req->data_len)) {
> + dev_err(queue->ctrl->ctrl.device,
> + "req %d r2t len %u exceeded data len %u (%zu sent)\n",
> + rq->tag, r2t_length, req->data_len,
> + req->data_sent);
> + return -EPROTO;
> + }
> +
> + if (unlikely(r2t_offset < req->data_sent)) {
> + dev_err(queue->ctrl->ctrl.device,
> + "req %d unexpected r2t offset %u (expected %zu)\n",
> + rq->tag, r2t_offset, req->data_sent);
> + return -EPROTO;
> + }
> +
> + req->h2cdata_left = r2t_length;
> + req->h2cdata_offset = r2t_offset;
> + req->h2cdata_ttag = pdu->ttag;
>
> + nvme_tcp_setup_h2c_data_pdu(req);
> req->state = NVME_TCP_SEND_H2C_PDU;
> req->offset = 0;
To make things simpler, let's move these assignments into
nvme_tcp_setup_h2c_data_pdu(); that is also consistent with
nvme_tcp_setup_cmd_pdu(), which sets req->state and req->offset itself...
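i.e. roughly (untested fragments, on top of this patch):

	/* in nvme_tcp_handle_r2t(), after the checks */
	req->h2cdata_left = r2t_length;
	req->h2cdata_offset = r2t_offset;
	req->h2cdata_ttag = pdu->ttag;

	nvme_tcp_setup_h2c_data_pdu(req);
	nvme_tcp_queue_request(req, false, true);

	return 0;

	/* in nvme_tcp_setup_h2c_data_pdu(), mirroring nvme_tcp_setup_cmd_pdu() */
	req->state = NVME_TCP_SEND_H2C_PDU;
	req->offset = 0;
	req->pdu_len = req->h2cdata_left;
	req->pdu_sent = 0;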