[PATCH RFC 1/1] nvmet-tcp: add support for C2HTermReq
Maurizio Lombardi
mlombard at redhat.com
Wed Mar 19 09:24:49 PDT 2025
Send a C2HTermReq PDU to the host when a header digest error is
detected, when receiving ICReq PDUs with invalid parameters or
when H2CData PDUs have invalid data offsets or sizes.
Signed-off-by: Maurizio Lombardi <mlombard at redhat.com>
---
drivers/nvme/target/tcp.c | 70 +++++++++++++++++++++++++++++++++++++++
1 file changed, 70 insertions(+)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 4f9cac8a5abe..c9aac35c90c3 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -217,6 +217,10 @@ static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
+static void nvmet_send_c2h_term(struct nvmet_tcp_queue *queue,
+ void *pdu,
+ enum nvme_tcp_fatal_error_status fes,
+ u32 field_offset);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
@@ -322,6 +326,8 @@ static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
exp_digest = *(__le32 *)(pdu + hdr->hlen);
if (recv_digest != exp_digest) {
+ /* Restore the original value for C2HTermReq */
+ *(__le32 *)(pdu + hdr->hlen) = recv_digest;
pr_err("queue %d: header digest error: recv %#x expected %#x\n",
queue->idx, le32_to_cpu(recv_digest),
le32_to_cpu(exp_digest));
@@ -910,11 +916,17 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
}
if (icreq->pfv != NVME_TCP_PFV_1_0) {
+ nvmet_send_c2h_term(queue, icreq,
+ NVME_TCP_FES_UNSUPPORTED_PARAM,
+ offsetof(struct nvme_tcp_icreq_pdu, pfv));
pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
return -EPROTO;
}
if (icreq->hpda != 0) {
+ nvmet_send_c2h_term(queue, icreq,
+ NVME_TCP_FES_UNSUPPORTED_PARAM,
+ offsetof(struct nvme_tcp_icreq_pdu, hpda));
pr_err("queue %d: unsupported hpda %d\n", queue->idx,
icreq->hpda);
return -EPROTO;
@@ -1006,6 +1018,8 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
data->ttag, le32_to_cpu(data->data_offset),
cmd->rbytes_done);
+ nvmet_send_c2h_term(queue, data,
+ NVME_TCP_FES_DATA_OUT_OF_RANGE, 0);
goto err_proto;
}
@@ -1019,6 +1033,11 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
cmd->pdu_len == 0 ||
cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+ if (cmd->pdu_len > NVMET_TCP_MAXH2CDATA) {
+ nvmet_send_c2h_term(queue, data,
+ NVME_TCP_FES_DATA_LIMIT_EXCEEDED,
+ 0);
+ }
goto err_proto;
}
cmd->pdu_recv = 0;
@@ -1180,6 +1199,55 @@ static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
return ret;
}
+/*
+ * Send a C2HTermReq PDU to the host to report a fatal transport error.
+ * Per the NVMe/TCP spec, the PDU carries the Fatal Error Status (FES),
+ * the Fatal Error Information (FEI) and a copy of the offending PDU
+ * header. Best effort: allocation or send failures are only logged,
+ * since the connection is being torn down anyway.
+ */
+static void nvmet_send_c2h_term(struct nvmet_tcp_queue *queue, void *pdu,
+				enum nvme_tcp_fatal_error_status fes,
+				u32 field_offset)
+{
+	struct nvme_tcp_term_pdu *term_pdu;
+	struct nvme_tcp_hdr *hdr = pdu;
+	size_t cmd_size = nvmet_tcp_pdu_size(hdr->type);
+	size_t pdu_size = sizeof(*term_pdu) + cmd_size;
+	struct msghdr msg = { .msg_flags = MSG_EOR | MSG_DONTWAIT };
+	struct bio_vec bvec;
+	u32 fei;	/* FEI built in CPU order, split into le16 halves below */
+
+	/* Unknown PDU type: nothing sensible to echo back to the host */
+	if (!cmd_size)
+		return;
+
+	term_pdu = kzalloc(pdu_size, GFP_KERNEL);
+	if (!term_pdu)
+		return;
+
+	switch (fes) {
+	case NVME_TCP_FES_INVALID_PDU_HDR:
+	case NVME_TCP_FES_UNSUPPORTED_PARAM:
+		/* FEI is the byte offset of the offending header field */
+		fei = field_offset;
+		break;
+	case NVME_TCP_FES_HDR_DIGEST_ERR:
+		/* FEI is the header digest value received on the wire */
+		fei = le32_to_cpu(*(__le32 *)((u8 *)hdr + hdr->hlen));
+		break;
+	default:
+		fei = 0;
+		break;
+	}
+	term_pdu->feil = cpu_to_le16(lower_16_bits(fei));
+	term_pdu->feiu = cpu_to_le16(upper_16_bits(fei));
+	term_pdu->fes = cpu_to_le16(fes);
+
+	/* Echo the offending PDU header right after the term PDU header */
+	memcpy((u8 *)term_pdu + sizeof(*term_pdu), pdu, cmd_size);
+
+	term_pdu->hdr.type = nvme_tcp_c2h_term;
+	term_pdu->hdr.flags = 0;
+	term_pdu->hdr.hlen = sizeof(*term_pdu);
+	term_pdu->hdr.plen = cpu_to_le32(pdu_size);
+
+	bvec_set_virt(&bvec, (void *)term_pdu, pdu_size);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, pdu_size);
+	if (sock_sendmsg(queue->sock, &msg) < 0)
+		pr_err("queue %d: failed to send C2HTermReq PDU\n", queue->idx);
+
+	kfree(term_pdu);
+}
+
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
@@ -1230,6 +1298,8 @@ static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
if (queue->hdr_digest &&
nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
+ nvmet_send_c2h_term(queue, &queue->pdu,
+ NVME_TCP_FES_HDR_DIGEST_ERR, 0);
nvmet_tcp_fatal_error(queue); /* fatal */
return -EPROTO;
}
--
2.43.5
More information about the Linux-nvme
mailing list