[PATCH] nvme-tcp: add basic support for the C2HTermReq PDU
Maurizio Lombardi
mlombard at bsdbackstore.eu
Mon Feb 24 06:38:16 PST 2025
On Mon Feb 17, 2025 at 11:56 AM CET, Hannes Reinecke wrote:
> Can you add support for nvmet, too, such that we can test the patch?
> (And maybe even a blktest script for it?)
A possible target-side implementation is below.
The open question is whether it is acceptable to send the PDU in
blocking mode (MSG_DONTWAIT unset).
Errors in nvmet_send_c2h_term() can be safely ignored because we
perform fatal error recovery immediately afterwards in any case.
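For reference, a non-blocking variant would only differ in the msg_flags
and in how a failed send is handled; a minimal sketch (hypothetical helper,
not part of the patch below) could simply drop the C2HTermReq if the socket
buffer is full:

/*
 * Sketch only, not part of the patch: send the C2HTermReq with
 * MSG_DONTWAIT set and drop it on failure, since the connection is
 * torn down right afterwards anyway.
 */
static void nvmet_try_send_c2h_term(struct socket *sock,
				    void *term_pdu, size_t pdu_size)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };
	struct bio_vec bvec;
	int ret;

	bvec_set_virt(&bvec, term_pdu, pdu_size);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, pdu_size);

	ret = sock_sendmsg(sock, &msg);
	if (ret < 0)
		pr_debug("C2HTermReq not sent (%d), dropping it\n", ret);
}
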
Example of dmesg in the target:
nvmet_tcp: queue 2: header digest error: recv 0xcf5f1cf7 expected 0x751607d3
nvmet: ctrl 1 fatal error occurred!
Example of dmesg in the host:
nvme nvme0: Received C2HTermReq (FES = Header Digest Error)
nvme nvme0: C2HTermReq: invalid digest = 0xcf5f1cf7
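For context, the two host messages come from decoding the FES and FEI fields
of the received C2HTermReq; roughly along these lines (hypothetical helper,
only meant to illustrate the decoding, not the actual code from the host-side
patch):

/*
 * Illustration only: how the FES/FEI fields of a received C2HTermReq
 * map to the two host messages above.
 */
static void nvme_tcp_dump_c2h_term(struct nvme_tcp_term_pdu *term)
{
	u16 fes = le16_to_cpu(term->fes);
	u32 fei = le16_to_cpu(term->feil) |
		  ((u32)le16_to_cpu(term->feiu) << 16);

	/* the host translates the FES value to a human-readable string */
	pr_err("Received C2HTermReq (FES = %#x)\n", fes);
	if (fes == NVME_TCP_FES_HDR_DIGEST_ERR)
		/* this patch fills FEI with the header digest the target received */
		pr_err("C2HTermReq: invalid digest = %#x\n", fei);
}
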
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 7c51c2a8c109..16438f2a624d 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -217,6 +217,9 @@ static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
+static void nvmet_send_c2h_term(struct nvmet_tcp_queue *queue,
+ enum nvme_tcp_fatal_error_status fes,
+ u32 field_offset);
static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd)
@@ -322,6 +325,8 @@ static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
exp_digest = *(__le32 *)(pdu + hdr->hlen);
if (recv_digest != exp_digest) {
+ /* Restore the original value for C2HTermReq */
+ *(__le32 *)(pdu + hdr->hlen) = recv_digest;
pr_err("queue %d: header digest error: recv %#x expected %#x\n",
queue->idx, le32_to_cpu(recv_digest),
le32_to_cpu(exp_digest));
@@ -999,6 +1004,7 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
pr_err("ttag %u unexpected data offset %u (expected %u)\n",
data->ttag, le32_to_cpu(data->data_offset),
cmd->rbytes_done);
+ nvmet_send_c2h_term(queue, NVME_TCP_FES_DATA_OUT_OF_RANGE, 0);
goto err_proto;
}
@@ -1012,6 +1018,8 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
cmd->pdu_len == 0 ||
cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
+ if (cmd->pdu_len > NVMET_TCP_MAXH2CDATA)
+ nvmet_send_c2h_term(queue, NVME_TCP_FES_DATA_LIMIT_EXCEEDED, 0);
goto err_proto;
}
cmd->pdu_recv = 0;
@@ -1173,6 +1181,55 @@ static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
return ret;
}
+static void nvmet_send_c2h_term(struct nvmet_tcp_queue *queue,
+ enum nvme_tcp_fatal_error_status fes,
+ u32 field_offset)
+{
+ struct nvme_tcp_cmd_pdu *cmd = &queue->pdu.cmd;
+ struct nvme_tcp_term_pdu *term_pdu;
+ size_t cmd_size = nvmet_tcp_pdu_size(cmd->hdr.type);
+ size_t pdu_size = sizeof(*term_pdu) + cmd_size;
+ struct msghdr msg = { .msg_flags = MSG_EOR };
+ u32 fei;
+ struct bio_vec bvec;
+
+ if (!cmd_size)
+ return;
+
+ term_pdu = kzalloc(pdu_size, GFP_KERNEL);
+ if (!term_pdu)
+ return;
+
+ switch (fes) {
+ case NVME_TCP_FES_INVALID_PDU_HDR:
+ case NVME_TCP_FES_UNSUPPORTED_PARAM:
+ fei = field_offset;
+ break;
+ case NVME_TCP_FES_HDR_DIGEST_ERR:
+ fei = le32_to_cpu(*(__le32 *)((u8 *)cmd + cmd->hdr.hlen));
+ break;
+ default:
+ fei = 0;
+ break;
+ }
+ term_pdu->feil = cpu_to_le16(lower_16_bits(fei));
+ term_pdu->feiu = cpu_to_le16(upper_16_bits(fei));
+ term_pdu->fes = cpu_to_le16(fes);
+
+ memcpy((u8 *)term_pdu + sizeof(*term_pdu), cmd, cmd_size);
+
+ term_pdu->hdr.type = nvme_tcp_c2h_term;
+ term_pdu->hdr.flags = 0;
+ term_pdu->hdr.hlen = sizeof(*term_pdu);
+ term_pdu->hdr.plen = cpu_to_le32(pdu_size);
+
+ bvec_set_virt(&bvec, (void *)term_pdu, pdu_size);
+ iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, pdu_size);
+ sock_sendmsg(queue->sock, &msg);
+
+ kfree(term_pdu);
+}
+
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
@@ -1223,6 +1280,7 @@ static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
if (queue->hdr_digest &&
nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
+ nvmet_send_c2h_term(queue, NVME_TCP_FES_HDR_DIGEST_ERR, 0);
nvmet_tcp_fatal_error(queue); /* fatal */
return -EPROTO;
}
--
2.43.5