[PATCH AUTOSEL 6.6 16/16] nvmet: always initialize cqe.result
Sasha Levin
sashal at kernel.org
Sun Jun 23 06:44:45 PDT 2024
From: Daniel Wagner <dwagner at suse.de>
[ Upstream commit cd0c1b8e045a8d2785342b385cb2684d9b48e426 ]
The spec doesn't mandate that the first two double words (aka the result field)
of a completion queue entry be set to 0 when they are not used (not specified).
Still, the target implementation returns 0 for TCP and FC, but not for RDMA.
Let's make RDMA behave the same by explicitly initializing the result field.
This prevents leaking any data from the stack.
Signed-off-by: Daniel Wagner <dwagner at suse.de>
Reviewed-by: Christoph Hellwig <hch at lst.de>
Signed-off-by: Keith Busch <kbusch at kernel.org>
Signed-off-by: Sasha Levin <sashal at kernel.org>
---
drivers/nvme/target/core.c | 1 +
drivers/nvme/target/fabrics-cmd-auth.c | 3 ---
drivers/nvme/target/fabrics-cmd.c | 6 ------
3 files changed, 1 insertion(+), 9 deletions(-)
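As background, a minimal standalone sketch (not kernel code) of the completion
entry layout this patch touches. The *_model names are made up for illustration
and plain host-endian integers stand in for the kernel's __le16/__le32/__le64
annotations; the field order follows union nvme_result / struct nvme_completion
in include/linux/nvme.h. On a little-endian host it shows why the single
result.u64 initialization added to nvmet_req_init() covers both result double
words, whereas the removed connect-handler assignments only cleared result.u32.

/*
 * Standalone illustration only -- not kernel code.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

union nvme_result_model {
	uint16_t u16;
	uint32_t u32;
	uint64_t u64;			/* spans CQE dword 0 and dword 1 */
};

struct nvme_completion_model {
	union nvme_result_model result;	/* dwords 0-1 */
	uint16_t sq_head;
	uint16_t sq_id;
	uint16_t command_id;
	uint16_t status;
};

int main(void)
{
	struct nvme_completion_model cqe;

	/* Pretend the entry still holds stale (stack) data. */
	memset(&cqe, 0xAA, sizeof(cqe));

	/* Clearing only u32 leaves dword 1 untouched (little-endian host). */
	cqe.result.u32 = 0;
	printf("after u32 clear: result = 0x%016" PRIx64 "\n", cqe.result.u64);

	/* Clearing u64, as nvmet_req_init() now does, zeroes both dwords. */
	cqe.result.u64 = 0;
	printf("after u64 clear: result = 0x%016" PRIx64 "\n", cqe.result.u64);

	return 0;
}

On a little-endian host this prints 0xaaaaaaaa00000000 after the u32 clear and
0x0000000000000000 after the u64 clear: with the zeroing moved into
nvmet_req_init(), every request starts with a fully cleared result regardless
of transport, and the per-handler assignments removed below become redundant.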
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ce7e945cb4f7e..61bf1bc1ac243 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -936,6 +936,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->metadata_sg_cnt = 0;
req->transfer_len = 0;
req->metadata_len = 0;
+ req->cqe->result.u64 = 0;
req->cqe->status = 0;
req->cqe->sq_head = 0;
req->ns = NULL;
diff --git a/drivers/nvme/target/fabrics-cmd-auth.c b/drivers/nvme/target/fabrics-cmd-auth.c
index 1d9854484e2e8..c103eba96350e 100644
--- a/drivers/nvme/target/fabrics-cmd-auth.c
+++ b/drivers/nvme/target/fabrics-cmd-auth.c
@@ -332,7 +332,6 @@ void nvmet_execute_auth_send(struct nvmet_req *req)
pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
__func__, ctrl->cntlid, req->sq->qid,
status, req->error_loc);
- req->cqe->result.u64 = 0;
if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
@@ -515,8 +514,6 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
status = nvmet_copy_to_sgl(req, 0, d, al);
kfree(d);
done:
- req->cqe->result.u64 = 0;
-
if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
nvmet_auth_sq_free(req->sq);
else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index d8da840a1c0ed..fa9e8dc921539 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -225,9 +225,6 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
@@ -305,9 +302,6 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (status)
goto out;
- /* zero out initial completion result, assign values as needed */
- req->cqe->result.u32 = 0;
-
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
le16_to_cpu(c->recfmt));
--
2.43.0