[PATCH RFC v3 10/11] nvmet: command tracking
Maurizio Lombardi
mlombard at redhat.com
Mon Mar 24 03:23:09 PDT 2025
From: Chris Leech <cleech at redhat.com>
Enable tracking of all outstanding requests in an XArray
Signed-off-by: Chris Leech <cleech at redhat.com>
---
drivers/nvme/target/core.c | 29 ++++++++++++++++++++++++++++-
drivers/nvme/target/nvmet.h | 4 ++++
2 files changed, 32 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 5622b0608d55..b446db3804f1 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -809,10 +809,26 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
 	nvmet_put_namespace(ns);
 }
 
+#if IS_ENABLED(CONFIG_NVME_TARGET_DELAY_REQUESTS)
+static void nvmet_delayed_execute_req(struct work_struct *work);
+#endif
+
 void nvmet_req_complete(struct nvmet_req *req, u16 status)
 {
 	struct nvmet_sq *sq = req->sq;
+#if IS_ENABLED(CONFIG_NVME_TARGET_DELAY_REQUESTS)
+	unsigned long flags;
+
+	/*
+	 * Erase unconditionally: the index is only populated for delayed
+	 * requests, and erasing an absent index is a no-op.  Do not test
+	 * req->req_work.work.func here — req_work is only initialized
+	 * (INIT_DELAYED_WORK) on the delayed path, so for ordinary requests
+	 * that field is uninitialized or stale from a previous command.
+	 */
+	xa_lock_irqsave(&sq->outstanding_requests, flags);
+	__xa_erase(&sq->outstanding_requests, req->cmd->common.command_id);
+	xa_unlock_irqrestore(&sq->outstanding_requests, flags);
+#endif
 	__nvmet_req_complete(req, status);
 	percpu_ref_put(&sq->ref);
 }
@@ -984,7 +997,10 @@ int nvmet_sq_init(struct nvmet_sq *sq)
 	init_completion(&sq->free_done);
 	init_completion(&sq->confirm_done);
 	nvmet_auth_sq_init(sq);
-
+#if IS_ENABLED(CONFIG_NVME_TARGET_DELAY_REQUESTS)
+	/* IRQ-safe lock class: entries are erased from completion context */
+	xa_init_flags(&sq->outstanding_requests, XA_FLAGS_LOCK_IRQ);
+#endif
 	return 0;
 }
 EXPORT_SYMBOL_GPL(nvmet_sq_init);
@@ -1766,6 +1781,7 @@ void nvmet_execute_request(struct nvmet_req *req) {
struct nvmet_ctrl *ctrl = req->sq->ctrl;
int delay_count;
u32 delay_msec;
+ unsigned long flags;
if (unlikely(req->sq->qid == 0))
return req->execute(req);
@@ -1777,6 +1793,18 @@ void nvmet_execute_request(struct nvmet_req *req) {
 	if (!(ctrl && delay_count && delay_msec))
 		return req->execute(req);
 
+	xa_lock_irqsave(&req->sq->outstanding_requests, flags);
+	/* GFP_ATOMIC: the xa_lock is held with interrupts disabled here */
+	int ret = __xa_insert(&req->sq->outstanding_requests,
+			req->cmd->common.command_id, req, GFP_ATOMIC);
+	xa_unlock_irqrestore(&req->sq->outstanding_requests, flags);
+
+	if (ret) {
+		pr_err("nvmet: failure to delay command %d\n",
+				req->cmd->common.command_id);
+		return req->execute(req);
+	}
+
 	INIT_DELAYED_WORK(&req->req_work, nvmet_delayed_execute_req);
 	queue_delayed_work(nvmet_wq, &req->req_work, msecs_to_jiffies(delay_msec));
 }
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 1bfdddceda15..fc61f28c63da 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -172,6 +172,10 @@ struct nvmet_sq {
 #endif
 	struct completion	free_done;
 	struct completion	confirm_done;
+#if IS_ENABLED(CONFIG_NVME_TARGET_DELAY_REQUESTS)
+	/* delayed requests, indexed by command_id; IRQ-safe xa_lock */
+	struct xarray		outstanding_requests;
+#endif
 };
 
 struct nvmet_ana_group {
@@ -573,6 +576,9 @@ size_t nvmet_req_transfer_len(struct nvmet_req *req);
 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
 void nvmet_req_complete(struct nvmet_req *req, u16 status);
+/* NOTE(review): declared but not defined anywhere in this patch — confirm a
+ * later patch in the series adds the definition, or drop this declaration. */
+void nvmet_req_complete_delayed(struct nvmet_req *req, u16 status);
 int nvmet_req_alloc_sgls(struct nvmet_req *req);
 void nvmet_req_free_sgls(struct nvmet_req *req);
--
2.43.5
More information about the Linux-nvme
mailing list