[PATCH 3/5] nvme: move result handling into nvme_execute_rq()
Hannes Reinecke
hare at suse.de
Thu Feb 9 06:38:18 PST 2023
Move the result handling into nvme_execute_rq() and simplify the
calling convention.
Signed-off-by: Hannes Reinecke <hare at suse.de>
---
drivers/nvme/host/core.c | 16 ++++++++++------
drivers/nvme/host/ioctl.c | 5 +++--
drivers/nvme/host/nvme.h | 3 ++-
drivers/nvme/target/passthru.c | 3 +--
4 files changed, 16 insertions(+), 11 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 78da9c6cbba8..11faebe87764 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1004,15 +1004,21 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
* >0: nvme controller's cqe status response
* <0: kernel error in lieu of controller response
*/
-int nvme_execute_rq(struct request *rq, bool at_head)
+int nvme_execute_rq(struct request *rq, union nvme_result *result,
+ bool at_head)
{
blk_status_t status;
+ int ret;
status = blk_execute_rq(rq, at_head);
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
return -EINTR;
- if (nvme_req(rq)->status)
- return nvme_req(rq)->status;
+ ret = nvme_req(rq)->status;
+ if (ret) {
+ if (result && ret >= 0)
+ *result = nvme_req(rq)->result;
+ return ret;
+ }
return blk_status_to_errno(status);
}
EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
@@ -1048,9 +1054,7 @@ int __nvme_submit_sync_cmd(struct request *req, union nvme_result *result,
if (ret)
goto out;
- ret = nvme_execute_rq(req, at_head);
- if (result && ret >= 0)
- *result = nvme_req(req)->result;
+ ret = nvme_execute_rq(req, result, at_head);
out:
blk_mq_free_request(req);
return ret;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 723e7d5b778f..cdba543cbd02 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -220,6 +220,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
u64 *result, unsigned timeout, unsigned int flags)
{
struct nvme_ns *ns = q->queuedata;
+ union nvme_result res;
struct nvme_ctrl *ctrl;
struct request *req;
void *meta = NULL;
@@ -243,9 +244,9 @@ static int nvme_submit_user_cmd(struct request_queue *q,
ctrl = nvme_req(req)->ctrl;
effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- ret = nvme_execute_rq(req, false);
+ ret = nvme_execute_rq(req, &res, false);
if (result)
- *result = le64_to_cpu(nvme_req(req)->result.u64);
+ *result = le64_to_cpu(res.u64);
if (meta)
ret = nvme_finish_user_metadata(req, meta_buffer, meta,
meta_len, ret);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9b4b3bb69e27..a63650b00d1a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1071,7 +1071,8 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
-int nvme_execute_rq(struct request *rq, bool at_head);
+int nvme_execute_rq(struct request *rq, union nvme_result *result,
+ bool at_head);
void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index aae6d5bb4fd8..f5ab4d7a8c1f 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -221,7 +221,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
int status;
effects = nvme_passthru_start(ctrl, ns, req->cmd->common.opcode);
- status = nvme_execute_rq(rq, false);
+ status = nvme_execute_rq(rq, &req->cqe->result, false);
if (status == NVME_SC_SUCCESS &&
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
@@ -238,7 +238,6 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
} else if (status < 0)
status = NVME_SC_INTERNAL;
- req->cqe->result = nvme_req(rq)->result;
nvmet_req_complete(req, status);
blk_mq_free_request(rq);
--
2.35.3
More information about the Linux-nvme
mailing list