[PATCH V2 8/8] nvme-core: fix nvme_submit_sync_cmd() args

Chaitanya Kulkarni kch at nvidia.com
Sun Mar 26 23:04:18 PDT 2023


The __nvme_submit_sync_cmd() function currently takes a long list of
eight arguments, which makes it difficult to read and debug,
particularly since the function is called from many different parts
of the codebase. This complexity also grows whenever the function is
extended, resulting in yet more arguments being added.

To address this issue, we can create a new structure called
nvme_submit_cmd_args and move all the existing arguments into this
structure. By doing this, we can pass the structure as a single argument
to the __nvme_submit_sync_cmd() function. This approach simplifies the
code and makes it more readable, and also provides a way to add future
arguments to the structure without increasing the number of function
arguments.

This pattern of using a structure for arguments is commonly used in the
block layer code, for example:

blk_mq_rq_cache_fill()
	struct blk_mq_alloc_data data = { ... }
	__blk_mq_alloc_request(data)
		blk_mq_rq_ctx_init(data)

blk_mq_alloc_and_init_hctx()
	blk_mq_alloc_request_hctx()
		struct blk_mq_alloc_data data = { ... }
		blk_mq_rq_ctx_init(data)

Signed-off-by: Chaitanya Kulkarni <kch at nvidia.com>
---
 drivers/nvme/host/auth.c    | 13 ++++++--
 drivers/nvme/host/core.c    | 61 +++++++++++++++++++++++++------------
 drivers/nvme/host/fabrics.c | 60 ++++++++++++++++++++++++++++--------
 drivers/nvme/host/nvme.h    | 16 +++++++---
 4 files changed, 112 insertions(+), 38 deletions(-)

diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index ea16a0aba679..a18c0c7ce0b7 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -64,6 +64,15 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
 	struct nvme_command cmd = {};
 	blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid);
 	struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid);
+	struct nvme_submit_cmd_args d = {
+		.q = q,
+		.cmd = &cmd,
+		.buffer = data,
+		.bufflen = data_len,
+		.qid = qid == 0 ? NVME_QID_ANY : qid,
+		.at_head = false,
+		.flags = flags,
+	};
 	int ret;
 
 	cmd.auth_common.opcode = nvme_fabrics_command;
@@ -78,9 +87,7 @@ static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
 		cmd.auth_receive.al = cpu_to_le32(data_len);
 	}
 
-	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
-				     qid == 0 ? NVME_QID_ANY : qid,
-				     0, flags);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (ret > 0)
 		dev_warn(ctrl->device,
 			"qid %d auth_send failed with status %d\n", qid, ret);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index fda851e8b7c6..105c5655410f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1025,32 +1025,30 @@ EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		union nvme_result *result, void *buffer, unsigned bufflen,
-		int qid, int at_head, blk_mq_req_flags_t flags)
+int __nvme_submit_sync_cmd(struct nvme_submit_cmd_args *d)
 {
 	struct request *req;
 	int ret;
 
-	if (qid == NVME_QID_ANY)
-		req = blk_mq_alloc_request(q, nvme_req_op(cmd), flags);
+	if (d->qid == NVME_QID_ANY)
+		req = blk_mq_alloc_request(d->q, nvme_req_op(d->cmd), d->flags);
 	else
-		req = blk_mq_alloc_request_hctx(q, nvme_req_op(cmd), flags,
-						qid - 1);
+		req = blk_mq_alloc_request_hctx(d->q, nvme_req_op(d->cmd), d->flags,
+						d->qid - 1);
 
 	if (IS_ERR(req))
 		return PTR_ERR(req);
-	nvme_init_request(req, cmd);
+	nvme_init_request(req, d->cmd);
 
-	if (buffer && bufflen) {
-		ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
+	if (d->buffer && d->bufflen) {
+		ret = blk_rq_map_kern(d->q, req, d->buffer, d->bufflen, GFP_KERNEL);
 		if (ret)
 			goto out;
 	}
 
-	ret = nvme_execute_rq(req, at_head);
-	if (result && ret >= 0)
-		*result = nvme_req(req)->result;
+	ret = nvme_execute_rq(req, d->at_head);
+	if (d->result && ret >= 0)
+		*(d->result) = nvme_req(req)->result;
  out:
 	blk_mq_free_request(req);
 	return ret;
@@ -1060,8 +1058,16 @@ EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buffer, unsigned bufflen)
 {
-	return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen,
-			NVME_QID_ANY, 0, 0);
+	struct nvme_submit_cmd_args d = {
+		.q = q,
+		.cmd = cmd,
+		.buffer = buffer,
+		.bufflen = bufflen,
+		.qid = NVME_QID_ANY,
+		.at_head = false,
+		.flags = 0,
+	};
+	return __nvme_submit_sync_cmd(&d);
 }
 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
@@ -1480,14 +1486,23 @@ static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
 {
 	union nvme_result res = { 0 };
 	struct nvme_command c = { };
+	struct nvme_submit_cmd_args d = {
+		.q = dev->admin_q,
+		.cmd = &c,
+		.result = &res,
+		.buffer = buffer,
+		.bufflen = buflen,
+		.qid = NVME_QID_ANY,
+		.at_head = false,
+		.flags = 0,
+	};
 	int ret;
 
 	c.features.opcode = op;
 	c.features.fid = cpu_to_le32(fid);
 	c.features.dword11 = cpu_to_le32(dword11);
 
-	ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &res,
-			buffer, buflen, NVME_QID_ANY, 0, 0);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (ret >= 0 && result)
 		*result = le32_to_cpu(res.u32);
 	return ret;
@@ -2209,6 +2224,15 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
 {
 	struct nvme_ctrl *ctrl = data;
 	struct nvme_command cmd = { };
+	struct nvme_submit_cmd_args d = {
+		.q = ctrl->admin_q,
+		.cmd = &cmd,
+		.buffer = buffer,
+		.bufflen = len,
+		.qid = NVME_QID_ANY,
+		.at_head = true,
+		.flags = 0,
+	};
 
 	if (send)
 		cmd.common.opcode = nvme_admin_security_send;
@@ -2218,8 +2242,7 @@ static int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t l
 	cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
 	cmd.common.cdw11 = cpu_to_le32(len);
 
-	return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
-			NVME_QID_ANY, 1, 0);
+	return __nvme_submit_sync_cmd(&d);
 }
 
 static void nvme_configure_opal(struct nvme_ctrl *ctrl, bool was_suspended)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index bbaa04a0c502..4036da49a18d 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -146,14 +146,21 @@ int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
 {
 	struct nvme_command cmd = { };
 	union nvme_result res;
+	struct nvme_submit_cmd_args d = {
+		.q = ctrl->fabrics_q,
+		.cmd = &cmd,
+		.result = &res,
+		.qid = NVME_QID_ANY,
+		.at_head = false,
+		.flags = 0,
+	};
 	int ret;
 
 	cmd.prop_get.opcode = nvme_fabrics_command;
 	cmd.prop_get.fctype = nvme_fabrics_type_property_get;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
-			NVME_QID_ANY, 0, 0);
+	ret = __nvme_submit_sync_cmd(&d);
 
 	if (ret >= 0)
 		*val = le64_to_cpu(res.u64);
@@ -191,6 +198,14 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 {
 	struct nvme_command cmd = { };
 	union nvme_result res;
+	struct nvme_submit_cmd_args d = {
+		.q = ctrl->fabrics_q,
+		.cmd = &cmd,
+		.result = &res,
+		.qid = NVME_QID_ANY,
+		.at_head = false,
+		.flags = 0,
+	};
 	int ret;
 
 	cmd.prop_get.opcode = nvme_fabrics_command;
@@ -198,8 +213,7 @@ int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 	cmd.prop_get.attrib = 1;
 	cmd.prop_get.offset = cpu_to_le32(off);
 
-	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res, NULL, 0,
-			NVME_QID_ANY, 0, 0);
+	ret = __nvme_submit_sync_cmd(&d);
 
 	if (ret >= 0)
 		*val = le64_to_cpu(res.u64);
@@ -235,6 +249,13 @@ EXPORT_SYMBOL_GPL(nvmf_reg_read64);
 int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 {
 	struct nvme_command cmd = { };
+	struct nvme_submit_cmd_args d = {
+		.q = ctrl->fabrics_q,
+		.cmd = &cmd,
+		.qid = NVME_QID_ANY,
+		.at_head = false,
+		.flags = 0,
+	};
 	int ret;
 
 	cmd.prop_set.opcode = nvme_fabrics_command;
@@ -243,8 +264,7 @@ int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 	cmd.prop_set.offset = cpu_to_le32(off);
 	cmd.prop_set.value = cpu_to_le64(val);
 
-	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, NULL, NULL, 0,
-			NVME_QID_ANY, 0, 0);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (unlikely(ret))
 		dev_err(ctrl->device,
 			"Property Set error: %d, offset %#x\n",
@@ -374,6 +394,15 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	struct nvme_command cmd = { };
 	union nvme_result res;
 	struct nvmf_connect_data *data;
+	struct nvme_submit_cmd_args d = {
+		.q = ctrl->fabrics_q,
+		.cmd = &cmd,
+		.result = &res,
+		.bufflen = sizeof(*data),
+		.qid = NVME_QID_ANY,
+		.at_head = true,
+		.flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT,
+	};
 	int ret;
 	u32 result;
 
@@ -394,14 +423,13 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
 	if (!data)
 		return -ENOMEM;
 
+	d.buffer = data;
 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
 	data->cntlid = cpu_to_le16(0xffff);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-	ret = __nvme_submit_sync_cmd(ctrl->fabrics_q, &cmd, &res,
-			data, sizeof(*data), NVME_QID_ANY, 1,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (ret) {
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
@@ -465,6 +493,15 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	struct nvme_command cmd = { };
 	struct nvmf_connect_data *data;
 	union nvme_result res;
+	struct nvme_submit_cmd_args d = {
+		.q = ctrl->connect_q,
+		.cmd = &cmd,
+		.result = &res,
+		.bufflen = sizeof(*data),
+		.qid = qid,
+		.at_head = true,
+		.flags = BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT,
+	};
 	int ret;
 	u32 result;
 
@@ -480,14 +517,13 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
 	if (!data)
 		return -ENOMEM;
 
+	d.buffer = data;
 	uuid_copy(&data->hostid, &ctrl->opts->host->id);
 	data->cntlid = cpu_to_le16(ctrl->cntlid);
 	strncpy(data->subsysnqn, ctrl->opts->subsysnqn, NVMF_NQN_SIZE);
 	strncpy(data->hostnqn, ctrl->opts->host->nqn, NVMF_NQN_SIZE);
 
-	ret = __nvme_submit_sync_cmd(ctrl->connect_q, &cmd, &res,
-			data, sizeof(*data), qid, 1,
-			BLK_MQ_REQ_RESERVED | BLK_MQ_REQ_NOWAIT);
+	ret = __nvme_submit_sync_cmd(&d);
 	if (ret) {
 		nvmf_log_connect_error(ctrl, ret, le32_to_cpu(res.u32),
 				       &cmd, data);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bf46f122e9e1..0b0b6e0468aa 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -811,12 +811,20 @@ static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
 		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
 }
 
+struct nvme_submit_cmd_args {
+	struct request_queue *q;
+	struct nvme_command *cmd;
+	union nvme_result *result;
+	void *buffer;
+	unsigned int bufflen;
+	int qid;
+	bool at_head;
+	blk_mq_req_flags_t flags;
+};
+
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 		void *buf, unsigned bufflen);
-int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
-		union nvme_result *result, void *buffer, unsigned bufflen,
-		int qid, int at_head,
-		blk_mq_req_flags_t flags);
+int __nvme_submit_sync_cmd(struct nvme_submit_cmd_args *d);
 int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
 		      unsigned int dword11, void *buffer, size_t buflen,
 		      u32 *result);
-- 
2.29.0




More information about the Linux-nvme mailing list