[PATCH 1/1] nvmet: allow user to set req alloc flag
Chaitanya Kulkarni
chaitanya.kulkarni at wdc.com
Mon Oct 19 21:44:17 EDT 2020
By default, we set the passthru request allocation flag such that it
returns the error in the following code path and we fail the I/O when
BLK_MQ_REQ_NOWAIT is set:-
nvme_alloc_request()
 blk_mq_alloc_request()
 blk_mq_queue_enter()
  if (flag & BLK_MQ_REQ_NOWAIT)
    return -EBUSY; <-- return if busy.
On some controllers using BLK_MQ_REQ_NOWAIT ends up in I/O error where
the controller is perfectly healthy and not in a degraded state.
Block layer request allocation does allow us to wait instead of
immediately returning the error when the BLK_MQ_REQ_NOWAIT flag is not
used. This has shown to fix the I/O error problem reported under
heavy random write workload.
This patch fixes the problem with the request allocation by adding
a new configfs attribute so that user can optionally decide whether
to use BLK_MQ_REQ_NOWAIT or not. We retain the default behavior by
using BLK_MQ_REQ_NOWAIT when creating the nvmet passthru subsystem.
Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni at wdc.com>
---
drivers/nvme/target/configfs.c | 36 ++++++++++++++++++++++++++++++++++
drivers/nvme/target/core.c | 3 +++
drivers/nvme/target/nvmet.h | 1 +
drivers/nvme/target/passthru.c | 3 ++-
4 files changed, 42 insertions(+), 1 deletion(-)
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 37e1d7784e17..3892b8fb0ff6 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/blk-mq.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
@@ -736,9 +737,44 @@ static ssize_t nvmet_passthru_enable_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_passthru_, enable);
+static ssize_t nvmet_passthru_req_nowait_show(struct config_item *item,
+ char *page)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", subsys->req_nowait ? 1 : 0);
+}
+
+static ssize_t nvmet_passthru_req_nowait_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+ bool req_nowait;
+ int ret = 0;
+
+ mutex_lock(&subsys->lock);
+ if (subsys->passthru_ctrl) {
+ pr_err("disable passthru ctrl before setting req_nowait\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (strtobool(page, &req_nowait)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ subsys->req_nowait = req_nowait ? BLK_MQ_REQ_NOWAIT : 0;
+out:
+ mutex_unlock(&subsys->lock);
+ return ret ? ret : count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, req_nowait);
+
static struct configfs_attribute *nvmet_passthru_attrs[] = {
&nvmet_passthru_attr_device_path,
&nvmet_passthru_attr_enable,
+ &nvmet_passthru_attr_req_nowait,
NULL,
};
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index aafcbc424b7a..50634945bd42 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1511,6 +1511,9 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
}
subsys->cntlid_min = NVME_CNTLID_MIN;
subsys->cntlid_max = NVME_CNTLID_MAX;
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+ subsys->req_nowait = BLK_MQ_REQ_NOWAIT;
+#endif
kref_init(&subsys->ref);
mutex_init(&subsys->lock);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 559a15ccc322..a7ca8f5860c9 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -249,6 +249,7 @@ struct nvmet_subsys {
struct nvme_ctrl *passthru_ctrl;
char *passthru_ctrl_path;
struct config_group passthru_group;
+ blk_mq_req_flags_t req_nowait;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
};
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 56c571052216..7a2ce00f7a25 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -215,6 +215,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
+ blk_mq_req_flags_t req_nowait = req->sq->ctrl->subsys->req_nowait;
struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
struct request_queue *q = ctrl->admin_q;
struct nvme_ns *ns = NULL;
@@ -236,7 +237,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
q = ns->queue;
}
- rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
+ rq = nvme_alloc_request(q, req->cmd, req_nowait, NVME_QID_ANY);
if (IS_ERR(rq)) {
status = NVME_SC_INTERNAL;
goto out_put_ns;
--
2.22.1
More information about the Linux-nvme
mailing list