[PATCH 1/2] nvme: sanitize KATO setting
Hannes Reinecke
hare at suse.de
Thu Mar 11 10:37:26 GMT 2021

According to the NVMe base spec, Keep Alive commands should be sent at
half of the Keep Alive Timeout (KATO) interval to properly account for
round-trip times.

As we will now only ever send one KATO command per connection, we can
simply use the recommended value.

This also fixes a potential issue where the request timeout for the
KATO command does not match the value in the connect command, which
might cause spurious connection drops from the target.
Signed-off-by: Hannes Reinecke <hare at suse.de>
---
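Note (below the '---', so not part of the commit message): a minimal
userspace sketch of the new scheduling delay, assuming HZ = 250 purely
for illustration; it mirrors the NVME_DEFAULT_KATO and NVME_KATO_DELAY
definitions added to nvme.h by this patch.

    /*
     * Sketch only, not kernel code: show how often the keep-alive work
     * is queued before and after this patch, assuming HZ = 250.
     */
    #include <stdio.h>

    #define HZ			250	/* assumed tick rate, illustration only */
    #define NVME_DEFAULT_KATO	5	/* seconds, as in nvme.h */
    #define NVME_KATO_DELAY(k)	(((k) > 1) ? (k) >> 1 : (k))

    int main(void)
    {
    	unsigned int kato = NVME_DEFAULT_KATO;

    	/* Old behaviour: keep-alive queued every kato seconds */
    	printf("old delay: %u jiffies (%u s)\n", kato * HZ, kato);

    	/* New behaviour: queued at half the KATO interval (NVMe 1.4, 7.12.1) */
    	printf("new delay: %u jiffies (%.1f s)\n",
    	       NVME_KATO_DELAY(kato * HZ),
    	       (double)NVME_KATO_DELAY(kato * HZ) / HZ);
    	return 0;
    }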
drivers/nvme/host/core.c | 6 +++---
drivers/nvme/host/fabrics.c | 4 +---
drivers/nvme/host/nvme.h | 16 +++++++++++++++-
3 files changed, 19 insertions(+), 7 deletions(-)
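
Also not part of the patch: a sketch of how the KATO value advertised
in the connect command changes, using the default KATO of 5 seconds.
NVME_KATO_GRACE is the 10-second grace period the old code added; the
kernel additionally converts the value with cpu_to_le32, omitted here.

    /* Sketch only: connect-command KATO encoding before and after. */
    #include <stdio.h>

    #define NVME_DEFAULT_KATO	5	/* seconds */
    #define NVME_KATO_GRACE	10	/* seconds, removed by this patch */

    int main(void)
    {
    	unsigned int kato = NVME_DEFAULT_KATO;

    	/* Old: host advertised kato + grace to the target (milliseconds) */
    	printf("old connect.kato: %u ms\n", (kato + NVME_KATO_GRACE) * 1000);

    	/* New: host advertises the same value it uses for its own timer */
    	printf("new connect.kato: %u ms\n", kato * 1000);
    	return 0;
    }

With these assumptions the old code queued the keep-alive every 5
seconds while advertising 15 seconds to the target; the new code queues
it every 2.5 seconds and advertises 5 seconds, so the host-side timer
and the value enforced by the target line up.
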
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 82ad5eef9d0c..b56903982b27 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1223,7 +1223,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
startka = true;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvme_queue_keep_alive_work(ctrl);
}
static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1253,7 +1253,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
dev_dbg(ctrl->device,
"reschedule traffic based keep-alive timer\n");
ctrl->comp_seen = false;
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvme_queue_keep_alive_work(ctrl);
return;
}
@@ -1270,7 +1270,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
if (unlikely(ctrl->kato == 0))
return;
- queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ nvme_queue_keep_alive_work(ctrl);
}
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 604ab0e5a2ad..13c2747e3d00 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -379,10 +379,8 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
/*
* Set keep-alive timeout in seconds granularity (ms * 1000)
- * and add a grace period for controller kato enforcement
*/
- cmd.connect.kato = ctrl->kato ?
- cpu_to_le32((ctrl->kato + NVME_KATO_GRACE) * 1000) : 0;
+ cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
if (ctrl->opts->disable_sqflow)
cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 07b34175c6ce..cc74b9f8d5ff 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -27,7 +27,15 @@ extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT (admin_timeout * HZ)
#define NVME_DEFAULT_KATO 5
-#define NVME_KATO_GRACE 10
+
+/*
+ * Recommended frequency for KATO commands
+ *
+ * NVMe 1.4 section 7.12.1 states:
+ * The host should send Keep Alive commands at half of the
+ * Keep Alive Timeout accounting for transport roundtrip times [..].
+ */
+#define NVME_KATO_DELAY(k) (((k) > 1) ? (k) >> 1 : (k))
#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT 0
@@ -583,6 +591,12 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
}
+static inline void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+{
+ queue_delayed_work(nvme_wq, &ctrl->ka_work,
+ NVME_KATO_DELAY(ctrl->kato * HZ));
+}
+
void nvme_complete_rq(struct request *req);
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
--
2.29.2