[PATCH 3/4] nvme: store the actual queue size in ctrl->sqsize
Christoph Hellwig
hch at lst.de
Sun Dec 25 02:32:33 PST 2022
Convert from the strange on-the-wire 0's based value to a regular size
when reading the field, to avoid adjustments all over the code.
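To spell out the convention: the 0's based encoding stores one less than
the real entry count, so CAP.MQES and the Fabrics connect sqsize keep that
form on the wire while ctrl->sqsize now holds the actual number of SQ
entries.  A minimal sketch of the two conversions (illustrative only, the
helper names are made up; NVME_CAP_MQES and cpu_to_le16 are the existing
kernel macros):

  /* Illustrative helpers only, not part of this patch: */
  static inline u16 nvme_mqes_to_sqsize(u64 cap)
  {
  	/* CAP.MQES is 0's based: a raw value of 63 means 64 entries. */
  	return NVME_CAP_MQES(cap) + 1;
  }

  static inline __le16 nvme_sqsize_to_wire(u16 sqsize)
  {
  	/* The Fabrics connect command carries the 0's based form again. */
  	return cpu_to_le16(sqsize - 1);
  }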
Signed-off-by: Christoph Hellwig <hch at lst.de>
---
drivers/nvme/host/apple.c | 3 +--
drivers/nvme/host/core.c | 21 ++++++++++++++++++---
drivers/nvme/host/fabrics.c | 2 +-
drivers/nvme/host/fc.c | 12 ++++++------
drivers/nvme/host/pci.c | 4 ++--
drivers/nvme/host/rdma.c | 23 +++++++++++------------
drivers/nvme/host/tcp.c | 12 ++++++------
7 files changed, 45 insertions(+), 32 deletions(-)
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index e36aeb50b4edc1..073a1e8c021c14 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1080,8 +1080,7 @@ static void apple_nvme_reset_work(struct work_struct *work)
writeq(anv->ioq.tcb_dma_addr,
anv->mmio_nvme + APPLE_NVMMU_IOSQ_TCB_BASE);
- anv->ctrl.sqsize =
- APPLE_ANS_MAX_QUEUE_DEPTH - 1; /* 0's based queue depth */
+ anv->ctrl.sqsize = APPLE_ANS_MAX_QUEUE_DEPTH;
anv->ctrl.cap = readq(anv->mmio_nvme + NVME_REG_CAP);
dev_dbg(anv->dev, "Enabling controller now");
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cda1361e6d4fbb..f2b7bb8bcc31fa 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3242,7 +3242,8 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
return ret;
}
- ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
+ /* CAP.MQES is 0s based */
+ ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->sqsize);
if (ctrl->vs >= NVME_VS(1, 1, 0))
ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
@@ -3505,9 +3506,18 @@ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
nvme_show_int_function(cntlid);
nvme_show_int_function(numa_node);
nvme_show_int_function(queue_count);
-nvme_show_int_function(sqsize);
nvme_show_int_function(kato);
+static ssize_t sqsize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ /* Report the 0s based register value for historic reasons: */
+ return sysfs_emit(buf, "%d\n", ctrl->sqsize - 1);
+}
+static DEVICE_ATTR(sqsize, S_IRUGO, sqsize_show, NULL);
+
static ssize_t nvme_sysfs_delete(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -4897,7 +4907,12 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
memset(set, 0, sizeof(*set));
set->ops = ops;
+
+ /*
+ * Leave an empty slot in the SQ to deal with queue wrap conditions.
+ */
- set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
+ set->queue_depth = min_t(unsigned, ctrl->sqsize - 1, BLK_MQ_MAX_DEPTH - 1);
+
/*
* Some Apple controllers requires tags to be unique across admin and
* the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ce27276f552dad..bb3c611136c90c 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -464,7 +464,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
cmd.connect.opcode = nvme_fabrics_command;
cmd.connect.fctype = nvme_fabrics_type_connect;
cmd.connect.qid = cpu_to_le16(qid);
- cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize);
+ cmd.connect.sqsize = cpu_to_le16(ctrl->sqsize - 1);
if (ctrl->opts->disable_sqflow)
cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 4564f16a0b203c..81278750b8c45d 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2922,11 +2922,11 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
if (ret)
return ret;
- ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize);
if (ret)
goto out_cleanup_tagset;
- ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize);
if (ret)
goto out_delete_hw_queues;
@@ -2982,11 +2982,11 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
}
- ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize);
if (ret)
goto out_free_io_queues;
- ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
+ ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize);
if (ret)
goto out_delete_hw_queues;
@@ -3148,7 +3148,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
"to maxcmd\n",
opts->queue_size, ctrl->ctrl.maxcmd);
opts->queue_size = ctrl->ctrl.maxcmd;
- ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.sqsize = opts->queue_size;
}
ret = nvme_fc_init_aen_ops(ctrl);
@@ -3509,7 +3509,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
lport->ops->max_hw_queues);
ctrl->ctrl.queue_count++; /* +1 for admin queue */
- ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.sqsize = opts->queue_size;
ctrl->ctrl.kato = opts->kato;
ctrl->ctrl.cntlid = 0xffff;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b13baccedb4a95..a621dd7a0a8efa 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2335,7 +2335,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
sizeof(struct nvme_command));
if (result > 0) {
dev->q_depth = result;
- dev->ctrl.sqsize = result - 1;
+ dev->ctrl.sqsize = result;
} else {
dev->cmb_use_sqes = false;
}
@@ -2579,7 +2579,7 @@ static int nvme_pci_enable(struct nvme_dev *dev)
dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
dev->q_depth);
}
- dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
+ dev->ctrl.sqsize = dev->q_depth;
nvme_map_cmb(dev);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index bbad26b82b56dd..26306f7b80c2e9 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -773,8 +773,7 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
}
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
- ret = nvme_rdma_alloc_queue(ctrl, i,
- ctrl->ctrl.sqsize + 1);
+ ret = nvme_rdma_alloc_queue(ctrl, i, ctrl->ctrl.sqsize);
if (ret)
goto out_free_queues;
}
@@ -1059,24 +1058,24 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
goto destroy_admin;
}
- if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
+ if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize) {
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl sqsize %u, clamping down\n",
- ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
+ ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize);
}
- if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ if (ctrl->ctrl.sqsize > NVME_RDMA_MAX_QUEUE_SIZE) {
dev_warn(ctrl->ctrl.device,
"ctrl sqsize %u > max queue size %u, clamping down\n",
- ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
- ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ ctrl->ctrl.sqsize, NVME_RDMA_MAX_QUEUE_SIZE);
+ ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE;
}
- if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+ if (ctrl->ctrl.sqsize > ctrl->ctrl.maxcmd) {
dev_warn(ctrl->ctrl.device,
"sqsize %u > ctrl maxcmd %u, clamping down\n",
- ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
- ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
+ ctrl->ctrl.sqsize, ctrl->ctrl.maxcmd);
+ ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd;
}
if (ctrl->ctrl.sgls & (1 << 20))
@@ -1891,7 +1890,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
* 1's based representation of sqsize.
*/
priv.hrqsize = cpu_to_le16(queue->queue_size);
- priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+ priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize - 1);
}
ret = rdma_connect_locked(queue->cm_id, &param);
@@ -2338,7 +2337,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
opts->nr_poll_queues + 1;
- ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.sqsize = opts->queue_size;
ctrl->ctrl.kato = opts->kato;
ret = -ENOMEM;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index b69b89166b6b96..37a96cbd8929db 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2047,16 +2047,16 @@ static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
goto destroy_admin;
}
- if (opts->queue_size > ctrl->sqsize + 1)
+ if (opts->queue_size > ctrl->sqsize)
dev_warn(ctrl->device,
"queue_size %zu > ctrl sqsize %u, clamping down\n",
- opts->queue_size, ctrl->sqsize + 1);
+ opts->queue_size, ctrl->sqsize);
- if (ctrl->sqsize + 1 > ctrl->maxcmd) {
+ if (ctrl->sqsize > ctrl->maxcmd) {
dev_warn(ctrl->device,
"sqsize %u > ctrl maxcmd %u, clamping down\n",
- ctrl->sqsize + 1, ctrl->maxcmd);
- ctrl->sqsize = ctrl->maxcmd - 1;
+ ctrl->sqsize, ctrl->maxcmd);
+ ctrl->sqsize = ctrl->maxcmd;
}
if (ctrl->queue_count > 1) {
@@ -2562,7 +2562,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
ctrl->ctrl.opts = opts;
ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
opts->nr_poll_queues + 1;
- ctrl->ctrl.sqsize = opts->queue_size - 1;
+ ctrl->ctrl.sqsize = opts->queue_size;
ctrl->ctrl.kato = opts->kato;
INIT_DELAYED_WORK(&ctrl->connect_work,
--
2.35.1