[PATCHv2 2/2] nvme-pci: remove cached shadow doorbell offsets
Keith Busch
kbusch at kernel.org
Thu Oct 14 09:45:43 PDT 2021
Real nvme hardware doesn't support the shadow doorbell feature. Remove
the overhead of saving this special feature per-queue and instead obtain
the address offsets from the device providing it.
And when this feature is in use, the specification requires all queue
updates use this mechanism, so don't treat the admin queue
differently.
Signed-off-by: Keith Busch <kbusch at kernel.org>
---
drivers/nvme/host/pci.c | 100 ++++++++++++++++------------------------
1 file changed, 41 insertions(+), 59 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9dd173bfa57b..65c0e925944c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -209,10 +209,6 @@ struct nvme_queue {
#define NVMEQ_SQ_CMB 1
#define NVMEQ_DELETE_ERROR 2
#define NVMEQ_POLLED 3
- u32 *dbbuf_sq_db;
- u32 *dbbuf_cq_db;
- u32 *dbbuf_sq_ei;
- u32 *dbbuf_cq_ei;
struct completion delete_done;
};
@@ -289,29 +285,6 @@ static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
}
}
-static void nvme_dbbuf_init(struct nvme_dev *dev,
- struct nvme_queue *nvmeq, int qid)
-{
- if (!dev->dbbuf_dbs || !qid)
- return;
-
- nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
- nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
- nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
- nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
-}
-
-static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
-{
- if (!nvmeq->qid)
- return;
-
- nvmeq->dbbuf_sq_db = NULL;
- nvmeq->dbbuf_cq_db = NULL;
- nvmeq->dbbuf_sq_ei = NULL;
- nvmeq->dbbuf_cq_ei = NULL;
-}
-
static void nvme_dbbuf_set(struct nvme_dev *dev)
{
struct nvme_command c = { };
@@ -328,13 +301,10 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
-
- for (i = 1; i <= dev->online_queues; i++)
- nvme_dbbuf_free(&dev->queues[i]);
}
}
-static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
+static inline bool nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}
@@ -343,31 +313,48 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
volatile u32 *dbbuf_ei)
{
- if (dbbuf_db) {
- u16 old_value;
+ u16 old_value;
- /*
- * Ensure that the queue is written before updating
- * the doorbell in memory
- */
- wmb();
+ /*
+ * Ensure that the queue is written before updating the doorbell in
+ * memory
+ */
+ wmb();
- old_value = *dbbuf_db;
- *dbbuf_db = value;
+ old_value = *dbbuf_db;
+ *dbbuf_db = value;
- /*
- * Ensure that the doorbell is updated before reading the event
- * index from memory. The controller needs to provide similar
- * ordering to ensure the envent index is updated before reading
- * the doorbell.
- */
- mb();
+ /*
+ * Ensure that the doorbell is updated before reading the event index
+ * from memory. The controller needs to provide similar ordering to
+ * ensure the event index is updated before reading the doorbell.
+ */
+ mb();
+ return nvme_dbbuf_need_event(*dbbuf_ei, value, old_value);
+}
- if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
- return false;
- }
+static bool nvme_dbbuf_update_sq(struct nvme_queue *nvmeq)
+{
+ struct nvme_dev *dev = nvmeq->dev;
- return true;
+ if (!dev->dbbuf_dbs)
+ return true;
+
+ return nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
+ &dev->dbbuf_dbs[sq_idx(nvmeq->qid, dev->db_stride)],
+ &dev->dbbuf_eis[sq_idx(nvmeq->qid, dev->db_stride)]);
+}
+
+static bool nvme_dbbuf_update_cq(struct nvme_queue *nvmeq)
+{
+ struct nvme_dev *dev = nvmeq->dev;
+
+ if (!dev->dbbuf_dbs)
+ return true;
+
+ return nvme_dbbuf_update_and_check_event(nvmeq->cq_head,
+ &dev->dbbuf_dbs[cq_idx(nvmeq->qid, dev->db_stride)],
+ &dev->dbbuf_eis[cq_idx(nvmeq->qid, dev->db_stride)]);
}
/*
@@ -494,8 +481,7 @@ static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
return;
}
- if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
- nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
+ if (nvme_dbbuf_update_sq(nvmeq))
writel(nvmeq->sq_tail, nvmeq->q_db);
nvmeq->last_sq_tail = nvmeq->sq_tail;
}
@@ -989,11 +975,8 @@ static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
- u16 head = nvmeq->cq_head;
-
- if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
- nvmeq->dbbuf_cq_ei))
- writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+ if (nvme_dbbuf_update_cq(nvmeq))
+ writel(nvmeq->cq_head, nvmeq->q_db + nvmeq->dev->db_stride);
}
static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
@@ -1556,7 +1539,6 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq));
- nvme_dbbuf_init(dev, nvmeq, qid);
dev->online_queues++;
wmb(); /* ensure the first interrupt sees the initialization */
}
--
2.25.4
More information about the Linux-nvme
mailing list