[PATCH 01/14] nvme-core: remove duplicate kfree in init identify

Chaitanya Kulkarni <Chaitanya.Kulkarni at wdc.com>
Wed Feb 24 17:35:49 EST 2021


On 2/24/21 08:35, Christoph Hellwig wrote:
> On Tue, Feb 16, 2021 at 04:10:19PM -0800, Chaitanya Kulkarni wrote:
>> In nvme_init_identify(), once we call nvme_mpath_init() we free the
>> id with kfree(). Since every code path after nvme_mpath_init()
>> simply returns, this kfree() duplicates the one under the out_free
>> label.
>>
>> Instead of returning after nvme_mpath_init(), jump to the out_free
>> label and drop the duplicate call to kfree(), since the out_free
>> label already has one.
> Hmm.  I'd rather split nvme_init_identify.  Rename the publicly
> called function to something like nvme_init_ctrl_finish, and then
> split out a nvme_init_identify for just the section that deals
> with the id_ns data.
>
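
For reference, the fix in the original patch is the usual centralized
cleanup idiom. A standalone sketch of what "jump to out_free instead of
a second kfree()" means (plain malloc()/free() here, and setup_step() is
a made-up placeholder, not the driver code):

    #include <errno.h>
    #include <stdlib.h>

    /* made-up stand-in for nvme_mpath_init() or any late setup step */
    static int setup_step(void *id)
    {
        return id ? 0 : -EINVAL;
    }

    static int init_example(void)
    {
        char *id = malloc(4096);    /* stands in for the id buffer */
        int ret;

        if (!id)
            return -ENOMEM;

        ret = setup_step(id);
        if (ret < 0)
            goto out_free;          /* no separate free()+return needed */

        ret = 0;
    out_free:
        free(id);                   /* single cleanup site for every path */
        return ret;
    }

    int main(void)
    {
        return init_example();
    }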

How about the following patch in [1]? If you like [1], please suggest
how to distribute the changes across patches; I'd add a prep patch for
the rename and a second patch to split out the helper.

I've also shortened the original series to avoid churn. Along with the
documentation fixes, I'm planning to keep the following in V2, is that
okay:-

  nvme-core: mark nvme_setup_passthrough() inline
  nvme-core: use likely in nvme_init_request()
  nvme-core: don't check nvme_req flags for new req
  nvme-fc: fix the function documentation comment
  nvmet-fc: update function documentation


[1] nvme-core: split out id_ns into helper

From 4b4ec36b0c660fc54e22aa4d712db1b595b2e90d Mon Sep 17 00:00:00 2001
From: Chaitanya Kulkarni <chaitanya.kulkarni at wdc.com>
Date: Wed, 24 Feb 2021 14:14:05 -0800
Subject: [PATCH] nvme-core: split out id_ns into helper

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni at wdc.com>
---
 drivers/nvme/host/core.c   | 65 ++++++++++++++++++++------------------
 drivers/nvme/host/fc.c     |  2 +-
 drivers/nvme/host/nvme.h   |  2 +-
 drivers/nvme/host/pci.c    |  2 +-
 drivers/nvme/host/rdma.c   |  2 +-
 drivers/nvme/host/tcp.c    |  2 +-
 drivers/nvme/target/loop.c |  2 +-
 7 files changed, 41 insertions(+), 36 deletions(-)

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e68a8c4ac5a6..68b0022bc60d 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1119,7 +1119,7 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
         mutex_unlock(&ctrl->scan_lock);
     }
     if (effects & NVME_CMD_EFFECTS_CCC)
-        nvme_init_identify(ctrl);
+        nvme_init_ctrl_finish(ctrl);
     if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) {
         nvme_queue_scan(ctrl);
         flush_work(&ctrl->scan_work);
@@ -1978,7 +1978,7 @@ static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
      * In order to be more cautious use controller's max_hw_sectors value
      * to configure the maximum sectors for the write-zeroes which is
      * configured based on the controller's MDTS field in the
-     * nvme_init_identify() if available.
+     * nvme_init_ctrl_finish() if available.
      */
     if (ns->ctrl->max_hw_sectors == UINT_MAX)
         max_blocks = (u64)USHRT_MAX + 1;
@@ -3059,28 +3059,12 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi,
     return 0;
 }
 
-/*
- * Initialize the cached copies of the Identify data and various controller
- * register in our nvme_ctrl structure.  This should be called as soon as
- * the admin queue is fully up and running.
- */
 int nvme_init_identify(struct nvme_ctrl *ctrl)
 {
     struct nvme_id_ctrl *id;
+    bool prev_apst_enabled;
     int ret, page_shift;
     u32 max_hw_sectors;
-    bool prev_apst_enabled;
-
-    ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
-    if (ret) {
-        dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
-        return ret;
-    }
-    page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
-    ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
-
-    if (ctrl->vs >= NVME_VS(1, 1, 0))
-        ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
 
     ret = nvme_identify_ctrl(ctrl, &id);
     if (ret) {
@@ -3098,7 +3082,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
         ctrl->cntlid = le16_to_cpu(id->cntlid);
 
     if (!ctrl->identified) {
-        int i;
+        unsigned int i;
 
         ret = nvme_init_subsystem(ctrl, id);
         if (ret)
@@ -3136,6 +3120,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
     atomic_set(&ctrl->abort_limit, id->acl + 1);
     ctrl->vwc = id->vwc;
+    page_shift = NVME_CAP_MPSMIN(ctrl->cap) + 12;
     if (id->mdts)
         max_hw_sectors = 1 << (id->mdts + page_shift - 9);
     else
@@ -3210,16 +3195,40 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
         ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
     }
 
+    if (ctrl->apst_enabled && !prev_apst_enabled)
+        dev_pm_qos_expose_latency_tolerance(ctrl->device);
+    else if (!ctrl->apst_enabled && prev_apst_enabled)
+        dev_pm_qos_hide_latency_tolerance(ctrl->device);
+
     ret = nvme_mpath_init(ctrl, id);
+
+out_free:
     kfree(id);
+    return ret;
+}
 
-    if (ret < 0)
+/*
+ * Initialize the cached copies of the Identify data and various controller
+ * register in our nvme_ctrl structure.  This should be called as soon as
+ * the admin queue is fully up and running.
+ */
+int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
+{
+    int ret;
+
+    ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
+    if (ret) {
+        dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
         return ret;
+    }
+    ctrl->sqsize = min_t(u16, NVME_CAP_MQES(ctrl->cap), ctrl->sqsize);
 
-    if (ctrl->apst_enabled && !prev_apst_enabled)
-        dev_pm_qos_expose_latency_tolerance(ctrl->device);
-    else if (!ctrl->apst_enabled && prev_apst_enabled)
-        dev_pm_qos_hide_latency_tolerance(ctrl->device);
+    if (ctrl->vs >= NVME_VS(1, 1, 0))
+        ctrl->subsystem = NVME_CAP_NSSRC(ctrl->cap);
+
+    ret = nvme_init_identify(ctrl);
+    if (ret < 0)
+        return ret;
 
     ret = nvme_configure_apst(ctrl);
     if (ret < 0)
@@ -3246,12 +3255,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
     ctrl->identified = true;
 
     return 0;
-
-out_free:
-    kfree(id);
-    return ret;
 }
-EXPORT_SYMBOL_GPL(nvme_init_identify);
+EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
 
 static int nvme_dev_open(struct inode *inode, struct file *file)
 {
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 20dadd86e981..962987a330d6 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -3085,7 +3085,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
 
     blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
-    ret = nvme_init_identify(&ctrl->ctrl);
+    ret = nvme_init_ctrl_finish(&ctrl->ctrl);
     if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
         goto out_disconnect_admin_queue;
 
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 07b34175c6ce..624fc4334a2c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -599,7 +599,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
 void nvme_start_ctrl(struct nvme_ctrl *ctrl);
 void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
-int nvme_init_identify(struct nvme_ctrl *ctrl);
+int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 207137a0ed8e..b8f0bfaa8f2a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2652,7 +2652,7 @@ static void nvme_reset_work(struct work_struct *work)
      */
     dev->ctrl.max_integrity_segments = 1;
 
-    result = nvme_init_identify(&dev->ctrl);
+    result = nvme_init_ctrl_finish(&dev->ctrl);
     if (result)
         goto out;
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 53ac4d7442ba..9c710839b03a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -917,7 +917,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
 
     blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
-    error = nvme_init_identify(&ctrl->ctrl);
+    error = nvme_init_ctrl_finish(&ctrl->ctrl);
     if (error)
         goto out_quiesce_queue;
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 69f59d2c5799..735e768f9f43 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1875,7 +1875,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
 
     blk_mq_unquiesce_queue(ctrl->admin_q);
 
-    error = nvme_init_identify(ctrl);
+    error = nvme_init_ctrl_finish(ctrl);
     if (error)
         goto out_quiesce_queue;
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index cb6f86572b24..a7f97c8b2f77 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -396,7 +396,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 
     blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
-    error = nvme_init_identify(&ctrl->ctrl);
+    error = nvme_init_ctrl_finish(&ctrl->ctrl);
     if (error)
         goto out_cleanup_queue;
 
-- 
2.22.1
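
To spell out the call flow the split is aiming for: every transport
keeps a single call into the core, nvme_init_ctrl_finish() does the
register reads, and the Identify handling becomes an internal step. A
simplified, compilable sketch with mocked-up bodies (the real functions
take a struct nvme_ctrl and live in drivers/nvme/host/core.c):

    #include <stdio.h>

    /* split-out helper: only the Identify (id) data handling */
    static int init_identify_sketch(void)
    {
        printf("identify: cache Identify data, mpath init, kfree(id)\n");
        return 0;
    }

    /* renamed entry point: register reads first, then the helper */
    static int init_ctrl_finish_sketch(void)
    {
        printf("finish: read VS, derive sqsize/subsystem from CAP\n");
        /* delegate to the split-out helper -- not a recursive call */
        return init_identify_sketch();
    }

    int main(void)
    {
        return init_ctrl_finish_sketch();
    }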