[PATCH V4 2/2] nvme-core: use xarray for ctrl ns tracking
Chaitanya Kulkarni
Chaitanya.Kulkarni at wdc.com
Mon Jul 20 17:32:20 EDT 2020
On 7/20/20 06:35, Christoph Hellwig wrote:
> On Sun, Jul 19, 2020 at 08:32:03PM -0700, Chaitanya Kulkarni wrote:
>> This patch replaces the ctrl->namespaces tracking from linked list to
>> xarray and improves the performance. For host even though
>> nvme_find_get_ns() doesn't fall into the fast path yet it does for
>> NVMeOF passthru patch-series which is currently under review. This
>> prepares us to improve performance for future NVMeOF passthru backend
>> since nvme_find_get_ns() uses same data structure as it does in target
>> to find namespace in I/O path from nsid specified in the nvme_rw_cmd.
>
> I'm still not really sold on that rationale..
>
>> + if (xa_empty(&ctrl->namespaces)) {
>> ret = -ENOTTY;
>> - goto out_unlock;
>> + goto out;
>> }
>> -
>> - ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
>> - if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
>> - dev_warn(ctrl->device,
>> - "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
>> - ret = -EINVAL;
>> - goto out_unlock;
>> + xa_for_each(&ctrl->namespaces, idx, ns) {
>> + if (count > 0)
>> + goto err;
>> + count++;
>
> How about factoring the check into a little
> nvme_has_multiple_namespaces helper and avoid the backwards jumping
> goto later on?
>
Added to V5.
>> + rcu_read_lock();
>> + ns = xa_load(&ctrl->namespaces, nsid);
>> + ns = ns && kref_get_unless_zero(&ns->kref) ? ns : NULL;
>> + rcu_read_unlock();
>
> The canonical way to write this is:
>
Added to V5.
> rcu_read_lock();
> ns = xa_load(&ctrl->namespaces, nsid);
> if (ns && !kref_get_unless_zero(&ns->kref))
> ns = NULL;
> rcu_read_unlock();
>
>> + out_unregister_nvm:
>
> Maybe call this out_unregister_lightnvm to be a little more descriptive.
>
Added to V5.
>> +
>> + xa_lock(&ctrl->namespaces);
>> + xa_for_each(&ctrl->namespaces, idx, ns) {
>> + tnsid = ns->head->ns_id;
>> + if (tnsid > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) {
>> + __xa_erase(&ctrl->namespaces, tnsid);
>> + xa_unlock(&ctrl->namespaces);
>> + /* Even if insert fails keep going */
>> + ret = xa_insert(&rm_array, nsid, ns, GFP_KERNEL);
>> + if (ret)
>> + pr_err("xa_insert %d\n", ret);
>> + xa_lock(&ctrl->namespaces);
>> + }
>
> And this is where I'm still worried that we now need an insert
> as part of the namespace deletion. Either keep the list here as
> I suggested before, or we just need rework the deletion here first.
>
I really want to avoid increasing the size of struct nvme_ns as
explained in the last version's cover-letter just for the sake of
deletion especially when it is approaching cache-line size.
Can you elaborate more about what you have in mind for the rework?
We can get away with a call to nvme_ns_remove() outside of the lock
instead of xa_insert() in the xa_for_each() loop so no need for
rm_array and xa_insert(), is that what you are referring to?
A basically tested patch on top of this one is available; see [1].
>
>> @@ -4119,11 +4114,18 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
>> if (ctrl->state == NVME_CTRL_DEAD)
>> nvme_kill_queues(ctrl);
>>
>> - down_write(&ctrl->namespaces_rwsem);
>> - list_splice_init(&ctrl->namespaces, &ns_list);
>> - up_write(&ctrl->namespaces_rwsem);
>> + xa_lock(&ctrl->namespaces);
>> + xa_for_each(&ctrl->namespaces, idx, ns) {
>> + __xa_erase(&ctrl->namespaces, ns->head->ns_id);
>> + xa_unlock(&ctrl->namespaces);
>> + ret = xa_insert(&rm_array, ns->head->ns_id, ns, GFP_KERNEL);
>> + if (ret)
>> + pr_err("xa_insert %d\n", ret);
>> + xa_lock(&ctrl->namespaces);
>> + }
>> + xa_unlock(&ctrl->namespaces);
>
> Same here.
>
Same here see [1].
[1] Get rid of the rm_xarray and xa_insert in the nvme_ns_remove()
path:-
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3f09774d2d54..59fb00027530 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3957,13 +3957,9 @@ static void nvme_validate_ns(struct nvme_ctrl
*ctrl, unsigned nsid)
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
unsigned nsid)
{
- struct xarray rm_array;
unsigned long tnsid;
struct nvme_ns *ns;
unsigned long idx;
- int ret;
-
- xa_init(&rm_array);
xa_lock(&ctrl->namespaces);
xa_for_each(&ctrl->namespaces, idx, ns) {
@@ -3971,17 +3967,11 @@ static void
nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
if (tnsid > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) {
__xa_erase(&ctrl->namespaces, tnsid);
xa_unlock(&ctrl->namespaces);
- /* Even if insert fails keep going */
- ret = xa_insert(&rm_array, nsid, ns, GFP_KERNEL);
- if (ret)
- pr_err("xa_insert %d\n", ret);
+ nvme_ns_remove(ns);
xa_lock(&ctrl->namespaces);
}
}
xa_unlock(&ctrl->namespaces);
-
- xa_for_each(&rm_array, idx, ns)
- nvme_ns_remove(ns);
}
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl)
@@ -4088,12 +4078,8 @@ static void nvme_scan_work(struct work_struct *work)
*/
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
- struct xarray rm_array;
struct nvme_ns *ns;
unsigned long idx;
- int ret;
-
- xa_init(&rm_array);
/*
* make sure to requeue I/O to all namespaces as these
@@ -4118,15 +4104,10 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
xa_for_each(&ctrl->namespaces, idx, ns) {
__xa_erase(&ctrl->namespaces, ns->head->ns_id);
xa_unlock(&ctrl->namespaces);
- ret = xa_insert(&rm_array, ns->head->ns_id, ns, GFP_KERNEL);
- if (ret)
- pr_err("xa_insert %d\n", ret);
+ nvme_ns_remove(ns);
xa_lock(&ctrl->namespaces);
}
xa_unlock(&ctrl->namespaces);
-
- xa_for_each(&rm_array, idx, ns)
- nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
More information about the Linux-nvme
mailing list