[PATCH V2 1/2] nvme-core: use xarray for ctrl ns tracking
Christoph Hellwig
hch at lst.de
Wed Jul 1 09:12:35 EDT 2020
[willy: a comment/request on the xa_load API below]
On Tue, Jun 30, 2020 at 07:25:16PM -0700, Chaitanya Kulkarni wrote:
> This patch replaces the ctrl->namespaces tracking from linked list to
> xarray and improves the performance.
The performance improvement needs to be clearly stated here.
> static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
> {
> + struct nvme_id_ctrl *id;
> struct nvme_ns *ns;
> + int ret = 0;
>
> + if (xa_empty(&ctrl->namespaces)) {
> ret = -ENOTTY;
> + goto out;
> }
>
> - ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
> - if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
> - dev_warn(ctrl->device,
> - "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
> + /* Let the scan work finish updating ctrl->namespaces */
> + flush_work(&ctrl->scan_work);
> + if (nvme_identify_ctrl(ctrl, &id)) {
> + dev_err(ctrl->device, "nvme_identify_ctrl() failed\n");
> ret = -EINVAL;
> - goto out_unlock;
> + goto out;
> + }
> + if (le32_to_cpu(id->nn) > 1) {
> + dev_warn(ctrl->device,
> + "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
> + goto out;
> }
This code doesn't make any sense at all. Why does a patch changing
data structures add new calls that go out on the wire?
> + struct nvme_ns *ns;
> + XA_STATE(xas, &ctrl->namespaces, nsid);
>
> + rcu_read_lock();
> + do {
> + ns = xas_load(&xas);
> + if (xa_is_zero(ns))
> + ns = NULL;
> + } while (xas_retry(&xas, ns));
> + ns = ns && kref_get_unless_zero(&ns->kref) ? ns : NULL;
> + rcu_read_unlock();
This looks pretty weird, but I think the real problem is in the xarray
API: for the typical lookup pattern we'd want an xa_load variant with
external RCU locking:
rcu_read_lock();
ns = xa_load_rcu(&ctrl->namespaces, nsid);
if (ns && !kref_get_unless_zero(&ns->kref))
ns = NULL;
rcu_read_unlock();
instead of duplicating this fairly arcane loop in all kinds of callers.
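willy: a completely untested sketch of what I have in mind - the
xa_load_rcu name is just a suggestion and no such helper exists in the
xarray API today; it is essentially xa_load() minus the internal
rcu_read_lock():

/*
 * Like xa_load(), but the caller must hold rcu_read_lock() and keep
 * holding it for as long as the returned entry is used.  The
 * xas_retry() loop is handled internally.
 */
static inline void *xa_load_rcu(struct xarray *xa, unsigned long index)
{
	XA_STATE(xas, xa, index);
	void *entry;

	do {
		entry = xas_load(&xas);
		if (xa_is_zero(entry))
			entry = NULL;
	} while (xas_retry(&xas, entry));

	return entry;
}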
> - down_write(&ctrl->namespaces_rwsem);
> - list_add_tail(&ns->list, &ctrl->namespaces);
> - up_write(&ctrl->namespaces_rwsem);
> + ret = xa_insert(&ctrl->namespaces, nsid, ns, GFP_KERNEL);
> + if (ret) {
> + switch (ret) {
> + case -ENOMEM:
> + dev_err(ctrl->device,
> + "xa insert memory allocation\n");
> + break;
> + case -EBUSY:
> + dev_err(ctrl->device,
> + "xa insert entry already present\n");
> + break;
> + }
> + }
No need for the switch and the detailed printks here, but we do need
actual error handling.
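I.e. just propagate the error and unwind - rough sketch only, the
out_unlink label is made up and depends on what nvme_alloc_ns has set
up before this point:

	ret = xa_insert(&ctrl->namespaces, nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_unlink;	/* hypothetical unwind label */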
> static void nvme_ns_remove(struct nvme_ns *ns)
> {
> + struct xarray *xa = &ns->ctrl->namespaces;
> + bool free;
> +
> if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
> return;
>
> @@ -3740,12 +3749,14 @@ static void nvme_ns_remove(struct nvme_ns *ns)
> blk_integrity_unregister(ns->disk);
> }
>
> - down_write(&ns->ctrl->namespaces_rwsem);
> - list_del_init(&ns->list);
> - up_write(&ns->ctrl->namespaces_rwsem);
> + xa_lock(xa);
> + __xa_erase(xa, ns->head->ns_id);
> + free = refcount_dec_and_test(&ns->kref.refcount) ? true : false;
> + xa_unlock(xa);
>
> nvme_mpath_check_last_path(ns);
> - nvme_put_ns(ns);
> + if (free)
> + __nvme_free_ns(ns);
This looks very strange to me. Shouldn't this be a normal xa_erase
followed by a normal nvme_put_ns? The driver code certainly has no
business poking into the kref internals.
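Something like:

	xa_erase(&ns->ctrl->namespaces, ns->head->ns_id);

	nvme_mpath_check_last_path(ns);
	nvme_put_ns(ns);

which lets the final kref_put free the namespace instead of
open-coding the refcount handling.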
> static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
> unsigned nsid)
> {
> + struct xarray *namespaces = &ctrl->namespaces;
> + struct xarray rm_array;
> + unsigned long tnsid;
> + struct nvme_ns *ns;
> + unsigned long idx;
> + int ret;
>
> + xa_init(&rm_array);
> +
> + xa_lock(namespaces);
> + xa_for_each(namespaces, idx, ns) {
> + tnsid = ns->head->ns_id;
> + if (tnsid > nsid || test_bit(NVME_NS_DEAD, &ns->flags)) {
> + xa_unlock(namespaces);
> + xa_erase(namespaces, tnsid);
> + /* Even if insert fails keep going */
> + ret = xa_insert(&rm_array, nsid, ns, GFP_KERNEL);
> + switch (ret) {
> + case -ENOMEM:
> + pr_err("xa insert memory allocation failed\n");
> + break;
> + case -EBUSY:
> + pr_err("xa insert entry already present\n");
> + break;
> + }
> + xa_lock(namespaces);
> + }
> }
> + xa_unlock(namespaces);
I don't think you want an xarray for the delete list. Just keep the
list head for that for now - once we've moved to RCU read side locking
some of this could potentially be simplified.
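Untested sketch, assuming we keep the list member in struct nvme_ns
around just for teardown:

	struct nvme_ns *ns, *next;
	unsigned long idx;
	LIST_HEAD(rm_list);

	xa_lock(&ctrl->namespaces);
	xa_for_each(&ctrl->namespaces, idx, ns) {
		if (ns->head->ns_id > nsid ||
		    test_bit(NVME_NS_DEAD, &ns->flags)) {
			__xa_erase(&ctrl->namespaces, ns->head->ns_id);
			list_add_tail(&ns->list, &rm_list);
		}
	}
	xa_unlock(&ctrl->namespaces);

	list_for_each_entry_safe(ns, next, &rm_list, list)
		nvme_ns_remove(ns);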
> */
> void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
> {
> - struct nvme_ns *ns, *next;
> - LIST_HEAD(ns_list);
> + struct xarray rm_array;
> + unsigned long tnsid;
> + struct nvme_ns *ns;
> + unsigned long idx;
> + int ret;
> +
> + xa_init(&rm_array);
>
> /*
> * make sure to requeue I/O to all namespaces as these
> @@ -3919,11 +3950,30 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
> if (ctrl->state == NVME_CTRL_DEAD)
> nvme_kill_queues(ctrl);
>
> - down_write(&ctrl->namespaces_rwsem);
> - list_splice_init(&ctrl->namespaces, &ns_list);
> - up_write(&ctrl->namespaces_rwsem);
> + xa_lock(&ctrl->namespaces);
> + xa_for_each(&ctrl->namespaces, idx, ns) {
> + tnsid = ns->head->ns_id;
> + xa_unlock(&ctrl->namespaces);
> + xa_erase(&ctrl->namespaces, tnsid);
> + /* Even if insert fails keep going */
> + ret = xa_insert(&rm_array, tnsid, ns, GFP_KERNEL);
> + if (ret) {
> + switch (ret) {
> + case -ENOMEM:
> + dev_err(ctrl->device,
> + "xa insert memory allocation\n");
> + break;
> + case -EBUSY:
> + dev_err(ctrl->device,
> + "xa insert entry already present\n");
> + break;
> + }
> + }
> + xa_lock(&ctrl->namespaces);
> + }
> + xa_unlock(&ctrl->namespaces);
Same here.