[PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
Alan Adamson
alan.adamson at oracle.com
Tue Jun 21 11:40:49 PDT 2022
> On Jun 17, 2022, at 2:01 AM, Christoph Hellwig <hch at lst.de> wrote:
>
> On Wed, Jun 15, 2022 at 08:15:18PM +0000, Alan Adamson wrote:
>> Here are my latest changes. It includes an interface from nvme-cli to specify
>> to clear the IDs upon connect.
>>
>> Wasn’t sure the proper way to send a clear_ids argument to the target so I followed
>> what kato did. Not sure if that is appropriate for this case.
>>
>> This will require a nvme-cli change and blktests changes. I tested with both loop and tcp.
>
> Well, it changes the nvme over the wire protocol, we can't just do
> that.
>
> And the option really is a target side one, that is we want to configure
> it using configfs on the target, not the fabrics command line options.
>
> That is, add a new clear_ids attribute to nvmet_passthru_attrs, save
> the flag in struct nvmet_subsys, and default to true for loop controllers.
Latest changes are below. I like the idea of setting clear_ids using configfs.
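For testing, the new attribute can be poked like the other passthru attributes once the passthru subsystem is created. Something like this should work (the subsystem NQN here is just an example, and the usual configfs mount point is assumed):

  echo 1 > /sys/kernel/config/nvmet/subsystems/pt_subsys/passthru/clear_ids
  cat /sys/kernel/config/nvmet/subsystems/pt_subsys/passthru/clear_ids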
Alan
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index e44b2988759e..ff77c3d2354f 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -773,11 +773,31 @@ static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
 }
 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
 
+static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
+                char *page)
+{
+        return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
+}
+
+static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
+                const char *page, size_t count)
+{
+        struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
+        unsigned int clear_ids;
+
+        if (kstrtouint(page, 0, &clear_ids))
+                return -EINVAL;
+        subsys->clear_ids = clear_ids;
+        return count;
+}
+CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
+
 static struct configfs_attribute *nvmet_passthru_attrs[] = {
         &nvmet_passthru_attr_device_path,
         &nvmet_passthru_attr_enable,
         &nvmet_passthru_attr_admin_timeout,
         &nvmet_passthru_attr_io_timeout,
+        &nvmet_passthru_attr_clear_ids,
         NULL,
 };
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 90e75324dae0..d4203d582343 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1374,6 +1374,12 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
         ctrl->port = req->port;
         ctrl->ops = req->ops;
 
+#ifdef CONFIG_NVME_TARGET_PASSTHRU
+        /* By default, set loop targets to clear IDs. */
+        if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
+                subsys->clear_ids = 1;
+#endif
+
         INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
         INIT_LIST_HEAD(&ctrl->async_events);
         INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..2b3e5719f24e 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -249,6 +249,7 @@ struct nvmet_subsys {
         struct config_group passthru_group;
         unsigned int admin_timeout;
         unsigned int io_timeout;
+        unsigned int clear_ids;
 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
 
 #ifdef CONFIG_BLK_DEV_ZONED
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index b1f7efab3918..d0587b10895c 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,56 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
         ctrl->cap &= ~(1ULL << 43);
 }
 
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+        struct nvmet_ctrl *ctrl = req->sq->ctrl;
+        void *data;
+        struct nvme_ns_id_desc *cur;
+        u16 status = NVME_SC_SUCCESS;
+        u8 csi;
+        int pos, len;
+        bool csi_seen = false;
+
+        if (!ctrl->subsys->clear_ids)
+                return status;
+
+        data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+        if (!data)
+                return NVME_SC_INTERNAL;
+
+        status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+        if (status)
+                goto out_free;
+
+        /* Scan the descriptor list for the CSI, which must be preserved. */
+        for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+                cur = data + pos;
+
+                if (cur->nidl == 0)
+                        break;
+                len = cur->nidl;
+                if (cur->nidt == NVME_NIDT_CSI) {
+                        memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
+                        csi_seen = true;
+                        break;
+                }
+                len += sizeof(struct nvme_ns_id_desc);
+        }
+
+        /* Clear all descriptors, then write back only the CSI. */
+        memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
+        if (csi_seen) {
+                cur = data;
+                cur->nidt = NVME_NIDT_CSI;
+                cur->nidl = NVME_NIDT_CSI_LEN;
+                memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
+        }
+        status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+out_free:
+        kfree(data);
+        return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
         struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -127,6 +177,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 
 static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 {
+        struct nvmet_ctrl *ctrl = req->sq->ctrl;
         u16 status = NVME_SC_SUCCESS;
         struct nvme_id_ns *id;
         int i;
@@ -152,6 +203,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
          */
         id->mc = 0;
 
+        if (ctrl->subsys->clear_ids) {
+                memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+                memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+        }
+
         status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +232,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
                 case NVME_ID_CNS_NS:
                         nvmet_passthru_override_id_ns(req);
                         break;
+                case NVME_ID_CNS_NS_DESC_LIST:
+                        nvmet_passthru_override_id_descs(req);
+                        break;
                 }
         } else if (status < 0)
                 status = NVME_SC_INTERNAL;
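With clear_ids set, the result should be checkable from the host with stock nvme-cli after connecting, along these lines (the namespace device name is just an example):

  nvme id-ns /dev/nvme1n1 | grep -Ei "nguid|eui64"    # should read back as all zeroes
  nvme ns-descs /dev/nvme1n1                          # only the CSI descriptor should remain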