[PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique

Alan Adamson alan.adamson at oracle.com
Thu Jun 9 17:27:24 PDT 2022



> On Jun 8, 2022, at 8:53 PM, Christoph Hellwig <hch at lst.de> wrote:
> 
> On Wed, Jun 08, 2022 at 01:04:48PM -0600, Keith Busch wrote:
>> On Wed, Jun 08, 2022 at 06:11:04PM +0000, Alan Adamson wrote:
>>>> On Jun 8, 2022, at 12:52 AM, Christoph Hellwig <hch at lst.de> wrote:
>>> 
>>> How do we get the clear_ids setting from the connect to the target?
>> 
>> I'm assuming something like this (untested):
> 
> This is a good start.  I think we still want to allow setting it
> in the fabrics opts to allow clearing it for say a local passthrough
> tcp connection.
> 
> And of course clear the identifiers in Identify Namespace as well.


This code works for nvme_trtype=loop.  As Christoph said, we should still add an nvme-cli
option so the same thing can be done for a tcp connection.
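
For the tcp case the new NVMF_OPT_CLEAR_IDS bit would also need to be wired into the
fabrics option parsing; that part isn't in this patch, so the below is only a rough,
untested sketch.  The clear_ids field in struct nvmf_ctrl_options and the "clear_ids"
token are assumed names, not existing code:

/* drivers/nvme/host/fabrics.c -- sketch only: teach the parser the new token */
static const match_table_t opt_tokens = {
	/* ... existing entries ... */
	{ NVMF_OPT_CLEAR_IDS,		"clear_ids" },
	{ NVMF_OPT_ERR,			NULL }
};

/* in nvmf_parse_options(), inside the switch (token): */
		case NVMF_OPT_CLEAR_IDS:
			/* assumes a new bool clear_ids in struct nvmf_ctrl_options */
			opts->clear_ids = true;
			break;

nvme-cli would then need a matching connect option to put "clear_ids" into the option
string; I haven't picked a flag name for that here.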

Alan

diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..c7448c11c87f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,7 @@ enum {
        NVMF_OPT_FAIL_FAST_TMO  = 1 << 20,
        NVMF_OPT_HOST_IFACE     = 1 << 21,
        NVMF_OPT_DISCOVERY      = 1 << 22,
+       NVMF_OPT_CLEAR_IDS      = 1 << 23,
 };
 
 /**
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..6ba9e4bb011c 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -687,6 +687,7 @@ static void nvme_loop_remove_port(struct nvmet_port *port)
 static const struct nvmet_fabrics_ops nvme_loop_ops = {
        .owner          = THIS_MODULE,
        .type           = NVMF_TRTYPE_LOOP,
+       .flags          = NVMF_CLEAR_NS_DESCS,
        .add_port       = nvme_loop_add_port,
        .remove_port    = nvme_loop_remove_port,
        .queue_response = nvme_loop_queue_response,
@@ -697,7 +698,7 @@ static struct nvmf_transport_ops nvme_loop_transport = {
        .name           = "loop",
        .module         = THIS_MODULE,
        .create_ctrl    = nvme_loop_create_ctrl,
-       .allowed_opts   = NVMF_OPT_TRADDR,
+       .allowed_opts   = NVMF_OPT_TRADDR | NVMF_OPT_CLEAR_IDS,
 };
 
 static int __init nvme_loop_init_module(void)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..facd9706d67c 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -300,6 +300,7 @@ struct nvmet_fabrics_ops {
        unsigned int flags;
 #define NVMF_KEYED_SGLS                        (1 << 0)
 #define NVMF_METADATA_SUPPORTED                (1 << 1)
+#define NVMF_CLEAR_NS_DESCS                    (1 << 2)
        void (*queue_response)(struct nvmet_req *req);
        int (*add_port)(struct nvmet_port *port);
        void (*remove_port)(struct nvmet_port *port);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..2f182c7f35f5 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,37 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
                ctrl->cap &= ~(1ULL << 43);
 }
 
+
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
+       struct nvme_ns_id_desc *data, *cur;
+       u16 status = NVME_SC_SUCCESS;
+
+       if (!(ctrl->ops->flags & NVMF_CLEAR_NS_DESCS))
+               return status;
+
+       data = kzalloc(0x1000, GFP_KERNEL);
+       if (!data)
+               return NVME_SC_INTERNAL;
+
+       status = nvmet_copy_from_sgl(req, 0, data, 0x1000);
+       if (status)
+               goto out_free;
+
+       cur = data;
+       cur->nidt = NVME_NIDT_CSI;
+       cur->nidl = NVME_NIDT_CSI_LEN;
+       cur++;
+       cur->nidt = 0;
+
+       status = nvmet_copy_to_sgl(req, 0, data, 0x1000);
+
+out_free:
+       kfree(data);
+       return status;
+}
+
 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -127,6 +158,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
 
 static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
 {
+       struct nvmet_ctrl *ctrl = req->sq->ctrl;
        u16 status = NVME_SC_SUCCESS;
        struct nvme_id_ns *id;
        int i;
@@ -152,6 +184,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
         */
        id->mc = 0;
 
+       if (ctrl->ops->flags & NVMF_CLEAR_NS_DESCS) {
+               memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+               memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+       }
+
        status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
 
 out_free:
@@ -176,6 +213,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
                case NVME_ID_CNS_NS:
                        nvmet_passthru_override_id_ns(req);
                        break;
+               case NVME_ID_CNS_NS_DESC_LIST:
+                       nvmet_passthru_override_id_descs(req);
+                       break;
                }
        } else if (status < 0)
                status = NVME_SC_INTERNAL;
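
One caveat I noticed when re-reading nvmet_passthru_override_id_descs() above: it writes
the CSI descriptor header but never fills in a CSI value (the cur++ terminator lands on
the byte where the CSI payload would go), and the rest of the buffer copied from the
passthrough controller is written back untouched behind it.  Something along these lines
(sketch only, untested) would carry over the controller's CSI value and clear everything
else; NVME_IDENTIFY_DATA_SIZE is just the existing 4096-byte define:

static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_SUCCESS;
	bool csi_seen = false;
	void *data;
	int pos, len;
	u8 csi;

	if (!(ctrl->ops->flags & NVMF_CLEAR_NS_DESCS))
		return status;

	data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
	if (!data)
		return NVME_SC_INTERNAL;

	status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
	if (status)
		goto out_free;

	/* Walk the original descriptor list and remember the CSI value, if any. */
	for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
		struct nvme_ns_id_desc *cur = data + pos;

		if (cur->nidl == 0)
			break;
		if (cur->nidt == NVME_NIDT_CSI) {
			memcpy(&csi, cur + 1, NVME_NIDT_CSI_LEN);
			csi_seen = true;
			break;
		}
		len = sizeof(struct nvme_ns_id_desc) + cur->nidl;
	}

	/* Report only the CSI descriptor; EUI64/NGUID/UUID are cleared. */
	memset(data, 0, NVME_IDENTIFY_DATA_SIZE);
	if (csi_seen) {
		struct nvme_ns_id_desc *cur = data;

		cur->nidt = NVME_NIDT_CSI;
		cur->nidl = NVME_NIDT_CSI_LEN;
		memcpy(cur + 1, &csi, NVME_NIDT_CSI_LEN);
	}

	status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);

out_free:
	kfree(data);
	return status;
}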
