[PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique

Alan Adamson alan.adamson at oracle.com
Mon Jun 6 13:35:39 PDT 2022



> On Apr 20, 2022, at 12:36 AM, Christoph Hellwig <hch at lst.de> wrote:
> 
> On Mon, Apr 18, 2022 at 11:30:33PM +0000, Alan Adamson wrote:
>> I ran into an issue with blktests and my qemu setup when passthru is enabled.  The passthru tests
>> do not complete because the UUID for the passthru device comes from a device on the same system,
>> since the fabric was set up as a loop, and nvme_global_check_duplicate_ids() fails.
>> 
>> This is probably not a valid real-life configuration, but since blktests tests fabrics on a single
>> system, we hit this issue.
>> 
>> I’ve hacked together a fix that manipulates the Namespace Identifiers to get the test to work.  Is there
>> a way for blktests to hardcode the IDs for the passthru devices?
> 
> Hmm.  I suspect the best thing would be to optionally just clear the
> IDs entirely.  Optionally as in maybe a fabrics connect argument,
> with it defaulting to true only for loop as that is by definition
> local.

Here are the changes to support a clear-ids nvme connect argument.  I want to float these changes
prior to sending a formal patch.

Changes to the nvme driver, nvme-cli, and blktests are required.

Thanks,

Alan


nvme driver changes:


diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 72f7c955c707..ea1274cdefda 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3852,9 +3852,15 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 
 	ret = nvme_global_check_duplicate_ids(ctrl->subsys, ids);
 	if (ret) {
-		dev_err(ctrl->device,
-			"globally duplicate IDs for nsid %d\n", nsid);
-		return ret;
+		if (ctrl->opts && ctrl->opts->clear_ids) {
+			uuid_copy(&ids->uuid, &uuid_null);
+			memset(&ids->nguid, 0, sizeof(ids->nguid));
+			memset(&ids->eui64, 0, sizeof(ids->eui64));
+		} else {
+			dev_err(ctrl->device,
+				"globally duplicate IDs for nsid %d\n", nsid);
+			return ret;
+		}
 	}
 
 	mutex_lock(&ctrl->subsys->lock);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..0022767a3a37 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
 	{ NVMF_OPT_TOS,			"tos=%d"		},
 	{ NVMF_OPT_FAIL_FAST_TMO,	"fast_io_fail_tmo=%d"	},
 	{ NVMF_OPT_DISCOVERY,		"discovery"		},
+	{ NVMF_OPT_CLEAR_IDS,		"clear_ids"		},
 	{ NVMF_OPT_ERR,			NULL			}
 };
 
@@ -571,6 +572,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 	opts->hdr_digest = false;
 	opts->data_digest = false;
 	opts->tos = -1; /* < 0 == use transport default */
+	opts->clear_ids = false;
 
 	options = o = kstrdup(buf, GFP_KERNEL);
 	if (!options)
@@ -593,6 +595,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 			}
 			kfree(opts->transport);
 			opts->transport = p;
+			if (!strcmp(p, "loop"))
+				opts->clear_ids = true;
 			break;
 		case NVMF_OPT_NQN:
 			p = match_strdup(args);
@@ -829,6 +833,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
 		case NVMF_OPT_DISCOVERY:
 			opts->discovery_nqn = true;
 			break;
+		case NVMF_OPT_CLEAR_IDS:
+			opts->clear_ids = true;
+			break;
 		default:
 			pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
 				p);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..ecf7f2e9fb4a 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,7 @@ enum {
 	NVMF_OPT_FAIL_FAST_TMO	= 1 << 20,
 	NVMF_OPT_HOST_IFACE	= 1 << 21,
 	NVMF_OPT_DISCOVERY	= 1 << 22,
+	NVMF_OPT_CLEAR_IDS	= 1 << 23,
 };
 
 /**
@@ -104,6 +105,7 @@ enum {
  * @nr_poll_queues: number of queues for polling I/O
  * @tos: type of service
  * @fast_io_fail_tmo: Fast I/O fail timeout in seconds
+ * @clear_ids: clear ids on connect
  */
 struct nvmf_ctrl_options {
 	unsigned		mask;
@@ -128,6 +130,7 @@ struct nvmf_ctrl_options {
 	unsigned int		nr_poll_queues;
 	int			tos;
 	int			fast_io_fail_tmo;
+	bool			clear_ids;
 };
 
 /*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f2a5e1ea508a..9e376bbf5eef 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2438,7 +2438,7 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
 	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
-			  NVMF_OPT_TOS,
+			  NVMF_OPT_TOS | NVMF_OPT_CLEAR_IDS,
 	.create_ctrl	= nvme_rdma_create_ctrl,
 };
 
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bb67538d241b..7624baa11d19 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2691,7 +2691,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
 			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
 			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
 			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
-			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
+			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE |
+			  NVMF_OPT_CLEAR_IDS,
 	.create_ctrl	= nvme_tcp_create_ctrl,
 };
 
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..1222f4c88aa4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -697,7 +697,7 @@ static struct nvmf_transport_ops nvme_loop_transport = {
 	.name		= "loop",
 	.module		= THIS_MODULE,
 	.create_ctrl	= nvme_loop_create_ctrl,
-	.allowed_opts	= NVMF_OPT_TRADDR,
+	.allowed_opts	= NVMF_OPT_TRADDR | NVMF_OPT_CLEAR_IDS,
 };
 
 static int __init nvme_loop_init_module(void)
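
For reference, the new token can be exercised without nvme-cli by writing the option string
directly to /dev/nvme-fabrics, since "clear_ids" is parsed by the same opt_tokens table as the
existing flags.  A rough sketch only; the subsystem NQN below is a placeholder, and for loop the
option is now set implicitly anyway:

  # placeholder NQN; any configured nvmet subsystem would do
  echo "transport=loop,nqn=blktests-subsystem-1,clear_ids" > /dev/nvme-fabrics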



nvme-cli changes:

diff --git a/fabrics.c b/fabrics.c
index b89de252f58d..b347e5f2706f 100644
--- a/fabrics.c
+++ b/fabrics.c
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2016 Intel Corporation. All rights reserved.
  * Copyright (c) 2016 HGST, a Western Digital Company.
@@ -78,6 +77,7 @@ static const char *nvmf_dup_connect   = "allow duplicate connections between same
 static const char *nvmf_disable_sqflow = "disable controller sq flow control (default false)";
 static const char *nvmf_hdr_digest     = "enable transport protocol header digest (TCP transport)";
 static const char *nvmf_data_digest    = "enable transport protocol data digest (TCP transport)";
+static const char *nvmf_clear_ids      = "Clear Namespace Identifiers upon connect";
 static const char *nvmf_config_file    = "Use specified JSON configuration file or 'none' to disable";
 
 #define NVMF_OPTS(c)                                                                   \
@@ -102,7 +102,8 @@ static const char *nvmf_config_file = "Use specified JSON configuration file or
        OPT_FLAG("duplicate-connect", 'D', &c.duplicate_connect,  nvmf_dup_connect),    \
        OPT_FLAG("disable-sqflow",    'd', &c.disable_sqflow,     nvmf_disable_sqflow), \
        OPT_FLAG("hdr-digest",        'g', &c.hdr_digest,         nvmf_hdr_digest),     \
-       OPT_FLAG("data-digest",       'G', &c.data_digest,        nvmf_data_digest)     \
+       OPT_FLAG("data-digest",       'G', &c.data_digest,        nvmf_data_digest),    \
+       OPT_FLAG("clear-ids",         'e', &c.clear_ids,          nvmf_clear_ids)       \
 
 struct tr_config {
        char *subsysnqn;
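
With the nvme-cli side in place, the flag would be passed on connect along these lines
(a sketch only; the NQN, address, and port are placeholders from a typical blktests setup):

  # hypothetical invocation of the proposed flag; substitute the real target details
  nvme connect -t tcp -n blktests-subsystem-1 -a 127.0.0.1 -s 4420 --clear-ids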

blktests changes (phase 1):

Since it will take a while for a clear-ids-enabled version of nvme-cli to be released, we can simply limit
the passthru tests to run only over the loop transport.


diff --git a/tests/nvme/033 b/tests/nvme/033
index c6a3f7feb50e..5d6dc1fc2676 100755
--- a/tests/nvme/033
+++ b/tests/nvme/033
@@ -11,6 +11,7 @@ QUICK=1
 requires() {
        _nvme_requires
        _have_kernel_option NVME_TARGET_PASSTHRU
+       _require_nvme_trtype_is_loop
 }
 
 nvme_info() {
diff --git a/tests/nvme/034 b/tests/nvme/034
index f92e5e20865b..3fa194466ad8 100755
--- a/tests/nvme/034
+++ b/tests/nvme/034
@@ -12,6 +12,7 @@ requires() {
        _nvme_requires
        _have_kernel_option NVME_TARGET_PASSTHRU
        _have_fio
+       _require_nvme_trtype_is_loop
 }
 
 test_device() {
diff --git a/tests/nvme/035 b/tests/nvme/035
index ee78a7586f35..160d39db11b7 100755
--- a/tests/nvme/035
+++ b/tests/nvme/035
@@ -14,6 +14,7 @@ requires() {
        _have_kernel_option NVME_TARGET_PASSTHRU
        _have_xfs
        _have_fio
+       _require_nvme_trtype_is_loop
 }
 
 test_device() {
diff --git a/tests/nvme/036 b/tests/nvme/036
index 8218c6538dfd..4afb5684f2cf 100755
--- a/tests/nvme/036
+++ b/tests/nvme/036
@@ -11,6 +11,7 @@ QUICK=1
 requires() {
        _nvme_requires
        _have_kernel_option NVME_TARGET_PASSTHRU
+       _require_nvme_trtype_is_loop
 }
 
 test_device() {
diff --git a/tests/nvme/037 b/tests/nvme/037
index fc6c21343652..2e70ad19c0e0 100755
--- a/tests/nvme/037
+++ b/tests/nvme/037
@@ -10,6 +10,7 @@ DESCRIPTION="test deletion of NVMeOF passthru controllers immediately after setu
 requires() {
        _nvme_requires
        _have_kernel_option NVME_TARGET_PASSTHRU
+       _require_nvme_trtype_is_loop
 }
 


blktests changes (phase 2):

Once a clear-ids-enabled version of nvme-cli is available, the above
changes can be reverted and the changes below will work for all
trtypes.


diff --git a/tests/nvme/rc b/tests/nvme/rc
index ccdccf9cbf9a..3271e0e22c5b 100644
--- a/tests/nvme/rc
+++ b/tests/nvme/rc
@@ -172,7 +172,7 @@ _nvme_connect_subsys() {
        local traddr="${3:-$def_traddr}"
        local trsvcid="${4:-$def_trsvcid}"
 
-       ARGS=(-t "${trtype}" -n "${subsysnqn}")
+       ARGS=(-t "${trtype}" -n "${subsysnqn}" "${5}")
        if [[ "${trtype}" != "loop" ]]; then
                ARGS+=(-a "${traddr}" -s "${trsvcid}")
        fi
@@ -345,7 +345,9 @@ _nvmet_passthru_target_connect() {
        local trtype=$1
        local subsys_name=$2
 
-       _nvme_connect_subsys "${trtype}" "${subsys_name}" || return
+       # XXX - We will need to check nvme-cli version before using clear-ids
+       _nvme_connect_subsys "${trtype}" "${subsys_name}" "${def_traddr}" \
+           "${def_trsvcid}" "--clear-ids" || return
        nsdev=$(_find_nvme_passthru_loop_dev "${subsys_name}")
 
        # The following tests can race with the creation
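
As for the XXX above, rather than parsing version numbers it may be enough to probe the installed
nvme-cli for the flag itself.  The helper below is only a sketch and not part of the patch; its
name and the probe-via-help approach are assumptions:

  # hypothetical helper: succeed only if the installed nvme-cli knows about --clear-ids
  _have_nvme_cli_clear_ids() {
  	nvme connect --help 2>&1 | grep -q -- '--clear-ids'
  }

_nvmet_passthru_target_connect() could then append "--clear-ids" only when that check succeeds,
so older nvme-cli binaries keep working.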






