[PATCH 4/4] nvme: check that EUI/GUID/UUID are globally unique
Alan Adamson
alan.adamson at oracle.com
Wed Jun 15 13:15:18 PDT 2022
Here are my latest changes. They add an interface so nvme-cli can ask for the
IDs to be cleared at connect time.
I wasn't sure of the proper way to send a clear_ids argument to the target, so I
followed what kato does and carry it in the connect command. Not sure if that is
appropriate for this case.
This will require an nvme-cli change and blktests changes. I tested with both
loop and tcp.
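For illustration, with the token this patch adds, the host-side options string
(what nvme-cli ultimately writes to /dev/nvme-fabrics) would look something
like this (values are placeholders):

    transport=tcp,traddr=127.0.0.1,trsvcid=4420,nqn=testnqn,clear_ids

For loop the option is implied by default, per the nvmf_parse_options() change
below.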
Alan
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ee79a6d639b4..773da1fae961 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -376,6 +376,8 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
*/
cmd.connect.kato = cpu_to_le32(ctrl->kato * 1000);
+ cmd.connect.clear_ids = ctrl->opts->clear_ids;
+
if (ctrl->opts->disable_sqflow)
cmd.connect.cattr |= NVME_CONNECT_DISABLE_SQFLOW;
@@ -548,6 +550,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
{ NVMF_OPT_DISCOVERY, "discovery" },
+ { NVMF_OPT_CLEAR_IDS, "clear_ids" },
{ NVMF_OPT_ERR, NULL }
};
@@ -571,6 +574,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
opts->hdr_digest = false;
opts->data_digest = false;
opts->tos = -1; /* < 0 == use transport default */
+ opts->clear_ids = false;
options = o = kstrdup(buf, GFP_KERNEL);
if (!options)
@@ -593,6 +597,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
kfree(opts->transport);
opts->transport = p;
+ /* By default, clear the ids for loop passthru */
+ if (!strcmp(p, "loop"))
+ opts->clear_ids = true;
break;
case NVMF_OPT_NQN:
p = match_strdup(args);
@@ -829,6 +836,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
case NVMF_OPT_DISCOVERY:
opts->discovery_nqn = true;
break;
+ case NVMF_OPT_CLEAR_IDS:
+ opts->clear_ids = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 46d6e194ac2b..0fc08465e8a3 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -68,6 +68,7 @@ enum {
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
NVMF_OPT_DISCOVERY = 1 << 22,
+ NVMF_OPT_CLEAR_IDS = 1 << 23,
};
/**
@@ -128,6 +129,7 @@ struct nvmf_ctrl_options {
unsigned int nr_poll_queues;
int tos;
int fast_io_fail_tmo;
+ bool clear_ids;
};
/*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f2a5e1ea508a..9e376bbf5eef 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -2438,7 +2438,7 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
- NVMF_OPT_TOS,
+ NVMF_OPT_TOS | NVMF_OPT_CLEAR_IDS,
.create_ctrl = nvme_rdma_create_ctrl,
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index bb67538d241b..7624baa11d19 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2691,7 +2691,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
- NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
+ NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE |
+ NVMF_OPT_CLEAR_IDS,
.create_ctrl = nvme_tcp_create_ctrl,
};
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 70fb587e9413..b10c915829b8 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -212,6 +212,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
+ ctrl->clear_ids = c->clear_ids;
uuid_copy(&ctrl->hostid, &d->hostid);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 59024af2da2e..1222f4c88aa4 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -697,7 +697,7 @@ static struct nvmf_transport_ops nvme_loop_transport = {
.name = "loop",
.module = THIS_MODULE,
.create_ctrl = nvme_loop_create_ctrl,
- .allowed_opts = NVMF_OPT_TRADDR,
+ .allowed_opts = NVMF_OPT_TRADDR | NVMF_OPT_CLEAR_IDS,
};
static int __init nvme_loop_init_module(void)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 69818752a33a..3f5b5a9b2e54 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -209,6 +209,7 @@ struct nvmet_ctrl {
u64 err_counter;
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
bool pi_support;
+ bool clear_ids;
};
struct nvmet_subsys {
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 5247c24538eb..9498ba417a9e 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -30,6 +30,56 @@ void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
ctrl->cap &= ~(1ULL << 43);
}
+static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ void *data;
+ struct nvme_ns_id_desc *cur;
+ u16 status = NVME_SC_SUCCESS;
+ u8 csi;
+ int pos, len;
+ bool csi_seen = false;
+
+ if (!ctrl->clear_ids)
+ return status;
+
+ data = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
+ if (!data)
+ return NVME_SC_INTERNAL;
+
+ status = nvmet_copy_from_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+ if (status)
+ goto out_free;
+
+ for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; pos += len) {
+ cur = data + pos;
+
+ if (cur->nidl == 0)
+ break;
+ len = cur->nidl;
+ if (cur->nidt == NVME_NIDT_CSI) {
+ memcpy(&csi, data + pos + sizeof(struct nvme_ns_id_desc), NVME_NIDT_CSI_LEN);
+ csi_seen = true;
+ break;
+ }
+ len += sizeof(struct nvme_ns_id_desc);
+ }
+ if (csi_seen) {
+ cur = data;
+ cur->nidt = NVME_NIDT_CSI;
+ cur->nidl = NVME_NIDT_CSI_LEN;
+ memcpy(data + sizeof(struct nvme_ns_id_desc), &csi, NVME_NIDT_CSI_LEN);
+
+ cur = data + sizeof(struct nvme_ns_id_desc) + NVME_NIDT_CSI_LEN;
+ cur->nidt = 0;
+ cur->nidl = 0;
+ status = nvmet_copy_to_sgl(req, 0, data, NVME_IDENTIFY_DATA_SIZE);
+ }
+out_free:
+ kfree(data);
+ return status;
+}
+
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -127,6 +177,7 @@ static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
u16 status = NVME_SC_SUCCESS;
struct nvme_id_ns *id;
int i;
@@ -152,6 +203,11 @@ static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
*/
id->mc = 0;
+ if (ctrl->clear_ids) {
+ memset(id->nguid, 0, NVME_NIDT_NGUID_LEN);
+ memset(id->eui64, 0, NVME_NIDT_EUI64_LEN);
+ }
+
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
out_free:
@@ -176,6 +232,9 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
case NVME_ID_CNS_NS:
nvmet_passthru_override_id_ns(req);
break;
+ case NVME_ID_CNS_NS_DESC_LIST:
+ nvmet_passthru_override_id_descs(req);
+ break;
}
} else if (status < 0)
status = NVME_SC_INTERNAL;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 29ec3e3481ff..f45e96413ffb 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -1467,7 +1467,8 @@ struct nvmf_connect_command {
__u8 cattr;
__u8 resv3;
__le32 kato;
- __u8 resv4[12];
+ __u8 clear_ids;
+ __u8 resv4[11];
};
struct nvmf_connect_data {
> On Jun 10, 2022, at 7:12 AM, Keith Busch <kbusch at kernel.org> wrote:
>
> On Fri, Jun 10, 2022 at 12:27:24AM +0000, Alan Adamson wrote:
>> +static u16 nvmet_passthru_override_id_descs(struct nvmet_req *req)
>> +{
>> + struct nvmet_ctrl *ctrl = req->sq->ctrl;
>> + struct nvme_ns_id_desc *data, *cur;
>> + u16 status = NVME_SC_SUCCESS;
>> +
>> + if (!(ctrl->ops->flags & NVMF_CLEAR_NS_DESCS))
>> + return status;
>> +
>> + data = kzalloc(0x1000, GFP_KERNEL);
>> + if (!data)
>> + return NVME_SC_INTERNAL;
>> +
>> + status = nvmet_copy_from_sgl(req, 0, data, 0x1000);
>> + if (status)
>> + goto out_free;
>> +
>> + cur = data;
>> + cur->nidt = NVME_NIDT_CSI;
>> + cur->nidl = NVME_NIDT_CSI_LEN;
>> + cur++;
>> + cur->nidt = 0;
>
> I don't think the above is correct without setting the CSI value. It's just
> going to get whatever the controller happened to return at this offset, which
> may be a completely different identifier type. I think you'd actually need to
> search the descriptor list for the NIDT_CSI field and then copy just that one
> into what gets returned.
>
> And the "cur++" is just going to move the pointer past the descriptor header,
> but doesn't include the descriptor's total length, so setting cur->nidt is
> going to corrupt the actual descriptor. You have to add the previous descriptor's NIDL to
> the cur address.
>
> Otherwise, the rest looks fine.
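To make the walk Keith describes concrete, the updated
nvmet_passthru_override_id_descs() above advances through the list roughly like
this (simplified, illustrative sketch of the same logic, not the actual patch
code):

	/*
	 * Each entry is a 4-byte struct nvme_ns_id_desc header followed by
	 * nidl bytes of payload, so the next entry starts at
	 * sizeof(header) + nidl.  Stop on a zero-length entry.
	 */
	static bool find_csi(void *data, u8 *csi)
	{
		struct nvme_ns_id_desc *cur;
		int pos;

		for (pos = 0; pos < NVME_IDENTIFY_DATA_SIZE; ) {
			cur = data + pos;
			if (cur->nidl == 0)
				break;
			if (cur->nidt == NVME_NIDT_CSI) {
				memcpy(csi, data + pos + sizeof(*cur),
				       NVME_NIDT_CSI_LEN);
				return true;
			}
			pos += sizeof(*cur) + cur->nidl;
		}
		return false;
	}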