[PATCH 4/4] nvmet: add --qid-max support for subsystem creation and setup

Max Gurtovoy mgurtovoy at nvidia.com
Sun Sep 28 04:51:25 PDT 2025


Introduce support for the --qid-max argument to both
_create_nvmet_subsystem and _nvmet_target_setup functions, allowing
configuration of the maximum queue ID through the "attr_qid_max" sysfs
attribute when available. This enables more flexible queue management
for NVMe subsystems.

Additionally, update test 048 to set qid_max via the new --qid-max
workflow. This follows the new logic added to the NVMe target
driver, which forbids changing the attr_qid_max for a discovered
subsystem.

Also increase the sleep durations to accommodate real RDMA HW devices.

Signed-off-by: Max Gurtovoy <mgurtovoy at nvidia.com>
---
 common/nvme    | 18 ++++++++++++++++++
 tests/nvme/048 |  7 ++++---
 2 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/common/nvme b/common/nvme
index 01d054d..3a883ff 100644
--- a/common/nvme
+++ b/common/nvme
@@ -752,6 +752,7 @@ _create_nvmet_subsystem() {
 	local blkdev
 	local uuid="${def_subsys_uuid}"
 	local resv_enable
+	local qid_max
 	local cfs_path
 	local -a ARGS
 
@@ -773,6 +774,10 @@ _create_nvmet_subsystem() {
 				resv_enable="--resv_enable";
 				shift 1
 				;;
+			--qid-max)
+				qid_max="$2"
+				shift 2
+				;;
 			*)
 				echo "WARNING: unknown argument: $1"
 				shift
@@ -794,6 +799,11 @@ _create_nvmet_subsystem() {
 	if [[ -n "$resv_enable" ]]; then
 		ARGS+=("${resv_enable}")
 	fi
+	if [[ -f "${cfs_path}/attr_qid_max" ]]; then
+		if [[ -n "$qid_max" ]]; then
+			echo "${qid_max}" > "${cfs_path}/attr_qid_max"
+		fi
+	fi
 	_create_nvmet_ns "${ARGS[@]}" > /dev/null
 }
 
@@ -971,6 +981,7 @@ _nvmet_target_setup() {
 	local port p
 	local resv_enable=""
 	local num_ports=1
+	local qid_max
 	local tls="none"
 	local -a ARGS
 
@@ -1012,6 +1023,10 @@ _nvmet_target_setup() {
 				tls="required"
 				shift 1
 				;;
+			--qid-max)
+				qid_max="$2"
+				shift 2
+				;;
 			*)
 				echo "WARNING: unknown argument: $1"
 				shift
@@ -1054,6 +1069,9 @@ _nvmet_target_setup() {
 	if [[ -n "${resv_enable}" ]]; then
 		ARGS+=("${resv_enable}")
 	fi
+	if [[ -n "${qid_max}" ]]; then
+		ARGS+=(--qid-max "${qid_max}")
+	fi
 	_create_nvmet_subsystem "${ARGS[@]}"
 
 	p=0
diff --git a/tests/nvme/048 b/tests/nvme/048
index afd9272..c6f4d9f 100755
--- a/tests/nvme/048
+++ b/tests/nvme/048
@@ -37,7 +37,7 @@ nvmf_check_queue_count() {
 			return 1
 		fi
 
-		sleep 1
+		sleep 2
 
 		retries=$((retries - 1))
 		queue_count_file=$(cat /sys/class/nvme-fabrics/ctl/"${nvmedev}"/queue_count)
@@ -58,9 +58,10 @@ set_qid_max() {
 	local subsys_name="$1"
 	local qid_max="$2"
 
-	set_nvmet_attr_qid_max "${subsys_name}" "${qid_max}"
+	_nvmet_target_cleanup
+	_nvmet_target_setup --blkdev file --qid-max "${qid_max}"
 	nvmf_check_queue_count "${subsys_name}" "${qid_max}" || return 1
-	_nvmf_wait_for_state "${subsys_name}" "live" || return 1
+	_nvmf_wait_for_state "${subsys_name}" "live" 10 || return 1
 
 	return 0
 }
-- 
2.18.1




More information about the Linux-nvme mailing list