[PATCH blktests] blktests: add nvmet memory backend support

Chaitanya Kulkarni ckulkarnilinux at gmail.com
Tue Nov 4 00:14:36 PST 2025


Add support for testing the nvmet memory backend across all transport
types. Tests that use _set_combined_conditions with
_set_nvmet_blkdev_type will now exercise the memory backend
automatically, alongside the device and file backends.
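
For illustration, a test opts in by combining the condition helpers in
its set_conditions() hook, roughly like the sketch below (test-local
code, not part of this patch; the test name is hypothetical):

    # tests/nvme/NNN (hypothetical): iterate over every transport in
    # NVMET_TRTYPES and every backend type in NVMET_BLKDEV_TYPES
    set_conditions() {
            _set_combined_conditions _set_nvme_trtype _set_nvmet_blkdev_type "$@"
    }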

The memory backend provides RAM-based volatile storage for NVMe
namespaces, useful for high-performance testing without disk I/O.

- Add "mem" to NVMET_BLKDEV_TYPES default value
- Add _require_nvme_mem_backend() prerequisite check (probe sketched below)
- Add _create_nvmet_ns_mem() helper for memory namespace creation
- Modify _nvmet_target_setup() to handle memory backend type
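
At the configfs level the new prerequisite check boils down to probing
whether a namespace accepts a mem_size attribute, roughly as follows
(subsystem name illustrative, default NVMET_CFS mount assumed):

    cd /sys/kernel/config/nvmet/subsystems
    mkdir blktests-mem-test
    mkdir blktests-mem-test/namespaces/1
    # this write fails on kernels without the memory backend
    echo 1073741824 > blktests-mem-test/namespaces/1/mem_size
    rmdir blktests-mem-test/namespaces/1 blktests-mem-test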

All existing tests that support multiple backends (device, file) now
automatically run with the memory backend as well, giving threefold
backend coverage (device, file, and mem) across the loop, tcp, and rdma
transports.
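
To try only the new backend, for example, the lists can be narrowed from
the environment (or the blktests config file) when invoking check:

    NVMET_TRTYPES="loop tcp rdma" NVMET_BLKDEV_TYPES="mem" ./check nvme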

Signed-off-by: Chaitanya Kulkarni <ckulkarnilinux at gmail.com>
---
 common/nvme | 135 ++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 125 insertions(+), 10 deletions(-)

diff --git a/common/nvme b/common/nvme
index 3d43790..a558943 100644
--- a/common/nvme
+++ b/common/nvme
@@ -23,7 +23,7 @@ _check_conflict_and_set_default NVMET_TRTYPES nvme_trtype "loop"
 _check_conflict_and_set_default NVME_IMG_SIZE nvme_img_size 1G
 _check_conflict_and_set_default NVME_NUM_ITER nvme_num_iter 1000
 nvmet_blkdev_type=${nvmet_blkdev_type:-"device"}
-NVMET_BLKDEV_TYPES=${NVMET_BLKDEV_TYPES:-"device file"}
+NVMET_BLKDEV_TYPES=${NVMET_BLKDEV_TYPES:-"device file mem"}
 nvme_target_control="${NVME_TARGET_CONTROL:-}"
 NVMET_CFS="/sys/kernel/config/nvmet/"
 # shellcheck disable=SC2034
@@ -62,6 +62,35 @@ _require_nvme_trtype_is_fabrics() {
 	return 0
 }
 
+_require_nvme_mem_backend() {
+	# Check if memory backend is supported in kernel
+	local test_subsys="${NVMET_CFS}/subsystems/blktests-mem-test-$$"
+	local test_ns="${test_subsys}/namespaces/1"
+
+	if ! mkdir -p "${test_subsys}" 2>/dev/null; then
+		SKIP_REASONS+=("cannot create test subsystem in configfs")
+		return 1
+	fi
+
+	if ! mkdir -p "${test_ns}" 2>/dev/null; then
+		rmdir "${test_subsys}"
+		SKIP_REASONS+=("cannot create test namespace in configfs")
+		return 1
+	fi
+
+	# Try to set mem_size attribute
+	if ! echo "1073741824" > "${test_ns}/mem_size" 2>/dev/null; then
+		rmdir "${test_ns}"
+		rmdir "${test_subsys}"
+		SKIP_REASONS+=("nvmet memory backend not supported")
+		return 1
+	fi
+
+	rmdir "${test_ns}"
+	rmdir "${test_subsys}"
+	return 0
+}
+
 _have_nvme_cli_with_json_support() {
 	_have_program nvme || return $?
 
@@ -726,6 +755,71 @@ _create_nvmet_ns() {
 	echo "${uuid}"
 }
 
+_create_nvmet_ns_mem() {
+	local subsysnqn="${def_subsysnqn}"
+	local nsid="${def_nsid}"
+	local grpid="1"
+	local mem_size="${NVME_IMG_SIZE}"
+	local uuid
+	local subsys_path
+	local ns_path
+
+	while [[ $# -gt 0 ]]; do
+		case $1 in
+			--subsysnqn)
+				subsysnqn="$2"
+				shift 2
+				;;
+			--nsid)
+				nsid="$2"
+				shift 2
+				;;
+			--mem-size)
+				mem_size="$2"
+				shift 2
+				;;
+			--uuid)
+				uuid="$2"
+				shift 2
+				;;
+			--grpid)
+				grpid="$2"
+				shift 2
+				;;
+			*)
+				echo "WARNING: unknown argument: $1"
+				shift
+				;;
+		esac
+	done
+
+	subsys_path="${NVMET_CFS}/subsystems/${subsysnqn}"
+	ns_path="${subsys_path}/namespaces/${nsid}"
+
+	mkdir "${ns_path}"
+
+	# Memory backend uses mem_size instead of device_path
+	# Convert size string (e.g., "1G") to bytes
+	local mem_size_bytes
+	mem_size_bytes=$(numfmt --from=iec "${mem_size}")
+	printf "%s" "${mem_size_bytes}" > "${ns_path}/mem_size"
+
+	# Set UUID if provided, otherwise read generated one
+	if [[ -n "${uuid}" ]]; then
+		printf "%s" "${uuid}" > "${ns_path}/device_uuid"
+	else
+		uuid=$(cat "${ns_path}/device_uuid")
+	fi
+
+	# Set ANA group if not default
+	if (( grpid != 1 )); then
+		printf "%d" "${grpid}" > "${ns_path}/ana_grpid"
+	fi
+
+	printf "%d" 1 > "${ns_path}/enable"
+	echo "${uuid}"
+}
+
 _setup_nvmet_ns_ana() {
 	local nvmet_subsystem="$1"
 	local nsid="$2"
@@ -956,6 +1050,7 @@ _find_nvme_ns() {
 _nvmet_target_setup() {
 	local blkdev_type="${nvmet_blkdev_type}"
 	local blkdev
+	local mem_size="${NVME_IMG_SIZE}"
 	local ctrlkey=""
 	local hostkey=""
 	local subsysnqn="${def_subsysnqn}"
@@ -1011,7 +1106,12 @@ _nvmet_target_setup() {
 		esac
 	done
 
-	if [[ "${blkdev_type}" != "none" ]]; then
+	# Handle backend-specific setup
+	if [[ "${blkdev_type}" == "mem" || "${blkdev_type}" == "memory" ]]; then
+		# Memory backend - no file or device needed
+		blkdev=""
+	elif [[ "${blkdev_type}" != "none" ]]; then
+		# Device or file backend - create backing file
 		truncate -s "${NVME_IMG_SIZE}" "$(_nvme_def_file_path)"
 		if [[ "${blkdev_type}" == "device" ]]; then
 			blkdev="$(losetup -f --show "$(_nvme_def_file_path)")"
@@ -1036,17 +1136,32 @@ _nvmet_target_setup() {
 		return
 	fi
 
+	# Handle namespace creation based on backend type
 	ARGS=(--subsysnqn "${subsysnqn}")
-	if [[ -n "${blkdev}" ]]; then
+
+	if [[ "${blkdev_type}" == "mem" || "${blkdev_type}" == "memory" ]]; then
+		# Memory backend: create subsystem without namespace first
+		_create_nvmet_subsystem "${ARGS[@]}"
+		# Then create memory namespace separately
+		ARGS=(--subsysnqn "${subsysnqn}" --mem-size "${mem_size}")
+		if [[ -n "${subsys_uuid}" ]]; then
+			ARGS+=(--uuid "${subsys_uuid}")
+		fi
+		def_subsys_uuid=$(_create_nvmet_ns_mem "${ARGS[@]}")
+	elif [[ -n "${blkdev}" ]]; then
+		# Device or file backend: use existing flow
 		ARGS+=(--blkdev "${blkdev}")
+		if [[ -n "${subsys_uuid}" ]]; then
+			ARGS+=(--uuid "${subsys_uuid}")
+		fi
+		if [[ -n "${resv_enable}" ]]; then
+			ARGS+=("${resv_enable}")
+		fi
+		_create_nvmet_subsystem "${ARGS[@]}"
+	else
+		# No backend (none type): just create subsystem
+		_create_nvmet_subsystem "${ARGS[@]}"
 	fi
-	if [[ -n "${subsys_uuid}" ]]; then
-		ARGS+=(--uuid "${subsys_uuid}")
-	fi
-	if [[ -n "${resv_enable}" ]]; then
-		ARGS+=("${resv_enable}")
-	fi
-	_create_nvmet_subsystem "${ARGS[@]}"
 
 	p=0
 	while (( p < num_ports )); do
-- 
2.40.0