[PATCH blktest V2] nvme: test nvmet-wq sysfs interface
Chaitanya Kulkarni
kch at nvidia.com
Mon Nov 11 17:29:04 PST 2024
Add a test that randomly sets the cpumask from available CPUs for
the nvmet-wq while running the fio workload. This patch has been
tested on nvme-loop and nvme-tcp transport.
Signed-off-by: Chaitanya Kulkarni <kch at nvidia.com>
---
V2:-
1. Remove the space.
2. Add set_conditions.
3. Use read -a numbers -d '' < <(seq $min_cpus $max_cpus)
4. Use : ${TIMEOUT:=60}
5. Remove pgrep use kill -0
6. Simplify cpumask calculation
7. Remove killall
tests/nvme/055 | 94 ++++++++++++++++++++++++++++++++++++++++++++++
tests/nvme/055.out | 3 ++
2 files changed, 97 insertions(+)
create mode 100755 tests/nvme/055
create mode 100644 tests/nvme/055.out
diff --git a/tests/nvme/055 b/tests/nvme/055
new file mode 100755
index 0000000..24b72c8
--- /dev/null
+++ b/tests/nvme/055
@@ -0,0 +1,94 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright (C) 2024 Chaitanya Kulkarni
+#
+# Test nvmet-wq cpumask sysfs attribute with NVMe-oF and fio workload
+#
+
+. tests/nvme/rc
+
+DESCRIPTION="Test nvmet-wq cpumask sysfs attribute with fio on NVMe-oF device"
+TIMED=1
+
+requires() {
+ # Fabrics-only test: fio drives the workload and the loop driver backs
+ # the file-based target created by _nvmet_target_setup.
+ _nvme_requires
+ _have_fio && _have_loop
+ _require_nvme_trtype_is_fabrics
+}
+
+set_conditions() {
+ # Re-run this test once per supported fabrics transport (loop, tcp, ...).
+ _set_nvme_trtype "$@"
+}
+
+
+cleanup_setup() {
+ # Order matters: disconnect the host side before tearing down the target.
+ _nvme_disconnect_subsys
+ _nvmet_target_cleanup
+}
+
+test() {
+	local cpumask_path="/sys/devices/virtual/workqueue/nvmet-wq/cpumask"
+	local restored_cpumask
+	local original_cpumask
+	local cpu_mask
+	local cpu
+	local ns
+
+	echo "Running ${TEST_NAME}"
+
+	_setup_nvmet
+	_nvmet_target_setup
+	_nvme_connect_subsys
+
+	if [ ! -f "$cpumask_path" ]; then
+		SKIP_REASONS+=("nvmet-wq cpumask sysfs attribute not found.")
+		cleanup_setup
+		return 1
+	fi
+
+	ns=$(_find_nvme_ns "${def_subsys_uuid}")
+
+	# Save the current mask so it can be restored after the test.
+	original_cpumask=$(cat "$cpumask_path")
+
+	#shellcheck disable=SC2010
+	CPUS="$(ls -1 /sys/devices/system/cpu | grep -E '^cpu[0-9]+$' | sed 's/cpu//')"
+
+	: "${TIMEOUT:=60}"
+	_run_fio_rand_io --filename="/dev/${ns}" \
+		--iodepth=8 --size="${NVME_IMG_SIZE}" &> "$FULL" &
+
+	# Let the fio settle down else we will break in the loop for fio check
+	sleep 1
+
+	for cpu in $CPUS; do
+		# Stop cycling masks once the fio workload has exited.
+		if ! kill -0 $! 2> /dev/null ; then
+			break
+		fi
+
+		# Bash arithmetic is 64-bit; a single-bit mask for CPU 63+
+		# cannot be computed with a shift, so stop there.
+		((cpu >= 63)) && break
+
+		# The workqueue cpumask attribute takes a hex bitmask
+		# (cpumask_parse), not a CPU number: set only this CPU's bit.
+		cpu_mask=$(printf '%x' $((1 << cpu)))
+		echo "$cpu_mask" > "$cpumask_path" 2>/dev/null
+
+		# The kernel prints the mask back zero-padded and comma
+		# grouped (e.g. 00000000,00000008), so tolerate leading
+		# zeros and commas before the expected value.
+		if ! [[ "$(cat "$cpumask_path")" =~ ^[0,]*${cpu_mask}$ ]]; then
+			echo "Test Failed: cpumask was not set correctly "
+			echo "Expected ${cpu_mask} found $(cat "$cpumask_path")"
+			cleanup_setup
+			return 1
+		fi
+		sleep 3
+	done
+
+	kill -9 $! &> /dev/null
+
+	# Restore original cpumask
+	echo "$original_cpumask" > "$cpumask_path"
+	restored_cpumask=$(cat "$cpumask_path")
+
+	if [[ "$restored_cpumask" != "$original_cpumask" ]]; then
+		echo "Failed to restore original cpumask."
+		cleanup_setup
+		return 1
+	fi
+
+	cleanup_setup
+	echo "Test complete"
+}
diff --git a/tests/nvme/055.out b/tests/nvme/055.out
new file mode 100644
index 0000000..427dfee
--- /dev/null
+++ b/tests/nvme/055.out
@@ -0,0 +1,3 @@
+Running nvme/055
+disconnected 1 controller(s)
+Test complete
--
2.40.0
More information about the Linux-nvme
mailing list