[Libguestfs] [nbdkit PATCH 2/2] tests: Make parallel tests more robust

Eric Blake eblake at redhat.com
Wed Nov 22 02:39:11 UTC 2017


nbdkit simulates the NBD FUA flag by invoking flush() after a
write.  On a heavily loaded system, especially when both
parallel tests are hammering the same file, this flush can be
expensive enough that the write takes longer than the
corresponding read, in spite of the file plugin having a
shorter wdelay.  (Note that the tests write idempotent data
into the file, so there is no risk of the file contents
changing.)  Switch the tests so that the read is the
second-issued but shorter operation, making the tests less
sensitive to how long the flush after a write takes.

Signed-off-by: Eric Blake <eblake at redhat.com>
---
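
Illustration for reviewers (not part of the patch): with the old
ordering and delays, the default-threads case looked like this
(taken from the lines removed below):

  nbdkit -v -U - file file=file-data rdelay=2 wdelay=1 --run '
    $QEMU_IO -f raw -c "aio_read 0 1" -c "aio_write -P 2 1 1" -c aio_flush $nbd
  '

That check expected the 1-second write to finish before the 2-second
read, but as described above nbdkit's FUA emulation adds a flush()
after the write, so a flush taking more than roughly one second lets
the read complete first and the expected-output comparison fails.
With the new ordering the read is both issued second and the shorter
operation, so a slow flush only delays the write further and cannot
invert the expected completion order.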
 tests/test-parallel-file.sh | 30 +++++++++++++++---------------
 tests/test-parallel-nbd.sh  | 30 +++++++++++++++---------------
 2 files changed, 30 insertions(+), 30 deletions(-)

diff --git a/tests/test-parallel-file.sh b/tests/test-parallel-file.sh
index 50de9c2..79a60ac 100755
--- a/tests/test-parallel-file.sh
+++ b/tests/test-parallel-file.sh
@@ -37,30 +37,20 @@ test -f file-data || { echo "Missing file-data"; exit 77; }
 : ${QEMU_IO=qemu-io}

 # Sanity check that qemu-io can issue parallel requests
-$QEMU_IO -f raw -c "aio_read 0 1" -c "aio_write -P 2 1 1" -c aio_flush \
+$QEMU_IO -f raw -c "aio_write -P 2 1 1" -c "aio_read -P 1 0 1" -c aio_flush \
   file-data || { echo "'$QEMU_IO' can't drive parallel requests"; exit 77; }

 # Set up the file plugin to delay both reads and writes (for a good chance
-# that parallel requests are in flight), and with reads longer than writes
+# that parallel requests are in flight), and with writes longer than reads
 # (to more easily detect if out-of-order completion happens).  This test
 # may have spurious failures under heavy loads on the test machine, where
 # tuning the delays may help.

 trap 'rm -f test-parallel-file.out' 0 1 2 3 15

-# With --threads=1, the read should complete first because it was issued first
-nbdkit -v -t 1 -U - file file=file-data rdelay=2 wdelay=1 --run '
-  $QEMU_IO -f raw -c "aio_read 0 1" -c "aio_write -P 2 1 1" -c aio_flush $nbd
-' | tee test-parallel-file.out
-if test "$(grep '1/1' test-parallel-file.out)" != \
-"read 1/1 bytes at offset 0
-wrote 1/1 bytes at offset 1"; then
-  exit 1
-fi
-
-# With default --threads, the faster write should complete first
-nbdkit -v -U - file file=file-data rdelay=2 wdelay=1 --run '
-  $QEMU_IO -f raw -c "aio_read 0 1" -c "aio_write -P 2 1 1" -c aio_flush $nbd
+# With --threads=1, the write should complete first because it was issued first
+nbdkit -v -t 1 -U - file file=file-data wdelay=2 rdelay=1 --run '
+  $QEMU_IO -f raw -c "aio_write -P 2 1 1" -c "aio_read -P 1 0 1" -c aio_flush $nbd
 ' | tee test-parallel-file.out
 if test "$(grep '1/1' test-parallel-file.out)" != \
 "wrote 1/1 bytes at offset 1
@@ -68,4 +58,14 @@ read 1/1 bytes at offset 0"; then
   exit 1
 fi

+# With default --threads, the faster read should complete first
+nbdkit -v -U - file file=file-data wdelay=2 rdelay=1 --run '
+  $QEMU_IO -f raw -c "aio_write -P 2 1 1" -c "aio_read -P 1 0 1" -c aio_flush $nbd
+' | tee test-parallel-file.out
+if test "$(grep '1/1' test-parallel-file.out)" != \
+"read 1/1 bytes at offset 0
+wrote 1/1 bytes at offset 1"; then
+  exit 1
+fi
+
 exit 0
diff --git a/tests/test-parallel-nbd.sh b/tests/test-parallel-nbd.sh
index 233118b..f8e5071 100755
--- a/tests/test-parallel-nbd.sh
+++ b/tests/test-parallel-nbd.sh
@@ -37,7 +37,7 @@ test -f file-data || { echo "Missing file-data"; exit 77; }
 : ${QEMU_IO=qemu-io}

 # Sanity check that qemu-io can issue parallel requests
-$QEMU_IO -f raw -c "aio_read 0 1" -c "aio_write -P 2 1 1" -c aio_flush \
+$QEMU_IO -f raw -c "aio_write -P 2 1 1" -c "aio_read -P 1 0 1" -c aio_flush \
   file-data || { echo "'$QEMU_IO' can't drive parallel requests"; exit 77; }

 # We require --exit-with-parent to work
@@ -45,7 +45,7 @@ $QEMU_IO -f raw -c "aio_read 0 1" -c "aio_write -P 2 1 1" -c aio_flush \
   { echo "Missing --exit-with-parent support"; exit 77; }

 # Set up the file plugin to delay both reads and writes (for a good chance
-# that parallel requests are in flight), and with reads longer than writes
+# that parallel requests are in flight), and with writes longer than reads
 # (to more easily detect if out-of-order completion happens).  This test
 # may have spurious failures under heavy loads on the test machine, where
 # tuning the delays may help.
@@ -54,21 +54,11 @@ trap 'rm -f test-parallel-nbd.out test-parallel-nbd.sock' 0 1 2 3 15
 (
 rm -f test-parallel-nbd.sock
 nbdkit --exit-with-parent -v -U test-parallel-nbd.sock \
-  file file=file-data rdelay=2 wdelay=1 &
+  file file=file-data wdelay=2 rdelay=1 &

-# With --threads=1, the read should complete first because it was issued first
+# With --threads=1, the write should complete first because it was issued first
 nbdkit -v -t 1 -U - nbd socket=test-parallel-nbd.sock --run '
-  $QEMU_IO -f raw -c "aio_read 0 1" -c "aio_write -P 2 1 1" -c aio_flush $nbd
-' | tee test-parallel-nbd.out
-if test "$(grep '1/1' test-parallel-nbd.out)" != \
-"read 1/1 bytes at offset 0
-wrote 1/1 bytes at offset 1"; then
-  exit 1
-fi
-
-# With default --threads, the faster write should complete first
-nbdkit -v -U - nbd socket=test-parallel-nbd.sock --run '
-  $QEMU_IO -f raw -c "aio_read 0 1" -c "aio_write -P 2 1 1" -c aio_flush $nbd
+  $QEMU_IO -f raw -c "aio_write -P 2 1 1" -c "aio_read -P 1 0 1" -c aio_flush $nbd
 ' | tee test-parallel-nbd.out
 if test "$(grep '1/1' test-parallel-nbd.out)" != \
 "wrote 1/1 bytes at offset 1
@@ -76,6 +66,16 @@ read 1/1 bytes at offset 0"; then
   exit 1
 fi

+# With default --threads, the faster read should complete first
+nbdkit -v -U - nbd socket=test-parallel-nbd.sock --run '
+  $QEMU_IO -f raw -c "aio_write -P 2 1 1" -c "aio_read -P 1 0 1" -c aio_flush $nbd
+' | tee test-parallel-nbd.out
+if test "$(grep '1/1' test-parallel-nbd.out)" != \
+"read 1/1 bytes at offset 0
+wrote 1/1 bytes at offset 1"; then
+  exit 1
+fi
+
 ) || exit $?

 exit 0
-- 
2.13.6