summaryrefslogtreecommitdiffstats
path: root/tests/bugs/bug-1099890.t
diff options
context:
space:
mode:
authorNiels de Vos <ndevos@redhat.com>2014-12-26 12:57:48 +0100
committerVijay Bellur <vbellur@redhat.com>2015-01-06 03:24:24 -0800
commit64954eb3c58f4ef077e54e8a3726fd2d27419b12 (patch)
tree52cd5a39bbfda7442a5f0955ac2800b74a45b58a /tests/bugs/bug-1099890.t
parentc4ab37c02e9edc23d0637e23d6f2b42d0827dad2 (diff)
tests: move all test-cases into component subdirectories
There are around 300 regression tests, 250 being in tests/bugs. Running partial set of tests/bugs is not easy because this is a flat directory with almost all tests inside. It would be valuable to make partial test/bugs easier, and allow the use of multiple build hosts for a single commit, each running a subset of the tests for a quicker result. Additional changes made: - correct the include path for *.rc shell libraries and *.py utils - make the testcases pass checkpatch - arequal-checksum in afr/self-heal.t was never executed, now it is - include.rc now complains loudly if it fails to find env.rc Change-Id: I26ffd067e9853d3be1fd63b2f37d8aa0fd1b4fea BUG: 1178685 Reported-by: Emmanuel Dreyfus <manu@netbsd.org> Reported-by: Atin Mukherjee <amukherj@redhat.com> URL: http://www.gluster.org/pipermail/gluster-devel/2014-December/043414.html Signed-off-by: Niels de Vos <ndevos@redhat.com> Reviewed-on: http://review.gluster.org/9353 Reviewed-by: Kaleb KEITHLEY <kkeithle@redhat.com> Reviewed-by: Emmanuel Dreyfus <manu@netbsd.org> Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'tests/bugs/bug-1099890.t')
-rw-r--r--tests/bugs/bug-1099890.t125
1 files changed, 0 insertions, 125 deletions
diff --git a/tests/bugs/bug-1099890.t b/tests/bugs/bug-1099890.t
deleted file mode 100644
index c4be2cf56ba..00000000000
--- a/tests/bugs/bug-1099890.t
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-. $(dirname $0)/../dht.rc
-
-## TO-DO: Fix the following once the dht du refresh interval issue is fixed:
-## 1. Do away with sleep(1).
-## 2. Do away with creation of empty files.
-
-cleanup;
-
-TEST glusterd;
-TEST pidof glusterd;
-
-# Create 2 loop devices, one per brick.
-TEST truncate -s 100M $B0/brick1
-TEST truncate -s 100M $B0/brick2
-
-TEST L1=`SETUP_LOOP $B0/brick1`
-TEST MKFS_LOOP $L1
-
-TEST L2=`SETUP_LOOP $B0/brick2`
-TEST MKFS_LOOP $L2
-
-TEST mkdir -p $B0/${V0}{1,2}
-
-TEST MOUNT_LOOP $L1 $B0/${V0}1
-TEST MOUNT_LOOP $L2 $B0/${V0}2
-
-# Create a plain distribute volume with 2 subvols.
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-
-TEST $CLI volume start $V0;
-EXPECT "Started" volinfo_field $V0 'Status';
-
-TEST $CLI volume quota $V0 enable;
-
-TEST $CLI volume set $V0 features.quota-deem-statfs on
-
-TEST $CLI volume quota $V0 limit-usage / 150MB;
-
-TEST $CLI volume set $V0 cluster.min-free-disk 50%
-
-TEST glusterfs -s $H0 --volfile-id=$V0 $M0
-
-# Make sure quota-deem-statfs is working as expected
-EXPECT "150M" echo `df -h $M0 -P | tail -1 | awk {'print $2'}`
-
-# Create a new file 'foo' under the root of the volume, which hashes to subvol-0
-# of DHT, that consumes 40M
-TEST dd if=/dev/zero of=$M0/foo bs=5120k count=8
-
-TEST stat $B0/${V0}1/foo
-TEST ! stat $B0/${V0}2/foo
-
-# Create a new file 'bar' under the root of the volume, which hashes to subvol-1
-# of DHT, that consumes 40M
-TEST dd if=/dev/zero of=$M0/bar bs=5120k count=8
-
-TEST ! stat $B0/${V0}1/bar
-TEST stat $B0/${V0}2/bar
-
-# Touch a zero-byte file on the root of the volume to make sure the statfs data
-# on DHT is refreshed
-sleep 1;
-TEST touch $M0/empty1;
-
-# At this point, the available space on each subvol {60M,60M} is greater than
-# their min-free-disk {50M,50M}, but if this bug still exists, then
-# the total available space on the volume as perceived by DHT should be less
-# than min-free-disk, i.e.,
-#
-# consumed space returned per subvol by quota = (40M + 40M) = 80M
-#
-# Therefore, consumed space per subvol computed by DHT WITHOUT the fix would be:
-# (80M/150M)*100 = 53%
-#
-# Available space per subvol as perceived by DHT with the bug = 47%
-# which is less than min-free-disk
-
-# Now I create a file that hashes to subvol-1 (counting from 0) of DHT.
-# If this bug still exists,then DHT should be routing this creation to subvol-0.
-# If this bug is fixed, then DHT should be routing the creation to subvol-1 only
-# as it has more than min-free-disk space available.
-
-TEST dd if=/dev/zero of=$M0/file bs=1k count=1
-sleep 1;
-TEST ! stat $B0/${V0}1/file
-TEST stat $B0/${V0}2/file
-
-# Touch another zero-byte file on the root of the volume to refresh statfs
-# values stored by DHT.
-
-TEST touch $M0/empty2;
-
-# Now I create a new file that hashes to subvol-0, at the end of which, there
-# will be less than min-free-disk space available on it.
-TEST dd if=/dev/zero of=$M0/fil bs=5120k count=4
-sleep 1;
-TEST stat $B0/${V0}1/fil
-TEST ! stat $B0/${V0}2/fil
-
-# Touch to refresh statfs info cached by DHT
-
-TEST touch $M0/empty3;
-
-# Now I create a file that hashes to subvol-0 but since it has less than
-# min-free-disk space available, its data will be cached on subvol-1.
-
-TEST dd if=/dev/zero of=$M0/zz bs=5120k count=1
-
-TEST stat $B0/${V0}1/zz
-TEST stat $B0/${V0}2/zz
-
-EXPECT "$V0-client-1" dht_get_linkto_target "$B0/${V0}1/zz"
-
-EXPECT "1" is_dht_linkfile "$B0/${V0}1/zz"
-
-force_umount $M0
-$CLI volume stop $V0
-UMOUNT_LOOP ${B0}/${V0}{1,2}
-rm -f ${B0}/brick{1,2}
-
-cleanup