author    Xavier Hernandez <xhernandez@datalab.es>    2014-05-15 10:35:14 +0200
committer Vijay Bellur <vbellur@redhat.com>           2014-07-11 10:34:24 -0700
commit    1392da3e237d8ea080573909015916e3544a6d2c (patch)
tree      89f7f37e65b5d526c18e043cc7dbb51c9e19a50e /tests/basic/ec/self-heal.t
parent    ad112305a1c7452b13c92238b40ded80361838f3 (diff)
cli/glusterd: Added support for dispersed volumes
Two new options have been added to the 'create' command of the cli interface:

    disperse [<count>] redundancy <count>

Both are optional. A dispersed volume is created by specifying at least one of them. If 'disperse' is missing, or it is present but '<count>' is not, the number of bricks enumerated on the command line is taken as the disperse count.

If 'redundancy' is missing, the lowest optimal value is assumed. A configuration is considered optimal (for most workloads) when the disperse count minus the redundancy count is a power of 2. If the resulting redundancy is 1, the volume is created normally, but if it is greater than 1, a warning is shown and the user must answer yes/no to continue volume creation. If there is no optimal value for the given number of bricks, a warning is also shown and, if the user accepts, a redundancy of 1 is used.

If 'redundancy' is specified and the resulting volume is not optimal, another warning is shown to the user.

A distributed-disperse volume can be created using a number of bricks that is a multiple of the disperse count.

Change-Id: Iab93efbe78e905cdb91f54f3741599f7ea6645e4
BUG: 1118629
Signed-off-by: Xavier Hernandez <xhernandez@datalab.es>
Reviewed-on: http://review.gluster.org/7782
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
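As a usage sketch of the new syntax (volume names, hostnames and brick paths below are placeholders, not taken from the patch):

    # 6-brick dispersed volume; 'disperse' is omitted, so the disperse count
    # defaults to the number of bricks listed. 6 - 2 = 4 is a power of 2,
    # so this layout is considered optimal:
    gluster volume create vol1 redundancy 2 server1:/bricks/b{0..5}

    # The same volume with both counts spelled out:
    gluster volume create vol1 disperse 6 redundancy 2 server1:/bricks/b{0..5}

    # Distributed-disperse: the brick count must be a multiple of the
    # disperse count (here 2 x 6 = 12 bricks):
    gluster volume create vol2 disperse 6 redundancy 2 server{1,2}:/bricks/b{0..5}

If 'redundancy' is omitted, the lowest optimal value is chosen automatically, and the CLI asks for confirmation when that value turns out to be greater than 1. The test below exercises the first form ('redundancy 2' with six bricks).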
Diffstat (limited to 'tests/basic/ec/self-heal.t')
-rw-r--r--  tests/basic/ec/self-heal.t  123
1 file changed, 123 insertions, 0 deletions
diff --git a/tests/basic/ec/self-heal.t b/tests/basic/ec/self-heal.t
new file mode 100644
index 00000000000..99cfd9420aa
--- /dev/null
+++ b/tests/basic/ec/self-heal.t
@@ -0,0 +1,123 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test checks the self-healing feature of dispersed volumes
+
+cleanup
+
+tmp=`mktemp -d`
+if [ ! -d $tmp ]; then
+ exit 1
+fi
+
+TESTS_EXPECTED_IN_LOOP=85
+
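+# Start glusterd, create a 6-brick dispersed volume with redundancy 2
+# (4 data bricks per file) and mount it with attribute/entry caching
+# disabled so healed state is visible immediately.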
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
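+# Create a 1MiB reference file outside the volume and record its checksum.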
+TEST dd if=/dev/urandom of=$tmp/test bs=1024 count=1024
+
+cs=$(sha1sum $tmp/test | awk '{ print $1 }')
+
+TEST df -h
+TEST stat $M0
+
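+# Resolve the backend location on each brick so the file's copies can be
+# damaged and inspected directly.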
+for idx in {0..5}; do
+ brick[$idx]=$(gf_get_gfid_backend_file_path $B0/$V0$idx)
+done
+
+cd $M0
+TEST cp $tmp/test test
+TEST chmod 644 test
+EXPECT "-rw-r--r--" stat -c "%A" test
+
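+# Metadata self-heal: change the mode on one brick at a time; the mount
+# must keep reporting 644 and the damaged brick must be healed back.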
+for idx1 in {0..5}; do
+ TEST chmod 666 ${brick[$idx1]}/test
+ sleep 1
+ EXPECT "-rw-r--r--" stat -c "%A" test
+ EXPECT_WITHIN 5 "-rw-r--r--" stat -c "%A" ${brick[$idx1]}/test
+done
+
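+# Same check with two bricks damaged at once; with redundancy 2 there are
+# still enough healthy fragments to detect and repair both.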
+for idx1 in {0..4}; do
+ for idx2 in `seq $(($idx1 + 1)) 5`; do
+ if [ $idx1 -ne $idx2 ]; then
+ TEST chmod 666 ${brick[$idx1]}/test
+ TEST chmod 600 ${brick[$idx2]}/test
+ sleep 1
+ EXPECT "-rw-r--r--" stat -c "%A" test
+ EXPECT_WITHIN 5 "-rw-r--r--" stat -c "%A" ${brick[$idx1]}/test
+ EXPECT_WITHIN 5 "-rw-r--r--" stat -c "%A" ${brick[$idx2]}/test
+ fi
+ done
+done
+
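+# Damage size, user xattrs and mode on two bricks; the mount must still
+# see the original 1MiB file (4 data bricks -> 262144 bytes per fragment)
+# and the damaged bricks must be healed back.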
+TEST truncate -s 0 ${brick[0]}/test
+TEST truncate -s 2097152 ${brick[1]}/test
+TEST setfattr -n user.test -v "test1" ${brick[0]}/test
+TEST setfattr -n user.test -v "test2" ${brick[1]}/test
+TEST chmod 600 ${brick[0]}/test
+TEST chmod 666 ${brick[1]}/test
+sleep 1
+
+EXPECT "1048576" stat -c "%s" test
+TEST ! getfattr -n user.test test
+
+EXPECT_WITHIN 5 "262144" stat -c "%s" ${brick[0]}/test
+EXPECT_WITHIN 5 "262144" stat -c "%s" ${brick[1]}/test
+TEST ! getfattr -n user.test ${brick[0]}/test
+TEST ! getfattr -n user.test ${brick[1]}/test
+EXPECT "-rw-r--r--" stat -c "%A" ${brick[0]}/test
+EXPECT "-rw-r--r--" stat -c "%A" ${brick[1]}/test
+
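+# Entry self-heal: with two bricks down, create a file, a directory, a
+# symlink and a hard link, then bring the bricks back and verify that the
+# new entries are healed onto them.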
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST cp $tmp/test test2
+EXPECT "1048576" stat -c "%s" test2
+TEST chmod 777 test2
+EXPECT "-rwxrwxrwx" stat -c "%A" test2
+
+TEST mkdir dir1
+TEST ls -al dir1
+
+TEST ln -s test2 test3
+TEST [ -h test3 ]
+
+TEST ln test2 test4
+TEST [ -f test4 ]
+EXPECT "2" stat -c "%h" test2
+EXPECT "2" stat -c "%h" test4
+
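+# Restart the volume so the killed bricks come back, remount, and check
+# that everything created while they were down has been healed.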
+cd
+TEST umount $M0
+TEST $CLI volume stop $V0 force
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+
+EXPECT "1048576" stat -c "%s" test2
+EXPECT "-rwxrwxrwx" stat -c "%A" test2
+EXPECT_WITHIN 5 "262144" stat -c "%s" ${brick[0]}/test2
+EXPECT_WITHIN 5 "262144" stat -c "%s" ${brick[1]}/test2
+EXPECT "-rwxrwxrwx" stat -c "%A" ${brick[0]}/test2
+EXPECT "-rwxrwxrwx" stat -c "%A" ${brick[1]}/test2
+
+TEST ls -al dir1
+EXPECT_WITHIN 5 "1" eval "if [ -d ${brick[0]}/dir1 ]; then echo 1; fi"
+EXPECT_WITHIN 5 "1" eval "if [ -d ${brick[1]}/dir1 ]; then echo 1; fi"
+
+TEST [ -h test3 ]
+EXPECT_WITHIN 5 "1" eval "if [ -h ${brick[0]}/test3 ]; then echo 1; fi"
+EXPECT_WITHIN 5 "1" eval "if [ -h ${brick[1]}/test3 ]; then echo 1; fi"
+
+EXPECT "2" stat -c "%h" test4
+EXPECT_WITHIN 5 "3" stat -c "%h" ${brick[0]}/test4
+EXPECT_WITHIN 5 "3" stat -c "%h" ${brick[1]}/test4
+
+rm -rf $tmp
+
+cleanup