From 64954eb3c58f4ef077e54e8a3726fd2d27419b12 Mon Sep 17 00:00:00 2001
From: Niels de Vos
Date: Fri, 26 Dec 2014 12:57:48 +0100
Subject: tests: move all test-cases into component subdirectories

There are around 300 regression tests, 250 of them in tests/bugs. Running a
partial set of tests/bugs is not easy because it is a flat directory with
almost all of the tests inside. Splitting it into component subdirectories
makes it easier to run a partial set of tests/bugs, and allows the use of
multiple build hosts for a single commit, each running a subset of the tests
for a quicker result.

Additional changes made:
- correct the include path for *.rc shell libraries and *.py utils
- make the testcases pass checkpatch
- arequal-checksum in afr/self-heal.t was never executed, now it is
- include.rc now complains loudly if it fails to find env.rc

Change-Id: I26ffd067e9853d3be1fd63b2f37d8aa0fd1b4fea
BUG: 1178685
Reported-by: Emmanuel Dreyfus
Reported-by: Atin Mukherjee
URL: http://www.gluster.org/pipermail/gluster-devel/2014-December/043414.html
Signed-off-by: Niels de Vos
Reviewed-on: http://review.gluster.org/9353
Reviewed-by: Kaleb KEITHLEY
Reviewed-by: Emmanuel Dreyfus
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 tests/bugs/glusterd/bug-913555.t | 54 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100755 tests/bugs/glusterd/bug-913555.t

(limited to 'tests/bugs/glusterd/bug-913555.t')

diff --git a/tests/bugs/glusterd/bug-913555.t b/tests/bugs/glusterd/bug-913555.t
new file mode 100755
index 00000000000..4f9e004a654
--- /dev/null
+++ b/tests/bugs/glusterd/bug-913555.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Test that a volume becomes unwritable when the cluster loses quorum.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+
+function check_fs {
+        df $1 &> /dev/null
+        echo $?
+}
+
+function check_peers {
+        $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function glusterfsd_count {
+        pidof glusterfsd | wc -w;
+}
+
+cleanup;
+
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
+
+# Kill one pseudo-node, make sure the others survive and the volume stays up.
+TEST kill_node 3;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+EXPECT 0 check_fs $M0;
+EXPECT 2 glusterfsd_count;
+
+# Kill another pseudo-node, make sure the last one dies and the volume goes down.
+TEST kill_node 2;
+EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
+EXPECT 1 check_fs $M0;
+EXPECT 0 glusterfsd_count; # the two glusterfsds of the other two glusterds
+                           # must be dead
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 glusterfsd_count; # restore quorum, all ok
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
+
+cleanup
--
cgit
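
A note on the motivation: with the tests grouped by component, a subset can be
run on its own rather than the whole flat tests/bugs directory. For example,
assuming the Perl TAP harness 'prove' is installed (the .t regression tests
emit TAP output it can consume), only the glusterd bug tests could be run:

    # Run just the glusterd-related regression tests, recursively and verbosely,
    # instead of the entire tests/bugs tree.
    prove -rv tests/bugs/glusterd/

The same layout lets several build hosts each take one component subdirectory
for a single commit, which is the "quicker result" the commit message refers to.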
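
On the last listed side change, "include.rc now complains loudly if it fails to
find env.rc": the snippet below is only an illustrative sketch of such a guard,
not the actual include.rc code; the env.rc location, message text, and use of
BASH_SOURCE are assumptions.

    # Hypothetical guard, not the real include.rc implementation: abort with a
    # clear error if the configure-generated env.rc cannot be found, instead of
    # failing silently later in the test run.
    env_rc="$(dirname "${BASH_SOURCE[0]}")/env.rc"   # assumed to sit next to include.rc
    if [ ! -f "$env_rc" ]; then
        echo "FATAL: $env_rc not found, did you run ./configure?" >&2
        exit 1
    fi
    . "$env_rc"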