summaryrefslogtreecommitdiffstats
path: root/tests/bugs/glusterd/bug-913555.t
diff options
context:
space:
mode:
author    Sanju Rakonde <srakonde@redhat.com>    2018-01-04 10:35:29 +0530
committer Atin Mukherjee <amukherj@redhat.com>   2018-02-10 16:25:01 +0000
commit    535fd517c6b188732f9d69c0301dd78c3dc3d09c (patch)
tree      1ca37c83a6f4dd299f7d74413fb9eafaa0cf6514 /tests/bugs/glusterd/bug-913555.t
parent    446ddbf1b10ce835e0e40790bc997ec6ac53766a (diff)
glusterd: optimization of test cases
To reduce the overall time taken by the every regression job for all glusterd test cases, avoiding some duplicate tests by clubbing similar test cases into one. real time taken for all regression jobs of glusterd without this patch is 1959 seconds, with this patch it is 1059 seconds. Look at the below document for your reference. https://docs.google.com/document/d/1u8o4-wocrsuPDI8BwuBU6yi_x4xA_pf2qSrFY6WEQpo/edit?usp=sharing Change-Id: Ib14c61ace97e62c3abce47230dd40598640fe9cb BUG: 1530905 Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Diffstat (limited to 'tests/bugs/glusterd/bug-913555.t')
-rwxr-xr-x  tests/bugs/glusterd/bug-913555.t  57
1 files changed, 0 insertions, 57 deletions
diff --git a/tests/bugs/glusterd/bug-913555.t b/tests/bugs/glusterd/bug-913555.t
deleted file mode 100755
index 9bc875340d1..00000000000
--- a/tests/bugs/glusterd/bug-913555.t
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-# Test that a volume becomes unwritable when the cluster loses quorum.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-
-function check_fs {
- df $1 &> /dev/null
- echo $?
-}
-
-function check_peers {
- $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function online_brick_count {
- $CLI_1 --xml volume status | grep '<status>1' | wc -l
-}
-
-cleanup;
-
-TEST launch_cluster 3; # start 3-node virtual cluster
-TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
-TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
-
-TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-TEST $CLI_1 volume start $V0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count;
-
-TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
-
-# Kill one pseudo-node, make sure the others survive and volume stays up.
-TEST kill_node 3;
-EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 online_brick_count;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
-
-# Kill another pseudo-node, make sure the last one dies and volume goes down.
-TEST kill_node 2;
-EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
-#two glusterfsds of the other two glusterds must be dead
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 online_brick_count;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_fs $M0;
-
-TEST $glusterd_2;
-TEST $glusterd_3;
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count; # restore quorum, all ok
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
-
-cleanup