author     Sanju Rakonde <srakonde@redhat.com>     2018-01-04 10:35:29 +0530
committer  Atin Mukherjee <amukherj@redhat.com>    2018-02-10 16:25:01 +0000
commit     535fd517c6b188732f9d69c0301dd78c3dc3d09c (patch)
tree       1ca37c83a6f4dd299f7d74413fb9eafaa0cf6514 /tests/bugs/glusterd/sync-post-glusterd-restart.t
parent     446ddbf1b10ce835e0e40790bc997ec6ac53766a (diff)
glusterd: optimization of test cases
To reduce the overall time taken by every regression job for the glusterd test cases, avoid some duplicate tests by clubbing similar test cases into one. The real time taken by all glusterd regression jobs without this patch is 1959 seconds; with this patch it is 1059 seconds. See the document below for reference:

https://docs.google.com/document/d/1u8o4-wocrsuPDI8BwuBU6yi_x4xA_pf2qSrFY6WEQpo/edit?usp=sharing

Change-Id: Ib14c61ace97e62c3abce47230dd40598640fe9cb
BUG: 1530905
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Diffstat (limited to 'tests/bugs/glusterd/sync-post-glusterd-restart.t')
-rw-r--r--  tests/bugs/glusterd/sync-post-glusterd-restart.t | 54
1 file changed, 54 insertions, 0 deletions
diff --git a/tests/bugs/glusterd/sync-post-glusterd-restart.t b/tests/bugs/glusterd/sync-post-glusterd-restart.t
new file mode 100644
index 00000000000..de3dff715ab
--- /dev/null
+++ b/tests/bugs/glusterd/sync-post-glusterd-restart.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function volume_get_field()
+{
+ local vol=$1
+ local field=$2
+ $CLI_2 volume get $vol $field | tail -1 | awk '{print $2}'
+}
+
+cleanup
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume set $V0 performance.readdir-ahead on
+
+# Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+##bug-1420637 and bug-1323287 - sync post glusterd restart
+
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 60
+TEST $CLI_1 volume set $V0 performance.readdir-ahead off
+TEST $CLI_1 volume set $V0 performance.write-behind off
+
+# Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster,
+# so each node should again see one connected peer
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+#bug-1420637-volume sync post glusterd restart
+
+EXPECT_WITHIN $PROBE_TIMEOUT "60" volinfo_field_2 all cluster.server-quorum-ratio
+EXPECT_WITHIN $PROBE_TIMEOUT "off" volinfo_field_2 $V0 performance.readdir-ahead
+
+#bug-1323287
+EXPECT_WITHIN $PROBE_TIMEOUT 'off' volume_get_field $V0 'write-behind'
+
+#bug-1213295 - volume stop should not crash glusterd post glusterd restart
+
+TEST $CLI_2 volume stop $V0
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+
+cleanup
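
A minimal sketch (not part of this commit) of what the volume_get_field() helper above relies on. It assumes the usual two-column "Option / Value" layout printed by `gluster volume get`; the volume name "patchy" and the sample output shown in the comments are placeholders, not taken from the patch:

# Hypothetical session illustrating the pipeline inside volume_get_field():
#
#   $ gluster volume get patchy performance.write-behind
#   Option                          Value
#   ------                          -----
#   performance.write-behind        off
#
# tail -1 keeps the last (data) line and awk prints its second
# whitespace-separated column, i.e. just the option value:
gluster volume get patchy performance.write-behind | tail -1 | awk '{print $2}'    # prints: off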