author     GauravKumarGarg <ggarg@redhat.com>                2014-12-24 16:39:03 +0530
committer  Krishnan Parthasarathi <kparthas@redhat.com>      2015-01-20 22:38:36 -0800
commit     30ad195d49b971a5389d37c9d9a3583186f3d54a (patch)
tree       84a7e51f14477ddbd1b6cc33f40effc21f692780 /tests
parent     f6a2f152aa9c8a66768e4ba0d1f66737c081639b (diff)
glusterd: quorum validation in glusterd syncop framework
Previously, glusterd did not perform quorum validation in the syncop framework. So when quorum was lost, a few operations that go through the syncop framework (e.g. add-brick, remove-brick, volume set) still passed successfully without any quorum validation check. With this change, quorum is validated in the syncop framework, and all such operations are blocked when quorum is lost, except "volume set <quorum options>" and the "volume reset all" command.

Change-Id: I4c2ef16728d55c98a228bb86795023d9c1f4e9fb
BUG: 1177132
Signed-off-by: Gaurav Kumar Garg <ggarg@redhat.com>
Reviewed-on: http://review.gluster.org/9349
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Tested-by: Krishnan Parthasarathi <kparthas@redhat.com>
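Both the removed and the added test below rely on the server-quorum-ratio rule spelled out in their comments: server-side quorum is met when the number of reachable nodes is at least 'quorum-ratio' percent of the nodes in the cluster. A minimal bash sketch of that rule, using a hypothetical quorum_met helper that is not part of glusterd or of the test framework:

#!/bin/bash
# Hypothetical helper illustrating the server-quorum-ratio rule described in
# the test comments; this is not the actual glusterd implementation.
quorum_met () {
        local active=$1 total=$2 ratio=$3
        # Quorum is met when active/total >= ratio/100; multiply before
        # comparing to stay in integer arithmetic.
        [ $(( active * 100 )) -ge $(( ratio * total )) ]
}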
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/bugs/glusterd/bug-1177132-quorum-calculation-fix.t | 40
-rw-r--r--  tests/bugs/glusterd/bug-1177132-quorum-validation.t      | 64
2 files changed, 64 insertions(+), 40 deletions(-)
diff --git a/tests/bugs/glusterd/bug-1177132-quorum-calculation-fix.t b/tests/bugs/glusterd/bug-1177132-quorum-calculation-fix.t
deleted file mode 100755
index e10fd193f5d..00000000000
--- a/tests/bugs/glusterd/bug-1177132-quorum-calculation-fix.t
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../cluster.rc
-
-cleanup;
-
-TEST launch_cluster 2;
-TEST $CLI_1 peer probe $H2;
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
-
-# Let's create the volume and set the quorum type to server
-TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
-TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
-
-# Start the volume
-TEST $CLI_1 volume start $V0
-
-# Set the quorum ratio to 80%, which means that in a two-node cluster, if one
-# node is down, quorum shouldn't be met and operations which go through quorum
-# validation should fail
-TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
-
-# Bring down one glusterd instance
-TEST kill_glusterd 2
-
-# Now execute a command which goes through the op state machine; it should fail
-TEST ! $CLI_1 volume profile $V0 start
-
-# Bring back the glusterd instance
-TEST $glusterd_2
-
-EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
-
-# Now re-execute the same profile command and this time it should succeed
-TEST $CLI_1 volume profile $V0 start
-
-cleanup;
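Applied to the deleted test's settings (a two-node cluster with quorum-ratio 80 and one glusterd killed), the hypothetical quorum_met sketch above reproduces the expected outcomes:

quorum_met 1 2 80 || echo "quorum lost: 'volume profile start' should fail"   # 100 < 160
quorum_met 2 2 80 && echo "quorum met: the same command should succeed"       # 200 >= 160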
diff --git a/tests/bugs/glusterd/bug-1177132-quorum-validation.t b/tests/bugs/glusterd/bug-1177132-quorum-validation.t
new file mode 100644
index 00000000000..57aec5ccf57
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1177132-quorum-validation.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+# Test case for quorum validation in glusterd for syncop framework
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+
+cleanup;
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+# Let's create the volume and set the quorum type to server
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}0 $H2:$B2/${V0}1
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+
+# Start the volume
+TEST $CLI_1 volume start $V0
+
+# Set the quorum ratio to 52, meaning that at least 52% of the nodes in the
+# cluster must be available to perform a volume operation.
+# i.e. server-side quorum is met if the number of nodes that are available is
+# greater than or equal to 'quorum-ratio' percent of the number of nodes in
+# the cluster
+
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 52
+
+# Bring down 2nd glusterd
+TEST kill_glusterd 2
+
+# Now quorum is not met. The add-brick, remove-brick, and volume-set commands
+# (commands based on the syncop framework) should fail
+TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1
+TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
+TEST ! $CLI_1 volume set $V0 barrier enable
+
+# Now execute a command which goes through the op state machine; it should fail
+TEST ! $CLI_1 volume profile $V0 start
+
+# The 'volume set all' and 'volume reset all' commands should still succeed
+TEST $CLI_1 volume set all cluster.server-quorum-ratio 80
+TEST $CLI_1 volume reset all
+
+# Bring back 2nd glusterd
+TEST $glusterd_2
+
+# After the 2nd glusterd comes back, there will be 2 nodes in the cluster
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+# Now quorum is met. The add-brick, remove-brick, and volume-set commands
+# should succeed
+TEST $CLI_1 volume add-brick $V0 $H2:$B2/${V0}2
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 start
+TEST $CLI_1 volume set $V0 barrier enable
+TEST $CLI_1 volume remove-brick $V0 $H2:$B2/${V0}2 stop
+
+# Now re-execute the same profile command and this time it should succeed
+TEST $CLI_1 volume profile $V0 start
+
+cleanup;
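For the new test's settings the same hypothetical sketch gives 1 * 100 = 100 against a requirement of 52 * 2 = 104 once one of the two glusterd instances is down, so quorum is lost and the syncop-based commands are expected to fail, while 'volume set all <quorum option>' and 'volume reset all' remain allowed per the commit message:

quorum_met 1 2 52 || echo "quorum lost: add-brick, remove-brick and volume set should fail"   # 100 < 104
quorum_met 2 2 52 && echo "quorum met: the same commands should succeed"                      # 200 >= 104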