author     Anand Avati <avati@redhat.com>       2013-02-21 18:49:43 -0800
committer  Vijay Bellur <vbellur@redhat.com>    2013-02-26 09:07:13 -0800
commit     89ea4583161382de7e56007b3dee3359e2a41b98 (patch)
tree       1fa75c53e0fe24eeb1b55847031def3a6f03b468
parent     5e6dfce0b0d55d96b5bdad6a693fdb2826c20b92 (diff)
tests/cluster.rc: support for virtual multi-server glusterd tests
Since http://review.gluster.org/4556 glusterd is capable of running many instances of itself on a single system. This patch exploits that feature and enhances the regression test framework to expose handy primitives so that test cases may be written to test glusterd in a cluster.

Usage:

1. Include "$(dirname $0)/../cluster.rc" to get access to the extensions.
2. Call launch_cluster $N, where $N is the count of virtual servers.

Calling launch_cluster starts $N glusterds which bind to $N different IPs and dynamically defines these primitives:

- Variables $H1 .. $Hn, assigned the hostnames of each "server".
- Variables $CLI_1 .. $CLI_n, assigned as commands to run CLI commands on the corresponding N'th server.
- Variables $B1 .. $Bn, assigned the backend directories on each "server".
- Function kill_glusterd, which accepts a parameter - the index number of the glusterd to be killed.
- Variables $glusterd_1 .. $glusterd_n, assigned the command lines to restart the corresponding glusterd, if it was previously killed.

The current set of primitives and functions was implemented with the goal of satisfying ./tests/bugs/bug-913555.t. The API will be made richer as we add more cluster test cases.

Change-Id: Ieb13ed9f4a72ac0321db0ca0844c7b294145bb32
BUG: 913555
Signed-off-by: Anand Avati <avati@redhat.com>
Reviewed-on: http://review.gluster.org/4566
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Krishnan Parthasarathi <kparthas@redhat.com>
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
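For illustration, a minimal test sketch built on these primitives might look like the snippet below. It assumes the usual include.rc helpers (TEST, EXPECT_WITHIN, cleanup) and a locally defined check_peers helper as in bug-913555.t; the volume name $V0 and backend paths $B1/$B2 follow the framework's existing conventions, and the two-node layout shown here is only an example, not part of this patch.

    #!/bin/bash
    . $(dirname $0)/../include.rc
    . $(dirname $0)/../cluster.rc

    # Local helper, as defined in bug-913555.t: count connected peers
    # as seen from server 1.
    function check_peers {
        $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
    }

    cleanup;

    TEST launch_cluster 2;          # two virtual glusterds on 127.1.1.1 and 127.1.1.2
    TEST $CLI_1 peer probe $H2;     # probe server 2 from server 1's CLI
    EXPECT_WITHIN 20 1 check_peers;

    TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
    TEST $CLI_1 volume start $V0

    TEST kill_glusterd 2;           # stop server 2 via its pid file
    TEST $glusterd_2;               # restart it with the saved command line

    cleanup;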
-rwxr-xr-x  tests/bugs/bug-913555.t    64
-rwxr-xr-x  tests/cluster.rc          106
2 files changed, 132 insertions, 38 deletions
diff --git a/tests/bugs/bug-913555.t b/tests/bugs/bug-913555.t
index 0e08bd37..f58d7bd6 100755
--- a/tests/bugs/bug-913555.t
+++ b/tests/bugs/bug-913555.t
@@ -4,16 +4,8 @@
. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc
+. $(dirname $0)/../cluster.rc
-function vglusterd {
- wd=$1/wd-$2
- cp -r /var/lib/glusterd $wd
- rm -rf $wd/peers/* $wd/vols/*
- echo -n "UUID=$(uuidgen)\noperating-version=1\n" > $wd/glusterd.info
- opt1="management.transport.socket.bind-address=127.0.0.$2"
- opt2="management.working-directory=$wd"
- glusterd --xlator-option $opt1 --xlator-option $opt2
-}
function check_fs {
df $1 &> /dev/null
@@ -21,46 +13,42 @@ function check_fs {
}
function check_peers {
- $VCLI peer status | grep 'Peer in Cluster (Connected)' | wc -l
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
-cleanup;
+function glusterfsd_count {
+ pidof glusterfsd | wc -w;
+}
-topwd=$(mktemp -d)
-trap "rm -rf $topwd" EXIT
+cleanup;
-vglusterd $topwd 100
-VCLI="$CLI --remote-host=127.0.0.100"
-vglusterd $topwd 101
-TEST $VCLI peer probe 127.0.0.101
-vglusterd $topwd 102
-TEST $VCLI peer probe 127.0.0.102
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
EXPECT_WITHIN 20 2 check_peers
-create_cmd="$VCLI volume create $V0"
-for i in $(seq 100 102); do
- mkdir -p $B0/$V0$i
- create_cmd="$create_cmd 127.0.0.$i:$B0/$V0$i"
-done
-
-TEST $create_cmd
-TEST $VCLI volume set $V0 cluster.server-quorum-type server
-TEST $VCLI volume start $V0
-TEST glusterfs --volfile-server=127.0.0.100 --volfile-id=$V0 $M0
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
+TEST $CLI_1 volume start $V0
+TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
# Kill one pseudo-node, make sure the others survive and volume stays up.
-kill -9 $(ps -ef | grep gluster | grep 127.0.0.102 | awk '{print $2}')
-EXPECT_WITHIN 20 1 check_peers
-fs_status=$(check_fs $M0)
-nnodes=$(pidof glusterfsd | wc -w)
-TEST [ "$fs_status" = 0 -a "$nnodes" = 2 ]
+TEST kill_node 3;
+EXPECT_WITHIN 20 1 check_peers;
+EXPECT 0 check_fs $M0;
+EXPECT 2 glusterfsd_count;
# Kill another pseudo-node, make sure the last one dies and volume goes down.
-kill -9 $(ps -ef | grep gluster | grep 127.0.0.101 | awk '{print $2}')
+TEST kill_node 2;
EXPECT_WITHIN 20 0 check_peers
-fs_status=$(check_fs $M0)
-nnodes=$(pidof glusterfsd | wc -w)
-TEST [ "$fs_status" = 1 -a "$nnodes" = 0 ]
+EXPECT 1 check_fs $M0;
+EXPECT 0 glusterfsd_count; # the two glusterfsds of the other two glusterds
+ # must be dead
+
+TEST $glusterd_2;
+TEST $glusterd_3;
+EXPECT_WITHIN 20 3 glusterfsd_count; # restore quorum, all ok
+EXPECT_WITHIN 5 0 check_fs $M0;
cleanup
diff --git a/tests/cluster.rc b/tests/cluster.rc
new file mode 100755
index 00000000..1c06bca4
--- /dev/null
+++ b/tests/cluster.rc
@@ -0,0 +1,106 @@
+#!/bin/bash
+
+CLUSTER_PFX="127.1.1"; # ".x" for each glusterd
+CLUSTER_COUNT=1; # Just initial definition
+
+function launch_cluster() {
+ local count=$1;
+
+ CLUSTER_COUNT=$count;
+
+ define_backends $count;
+ define_hosts $count;
+ define_glusterds $count;
+ define_clis $count;
+
+ start_glusterds;
+}
+
+
+function define_backends() {
+ local b;
+
+ for i in `seq 1 $count`; do
+ eval "B$i=$B0/$i";
+ done
+
+ for i in `seq 1 $count`; do
+ b="B$i";
+ mkdir -pv ${!b}/glusterd;
+ done
+}
+
+
+function define_glusterds() {
+ local count=$1;
+ local h;
+ local b;
+ local wopt;
+ local bopt;
+ local popt;
+
+ for i in `seq 1 $count`; do
+ b="B$i";
+ h="H$i";
+ wopt="management.working-directory=${!b}/glusterd";
+ bopt="management.transport.socket.bind-address=${!h}";
+ popt="--pid-file=${!b}/glusterd.pid";
+ eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt $popt'";
+ eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt $popt'";
+ done
+}
+
+
+function start_glusterds() {
+ local g;
+
+ for i in `seq 1 $CLUSTER_COUNT`; do
+ g="glusterd_$i";
+ ${!g};
+ done
+}
+
+
+function kill_glusterd() {
+ local index=$1;
+ local b;
+ local pidfile;
+
+ b="B$index";
+ pidfile="${!b}/glusterd.pid";
+
+ kill `cat $pidfile`;
+}
+
+
+function kill_node() {
+ local index=$1;
+ local h;
+
+ h="H$index";
+
+ kill -9 $(ps -ef | grep gluster | grep ${!h} | awk '{print $2}');
+}
+
+
+function define_hosts() {
+ local count=$1;
+
+ for i in `seq 1 $count`; do
+ eval "H_$i=${CLUSTER_PFX}.$i"
+ eval "H$i=${CLUSTER_PFX}.$i";
+ done
+}
+
+
+function define_clis() {
+ local count=$1;
+ local h;
+
+ for i in `seq 1 $count`; do
+ h="H$i";
+ eval "CLI_$i='$CLI --remote-host=${!h}'";
+ eval "CLI$i='$CLI --remote-host=${!h}'";
+ done
+}
+