From 362f90b5612c0f38894684d1d6f3bd66a31fe5b1 Mon Sep 17 00:00:00 2001
From: hari
Date: Thu, 28 Apr 2016 19:36:25 +0530
Subject: Tier/glusterd: Resetting the tier status value to not started

Back-port of: http://review.gluster.org/#/c/14106/
Back-port of: http://review.gluster.org/#/c/14229/

Problem: During a volume restart or a tier start force, the tier status
was set to "started" irrespective of the result.

Fix: The appropriate status value is now set in the rebalance restart
function.

>Change-Id: I6164f0add48542a57dee059e80fa0f9bb036dbef
>BUG: 1315666
>Signed-off-by: hari
>Change-Id: Ie4345bd7ce1d458574e36b70fe8994b3d758396a
>BUG: 1316808
>Signed-off-by: hari
>Reviewed-on: http://review.gluster.org/14229
>Smoke: Gluster Build System
>Tested-by: hari gowtham
>NetBSD-regression: NetBSD Build System
>CentOS-regression: Gluster Build System
>Reviewed-by: Atin Mukherjee

Change-Id: I8e8e0662535c9dbe09eb6c7078422b40c218b473
BUG: 1347509
Signed-off-by: hari gowtham
Reviewed-on: http://review.gluster.org/14749
Tested-by: hari gowtham
Reviewed-by: Atin Mukherjee
Smoke: Gluster Build System
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
---
 tests/basic/tier/tierd_check.t | 76 ++++++++++++++++++++++++++++++++----------
 1 file changed, 58 insertions(+), 18 deletions(-)

(limited to 'tests')

diff --git a/tests/basic/tier/tierd_check.t b/tests/basic/tier/tierd_check.t
index 2b8ccbbbd96..1f88ea0b72e 100644
--- a/tests/basic/tier/tierd_check.t
+++ b/tests/basic/tier/tierd_check.t
@@ -3,20 +3,24 @@
 . $(dirname $0)/../../include.rc
 . $(dirname $0)/../../volume.rc
 . $(dirname $0)/../../tier.rc
+. $(dirname $0)/../../cluster.rc

 # Creates a tiered volume with pure distribute hot and cold tiers
 # Both hot and cold tiers will have an equal number of bricks.

+function check_peers {
+        $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
 function create_dist_tier_vol () {
-        mkdir $B0/cold
-        mkdir $B0/hot
-        TEST $CLI volume create $V0 $H0:$B0/cold/${V0}{1..3}
-        TEST $CLI volume set $V0 performance.quick-read off
-        TEST $CLI volume set $V0 performance.io-cache off
-        TEST $CLI volume start $V0
-        TEST $CLI volume attach-tier $V0 $H0:$B0/hot/${V0}{1..2}
-        TEST $CLI volume set $V0 cluster.tier-mode test
+        TEST $CLI_1 volume create $V0 $H1:$B1/${V0} $H2:$B2/${V0}
+        TEST $CLI_1 volume start $V0
+        TEST $CLI_1 volume attach-tier $V0 $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2
+}
+
+function tier_status () {
+        $CLI_1 volume tier $V0 status | grep progress | wc -l
 }

 function tier_deamon_kill () {
@@ -26,38 +30,74 @@ echo "$?"
 cleanup;

-#Basic checks
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume status
+#setup cluster and test volume
+TEST launch_cluster 3; # start 3-node virtual cluster
+TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
+TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;

 #Create and start a tiered volume
 create_dist_tier_vol

 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 tier_daemon_check
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 tier_deamon_kill

-TEST $CLI volume tier $V0 start
+TEST $CLI_1 volume tier $V0 start

 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_deamon_kill

-TEST $CLI volume tier $V0 start force
+TEST $CLI_3 volume tier $V0 start force

 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check

-TEST $CLI volume tier $V0 start force
+#The pattern progress should occur twice only.
+#it shouldn't come up on the third node without tierd even
+#after the tier start force is issued on the node without
+#tierd
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
+#kill the node on which tier is not supposed to run
+TEST kill_node 3
+
+#bring the node back, it should not have tierd running on it
+TEST $glusterd_3;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
+#after volume restart, check for tierd
+
+TEST $CLI_3 volume stop $V0
+
+TEST $CLI_3 volume start $V0
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
+#check for detach start and stop
+
+TEST $CLI_3 volume tier $V0 detach start
+
+TEST $CLI_3 volume tier $V0 detach stop
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" tier_status
+
+TEST $CLI_1 volume tier $V0 start force

 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" tier_daemon_check

-# To test fordetach start fail while the brick is down
+# To test for detach start fail while the brick is down

-TEST pkill -f "$B0/hot/$V0"
+TEST pkill -f "$B1/$V0"

-TEST ! $CLI volume tier $V0 detach start
+TEST ! $CLI_1 volume tier $V0 detach start

 cleanup
 #G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
--
cgit
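A note on the repeated EXPECT_WITHIN ... "2" tier_status assertions: the
tier_status helper added by this patch counts how many lines of
"gluster volume tier $V0 status" contain the word "progress", i.e. one per
node actually running tierd. Since the tier bricks live only on servers 1
and 2, the count must stay at 2 regardless of which node the tier commands
are issued from. The sketch below is a standalone illustration of that
counting pipeline, not part of the patch: sample_tier_status is a
hypothetical stand-in for the CLI call, and the status table it prints is
invented for the example (real CLI output formatting varies by GlusterFS
version).

#!/bin/bash
# Standalone sketch of the counting logic behind the tier_status helper.
# sample_tier_status is a hypothetical stand-in for
# "$CLI_1 volume tier $V0 status"; its output table is invented for
# illustration only.
sample_tier_status () {
        cat <<'EOF'
Node                 Promoted files       Demoted files        Status
---------            ---------            ---------            ---------
localhost            0                    0                    in progress
server2              0                    0                    in progress
server3              0                    0                    not started
EOF
}

# Same pipeline as the helper: a node without a running tierd (server3,
# whose status was reset to "not started" by this fix) contributes no
# "progress" line, so the count comes out as 2.
count=$(sample_tier_status | grep progress | wc -l)
echo "nodes reporting progress: $count"    # -> 2

Running the sketch prints 2, matching the value the test waits for; before
this fix, a volume restart or "tier start force" would have flipped the
third node's status to "started" and broken that invariant.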