From e3d97f57c8e25e8b44d3c96b09d69336ff6edb4b Mon Sep 17 00:00:00 2001
From: Mohit Agrawal
Date: Wed, 6 Nov 2019 10:32:04 +0530
Subject: glusterd: Client Handling of Elastic Clusters

Configure the list of gluster servers in the key
GLUSTERD_BRICK_SERVERS at the time of the GETSPEC RPC call, and read
the value on the client side to update the volfile server list, so
that the client can connect to the next volfile server when the
current volfile server goes down.

Updates #741
Signed-off-by: Mohit Agrawal
Change-Id: I23f36ddb92982bb02ffd83937a8bd8a2c97e8104
---
 tests/bugs/glusterd/check_elastic_server.t | 60 ++++++++++++++++++++++++++++++
 tests/cluster.rc                           |  5 +++
 2 files changed, 65 insertions(+)
 create mode 100644 tests/bugs/glusterd/check_elastic_server.t

(limited to 'tests')

diff --git a/tests/bugs/glusterd/check_elastic_server.t b/tests/bugs/glusterd/check_elastic_server.t
new file mode 100644
index 00000000000..8e9e4e5b0eb
--- /dev/null
+++ b/tests/bugs/glusterd/check_elastic_server.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function cluster_rebalance_status {
+        local vol=$1
+        $CLI_2 volume status $vol | grep -iw "Rebalance" -A 5 | grep "Status" | sed 's/.*: //'
+}
+
+cleanup;
+TEST launch_cluster 4;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+TEST $CLI_1 peer probe $H4;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+# Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_remove_brick_status_completed_field "$V0 $H1:$B1/$V0"
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 commit
+
+kill_glusterd 1
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H3:$B3/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H4:$B4/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+kill_glusterd 2
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+cleanup;
+
diff --git a/tests/cluster.rc b/tests/cluster.rc
index 06f329e0ef6..2d826d07e39 100644
--- a/tests/cluster.rc
+++ b/tests/cluster.rc
@@ -192,3 +192,8 @@ function cluster_brick_up_status {
         eval \$CLI_$1 volume status $vol $host:$brick --xml | sed -ne 's/.*<status>\([01]\)<\/status>.*/\1/p'
 }
 
+function cluster_remove_brick_status_completed_field {
+        local vol=$1
+        local brick_list=$2
+        $CLI_1 volume remove-brick $vol $brick_list status | awk '{print $7}' | sed -n 3p
+}
--
cgit
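
For anyone who wants to exercise the behaviour by hand rather than through the
regression test, a rough sketch follows. The hostnames h1-h3, the volume name
vol0, and the mount point /mnt/vol0 are placeholders, not values from this
patch; the repeated -s options and the mount helper's backup-volfile-servers
option are the pre-existing static mechanisms that this change complements
with a dynamically refreshed list.

    #!/bin/bash
    # Sketch only, not part of the patch: manual failover check on a
    # hypothetical 3-node cluster (h1, h2, h3) exporting volume vol0.

    # Without this change the client only knows the volfile servers named
    # at mount time, either as repeated -s options ...
    glusterfs -s h1 -s h2 --volfile-id=vol0 /mnt/vol0

    # ... or via the mount helper's static backup list:
    #   mount -t glusterfs -o backup-volfile-servers=h2:h3 h1:/vol0 /mnt/vol0

    # With this change the server list rides along on every GETSPEC
    # response, so a node added after the mount (h3) becomes a candidate
    # volfile server without remounting. Stopping glusterd on the
    # mount-time server should then leave the client able to fetch
    # volfiles from the remaining nodes:
    ssh h1 'systemctl stop glusterd'
    ls /mnt/vol0    # I/O continues; volfile fetches fail over to h2/h3

Carrying the list on the GETSPEC reply is a cheap way to keep it fresh: the
client already contacts glusterd whenever the volfile may have changed, which
is exactly when cluster membership is most likely to have changed too.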