author     Mohit Agrawal <moagrawal@redhat.com>    2018-07-12 13:29:48 +0530
committer  Atin Mukherjee <amukherj@redhat.com>    2018-07-27 01:24:09 +0000
commit     9400b6f2c8aa219a493961e0ab9770b7f12e80d2 (patch)
tree       50e31f0467b154d39f3d602e5a0f05d65d38a76c /tests
parent     2836e158f38eb9ed070de88b64a3a8758cd2d4c0 (diff)
glusterd: Add multiple checks before attach/start a brick
Problem: In a brick-mux scenario, glusterd sometimes fails to start/attach a
brick, yet "gluster v status" shows the brick as already running.

Solution:
1) To make sure a brick is running, check for the brick_path in
   /proc/<pid>/fd; if the brick is consumed by the brick process, the brick
   stack has come up, otherwise it has not.
2) Before starting/attaching a brick, check whether the brick root is mounted.
3) While printing volume status, check that the brick is consumed by some
   brick process.

Test: The fix was verified with the following procedure:
1) Set up a brick-mux environment on a VM.
2) Set a gdb breakpoint in posix_health_check_thread_proc at the point where
   the GF_EVENT_CHILD_DOWN event is notified.
3) Forcefully unmount any one brick path.
4) Check "gluster v status"; it shows N/A for the brick.
5) Try to start the volume with the force option; glusterd throws the message
   "No device available for mount brick".
6) Mount the brick_root path.
7) Try to start the volume with the force option again.
8) The down brick is started successfully.

Change-Id: I91898dad21d082ebddd12aa0d1f7f0ed012bdf69
fixes: bz#1595320
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
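The two per-brick checks can be sketched in shell for illustration. This is a
minimal sketch only: the actual glusterd change is implemented in C, and the
helper names below (brick_consumed, brick_mounted) are hypothetical, not part
of glusterd.

#!/bin/bash
# Check 1 (sketch): a brick root counts as "consumed" once the brick
# process holds an open fd on it under /proc/<pid>/fd.
function brick_consumed {
    local brick_pid=$1 brick_root=$2
    local n
    n=$(ls -l /proc/$brick_pid/fd 2>/dev/null | grep -iw "$brick_root" \
        | grep -v ".glusterfs" | wc -l)
    [ "$n" -ge 1 ]
}

# Check 2 (sketch): verify the brick root is actually a mount point
# before start/attach.
function brick_mounted {
    mountpoint -q "$1"
}

# Example usage, mirroring the test added below:
#   brick_consumed $(pgrep glusterfsd) $L1 && echo "brick stack is up"
#   brick_mounted $L3 || echo "No device available for mount brick"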
Diffstat (limited to 'tests')
-rw-r--r--  tests/basic/bug-1595320.t            | 92
-rw-r--r--  tests/basic/posix/shared-statfs.t    |  2
-rw-r--r--  tests/bitrot/bug-1373520.t           |  1
-rw-r--r--  tests/bugs/distribute/bug-1368012.t  |  2
-rwxr-xr-x  tests/bugs/distribute/bug-853258.t   |  1
-rw-r--r--  tests/bugs/quota/bug-1293601.t       |  3
6 files changed, 100 insertions(+), 1 deletion(-)
diff --git a/tests/basic/bug-1595320.t b/tests/basic/bug-1595320.t
new file mode 100644
index 00000000000..9d856eeadec
--- /dev/null
+++ b/tests/basic/bug-1595320.t
@@ -0,0 +1,92 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../snapshot.rc
+
+cleanup
+
+function count_up_bricks {
+ $CLI --xml volume status $V0 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+# Set up 3 LVMs
+LVM_PREFIX="test"
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+# Start glusterd
+TEST glusterd
+TEST pidof glusterd
+
+# Create volume and enable brick multiplexing
+TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3
+TEST $CLI volume set all cluster.brick-multiplex on
+
+# Start the volume
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
+EXPECT 1 count_brick_processes
+
+# Grab the brick process pid (the process is killed ungracefully below)
+brick_pid=`pgrep glusterfsd`
+
+# Make sure every brick root is consumed by the brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+
+b1_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-1*.pid)
+b2_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-2*.pid)
+b3_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-3*.pid)
+
+kill -9 $brick_pid
+EXPECT 0 count_brick_processes
+
+# Unmount the 3rd brick root on the node
+brick_root=$L3
+TEST umount -l $brick_root 2>/dev/null
+
+# Start the volume; only 2 bricks should start
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 count_up_bricks
+EXPECT 1 count_brick_processes
+
+brick_pid=`pgrep glusterfsd`
+
+# Make sure only two brick roots are consumed by the brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 0 ]
+
+# Mount the brick root
+TEST mount -t xfs -o nouuid /dev/test_vg_3/brick_lvm $brick_root
+
+# Replace the 3rd brick's pid file to exercise the brick-attach code path
+TEST cp $b1_pid_file $b3_pid_file
+
+# Start the volume; all bricks should come up
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_up_bricks
+EXPECT 1 count_brick_processes
+
+# Make sure every brick root is consumed by the brick process
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L1 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L2 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+n=`ls -lrth /proc/$brick_pid/fd | grep -iw $L3 | grep -v ".glusterfs" | wc -l`
+TEST [ $n -eq 1 ]
+
+cleanup
diff --git a/tests/basic/posix/shared-statfs.t b/tests/basic/posix/shared-statfs.t
index 8caa9fa2110..33439562ec9 100644
--- a/tests/basic/posix/shared-statfs.t
+++ b/tests/basic/posix/shared-statfs.t
@@ -23,6 +23,7 @@ TEST MOUNT_LOOP $LO2 $B0/${V0}2
# Create a subdir in mountpoint and use that for volume.
TEST $CLI volume create $V0 $H0:$B0/${V0}1/1 $H0:$B0/${V0}2/1;
TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" online_brick_count
TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
# Keeping the size less than 200M mainly because XFS will use
@@ -38,6 +39,7 @@ EXPECT 'Stopped' volinfo_field $V0 'Status';
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1/2 $H0:$B0/${V0}2/2 $H0:$B0/${V0}1/3 $H0:$B0/${V0}2/3
TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" online_brick_count
TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
diff --git a/tests/bitrot/bug-1373520.t b/tests/bitrot/bug-1373520.t
index 225d3b1a9bc..c09d424f73b 100644
--- a/tests/bitrot/bug-1373520.t
+++ b/tests/bitrot/bug-1373520.t
@@ -11,6 +11,7 @@ TEST pidof glusterd
#Create a disperse volume
TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" online_brick_count
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'
#Disable md-cache
diff --git a/tests/bugs/distribute/bug-1368012.t b/tests/bugs/distribute/bug-1368012.t
index f89314b1f2e..b8615549ad9 100644
--- a/tests/bugs/distribute/bug-1368012.t
+++ b/tests/bugs/distribute/bug-1368012.t
@@ -22,6 +22,7 @@ EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
## Start volume and verify
TEST $CLI volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" online_brick_count
TEST $CLI volume set $V0 performance.stat-prefetch off
EXPECT 'Started' volinfo_field $V0 'Status';
TEST glusterfs -s $H0 --volfile-id=$V0 $M0
@@ -36,6 +37,7 @@ TEST permission_root=`stat -c "%A" $M0`
TEST echo $permission_root
#Add-brick
TEST $CLI volume add-brick $V0 $H0:/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" online_brick_count
#Allow one lookup to happen
TEST pushd $M0
diff --git a/tests/bugs/distribute/bug-853258.t b/tests/bugs/distribute/bug-853258.t
index e39f507baf9..6817d9e2cd3 100755
--- a/tests/bugs/distribute/bug-853258.t
+++ b/tests/bugs/distribute/bug-853258.t
@@ -31,6 +31,7 @@ done
# Expand the volume and force assignment of new ranges.
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" online_brick_count
# Force assignment of initial ranges.
TEST $CLI volume rebalance $V0 fix-layout start
EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" fix-layout_status_field $V0
diff --git a/tests/bugs/quota/bug-1293601.t b/tests/bugs/quota/bug-1293601.t
index def4ef9eda3..741758b73f5 100644
--- a/tests/bugs/quota/bug-1293601.t
+++ b/tests/bugs/quota/bug-1293601.t
@@ -9,6 +9,7 @@ TEST glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" online_brick_count
TEST $CLI volume quota $V0 enable
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
@@ -27,6 +28,6 @@ EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "1.0MB" quotausage "/"
TEST $CLI volume quota $V0 disable
TEST $CLI volume quota $V0 enable
-EXPECT_WITHIN 40 "1.0MB" quotausage "/"
+EXPECT_WITHIN 60 "1.0MB" quotausage "/"
cleanup;