-rw-r--r--  tests/basic/afr/read-subvol-data.t | 2
-rw-r--r--  tests/basic/afr/read-subvol-entry.t | 2
-rw-r--r--  tests/basic/afr/self-heal.t | 86
-rw-r--r--  tests/basic/afr/sparse-file-self-heal.t | 20
-rw-r--r--  tests/basic/afr/stale-file-lookup.t | 2
-rw-r--r--  tests/basic/mgmt_v3-locks.t | 12
-rwxr-xr-x  tests/basic/mount.t | 2
-rwxr-xr-x  tests/basic/quota.t | 12
-rw-r--r--  tests/basic/self-heald.t | 14
-rwxr-xr-x  tests/basic/volume-snapshot.t | 2
-rw-r--r--  tests/basic/volume-status.t | 4
-rwxr-xr-x  tests/bugs/859927/repl.t | 6
-rw-r--r--  tests/bugs/886998/strict-readdir.t | 12
-rw-r--r--  tests/bugs/bug-1004744.t | 4
-rwxr-xr-x  tests/bugs/bug-1022055.t | 2
-rw-r--r--  tests/bugs/bug-1032894.t | 6
-rw-r--r--  tests/bugs/bug-1035576.t | 2
-rwxr-xr-x  tests/bugs/bug-1043886.t | 4
-rw-r--r--  tests/bugs/bug-1047955.t | 2
-rwxr-xr-x  tests/bugs/bug-1049834.t | 2
-rwxr-xr-x  tests/bugs/bug-1053579.t | 2
-rw-r--r--  tests/bugs/bug-1058797.t | 2
-rw-r--r--  tests/bugs/bug-1064768.t | 4
-rwxr-xr-x  tests/bugs/bug-1066798.t | 2
-rwxr-xr-x  tests/bugs/bug-1070734.t | 4
-rw-r--r--  tests/bugs/bug-1077682.t | 2
-rw-r--r--  tests/bugs/bug-1087198.t | 2
-rwxr-xr-x  tests/bugs/bug-765473.t | 4
-rw-r--r--  tests/bugs/bug-765564.t | 2
-rwxr-xr-x  tests/bugs/bug-802417.t | 6
-rw-r--r--  tests/bugs/bug-821056.t | 8
-rwxr-xr-x  tests/bugs/bug-830665.t | 8
-rwxr-xr-x  tests/bugs/bug-847622.t | 2
-rwxr-xr-x  tests/bugs/bug-847624.t | 2
-rwxr-xr-x  tests/bugs/bug-853258.t | 6
-rw-r--r--  tests/bugs/bug-861015-index.t | 2
-rwxr-xr-x  tests/bugs/bug-864222.t | 2
-rwxr-xr-x  tests/bugs/bug-865825.t | 2
-rwxr-xr-x  tests/bugs/bug-872923.t | 2
-rw-r--r--  tests/bugs/bug-873962-spb.t | 4
-rwxr-xr-x  tests/bugs/bug-873962.t | 6
-rw-r--r--  tests/bugs/bug-874498.t | 2
-rwxr-xr-x  tests/bugs/bug-877885.t | 2
-rwxr-xr-x  tests/bugs/bug-884455.t | 2
-rwxr-xr-x  tests/bugs/bug-887145.t | 4
-rw-r--r--  tests/bugs/bug-888752.t | 2
-rwxr-xr-x  tests/bugs/bug-889630.t | 2
-rwxr-xr-x  tests/bugs/bug-904065.t | 4
-rwxr-xr-x  tests/bugs/bug-904300.t | 10
-rw-r--r--  tests/bugs/bug-906646.t | 2
-rwxr-xr-x  tests/bugs/bug-912564.t | 2
-rw-r--r--  tests/bugs/bug-913051.t | 6
-rwxr-xr-x  tests/bugs/bug-913555.t | 10
-rwxr-xr-x  tests/bugs/bug-915280.t | 2
-rwxr-xr-x  tests/bugs/bug-915554.t | 2
-rw-r--r--  tests/bugs/bug-916226.t | 2
-rw-r--r--  tests/bugs/bug-918437-sh-mtime.t | 6
-rwxr-xr-x  tests/bugs/bug-921072.t | 28
-rwxr-xr-x  tests/bugs/bug-921408.t | 2
-rwxr-xr-x  tests/bugs/bug-924265.t | 2
-rwxr-xr-x  tests/bugs/bug-927616.t | 2
-rwxr-xr-x  tests/bugs/bug-948686.t | 2
-rw-r--r--  tests/bugs/bug-948729/bug-948729-force.t | 2
-rw-r--r--  tests/bugs/bug-948729/bug-948729-mode-script.t | 2
-rw-r--r--  tests/bugs/bug-948729/bug-948729.t | 2
-rw-r--r--  tests/bugs/bug-957877.t | 2
-rw-r--r--  tests/bugs/bug-958691.t | 2
-rw-r--r--  tests/bugs/bug-961615.t | 2
-rwxr-xr-x  tests/bugs/bug-964059.t | 2
-rw-r--r--  tests/bugs/bug-966018.t | 2
-rwxr-xr-x  tests/bugs/bug-973073.t | 4
-rw-r--r--  tests/bugs/bug-974007.t | 2
-rwxr-xr-x  tests/bugs/bug-974972.t | 14
-rwxr-xr-x  tests/bugs/bug-977797.t | 4
-rw-r--r--  tests/bugs/bug-978794.t | 4
-rwxr-xr-x  tests/bugs/bug-983477.t | 9
-rwxr-xr-x  tests/bugs/bug-986905.t | 2
-rw-r--r--  tests/include.rc | 10
-rw-r--r--  tests/nfs.rc | 1
79 files changed, 219 insertions, 215 deletions
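
Note: the hunks below replace hard-coded EXPECT_WITHIN timeouts with named variables (PROCESS_UP_TIMEOUT, CHILD_UP_TIMEOUT, HEAL_TIMEOUT, PROBE_TIMEOUT, NFS_EXPORT_TIMEOUT, REBALANCE_TIMEOUT, REOPEN_TIMEOUT, MARKER_UPDATE_TIMEOUT). The diffstat shows 10 lines added to tests/include.rc, which is where such variables would naturally be defined; the shell sketch below is only illustrative, with assumed values, and is not the actual content of that hunk:

# Hypothetical defaults for tests/include.rc -- real values may differ.
# Every .t script sources include.rc, so EXPECT_WITHIN picks these up.
PROCESS_UP_TIMEOUT=20
NFS_EXPORT_TIMEOUT=20
CHILD_UP_TIMEOUT=20
PROBE_TIMEOUT=20
REOPEN_TIMEOUT=20
MARKER_UPDATE_TIMEOUT=20
REBALANCE_TIMEOUT=120
HEAL_TIMEOUT=120

Centralizing the limits this way lets a slow test machine raise one value in include.rc instead of editing dozens of scripts.
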
diff --git a/tests/basic/afr/read-subvol-data.t b/tests/basic/afr/read-subvol-data.t
index 7db4988faee..25ae3e63c64 100644
--- a/tests/basic/afr/read-subvol-data.t
+++ b/tests/basic/afr/read-subvol-data.t
@@ -24,7 +24,7 @@ TEST dd if=/dev/urandom of=$M0/afr_success_5.txt bs=1M count=1
TEST kill_brick $V0 $H0 $B0/brick0
TEST dd if=/dev/urandom of=$M0/afr_success_5.txt bs=1M count=10
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 5 "10485760" echo `ls -l $M0/afr_success_5.txt | awk '{ print $5}'`
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "10485760" echo `ls -l $M0/afr_success_5.txt | awk '{ print $5}'`
#Cleanup
TEST umount $M0
diff --git a/tests/basic/afr/read-subvol-entry.t b/tests/basic/afr/read-subvol-entry.t
index 91110b8cd70..3e7ee3f35dd 100644
--- a/tests/basic/afr/read-subvol-entry.t
+++ b/tests/basic/afr/read-subvol-entry.t
@@ -26,7 +26,7 @@ TEST kill_brick $V0 $H0 $B0/brick0
TEST touch $M0/abc/def/ghi
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 5 "ghi" echo `ls $M0/abc/def/`
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "ghi" echo `ls $M0/abc/def/`
#Cleanup
TEST umount $M0
diff --git a/tests/basic/afr/self-heal.t b/tests/basic/afr/self-heal.t
index df9526bcf88..575ed4c4f72 100644
--- a/tests/basic/afr/self-heal.t
+++ b/tests/basic/afr/self-heal.t
@@ -39,12 +39,12 @@ TEST dd if=/dev/urandom of=$M0/jkl/mno/file.txt bs=1M count=4 2>/dev/null
TEST chown $NEW_UID:$NEW_GID $M0/def/ghi/file2.txt
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
#check all files created/deleted on brick1 are also replicated on brick 0
#(i.e. no reverse heal has happened)
@@ -68,12 +68,12 @@ TEST rm -f $M0/file
TEST mkdir $M0/file
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
#check heal has happened in the correct direction
TEST test -d $B0/brick0/file
@@ -91,12 +91,12 @@ TEST chmod 666 $M0/file
TEST kill_brick $V0 $H0 $B0/brick0
TEST chmod 777 $M0/file
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
#check heal has happened in the correct direction
EXPECT "777" stat --printf=%a $B0/brick0/file
@@ -115,12 +115,12 @@ NEW_UID=36
NEW_GID=36
TEST chown $NEW_UID:$NEW_GID $M0/file
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
#check heal has happened in the correct direction
EXPECT "$NEW_UID$NEW_GID" stat --printf=%u%g $B0/brick0/file
@@ -138,20 +138,20 @@ TEST `echo "write1">$M0/file`
TEST kill_brick $V0 $H0 $B0/brick0
TEST `echo "write2">>$M0/file`
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
TEST kill_brick $V0 $H0 $B0/brick1
TEST truncate -s 0 $M0/file
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
#check heal has happened in the correct direction
EXPECT 0 stat --printf=%s $B0/brick1/file
@@ -170,11 +170,11 @@ TEST rm -f $M0/file
TEST touch $M0/file
GFID=$(gf_get_gfid_xattr $B1/brick1/file)
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
#check heal has happened in the correct direction
EXPECT "$GFID" gf_get_gfid_xattr $B0/brick0/file
@@ -193,12 +193,12 @@ TEST rm -f $M0/link_to_file
TEST ln -s $M0/file $M0/link_to_file
TEST ln $M0/file $M0/hard_link_to_file
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
#check heal has happened in the correct direction
TEST test -f $B0/brick0/hard_link_to_file
@@ -219,12 +219,12 @@ TEST kill_brick $V0 $H0 $B0/brick0
TEST setfattr -n user.myattr_1 -v "My_attribute_1_modified" $M0/file
TEST setfattr -n user.myattr_3 -v "My_attribute_3" $M0/file
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
TEST diff <(echo "user.myattr_1=\"My_attribute_1_modified\"") <(getfattr -n user.myattr_1 $B0/brick1/file|grep user.myattr_1)
TEST diff <(echo "user.myattr_3=\"My_attribute_3\"") <(getfattr -n user.myattr_3 $B0/brick1/file|grep user.myattr_3)
diff --git a/tests/basic/afr/sparse-file-self-heal.t b/tests/basic/afr/sparse-file-self-heal.t
index 9b795c331cf..01b676ea900 100644
--- a/tests/basic/afr/sparse-file-self-heal.t
+++ b/tests/basic/afr/sparse-file-self-heal.t
@@ -39,12 +39,12 @@ TEST truncate -s 2M $M0/big2bigger
big2bigger_md5sum=$(md5sum $M0/big2bigger | awk '{print $1}')
$CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST gluster volume heal $V0 full
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
@@ -96,12 +96,12 @@ TEST truncate -s 2M $M0/big2bigger
big2bigger_md5sum=$(md5sum $M0/big2bigger | awk '{print $1}')
$CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST gluster volume heal $V0 full
-EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
diff --git a/tests/basic/afr/stale-file-lookup.t b/tests/basic/afr/stale-file-lookup.t
index 24a478d5c4a..f2ab560cb28 100644
--- a/tests/basic/afr/stale-file-lookup.t
+++ b/tests/basic/afr/stale-file-lookup.t
@@ -22,7 +22,7 @@ TEST touch $M0/a
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST rm -f $M0/a
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST stat $B0/${V0}0/a
TEST ! stat $B0/${V0}1/a
TEST ! ls -l $M0/a
diff --git a/tests/basic/mgmt_v3-locks.t b/tests/basic/mgmt_v3-locks.t
index 22ca27b9f20..4c259caa874 100644
--- a/tests/basic/mgmt_v3-locks.t
+++ b/tests/basic/mgmt_v3-locks.t
@@ -79,7 +79,7 @@ TEST launch_cluster 3;
TEST $CLI_1 peer probe $H2;
TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN 20 2 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
two_diff_vols_create
EXPECT 'Created' volinfo_field $V0 'Status';
@@ -92,12 +92,12 @@ EXPECT 'Started' volinfo_field $V1 'Status';
same_vol_remove_brick $V0 $H2:$B2/$V0
# Checking glusterd crashed or not after same volume remove brick
# on both nodes.
-EXPECT_WITHIN 20 2 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
same_vol_remove_brick $V1 $H2:$B2/$V1
# Checking glusterd crashed or not after same volume remove brick
# on both nodes.
-EXPECT_WITHIN 20 2 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
$CLI_1 volume set $V1 diagnostics.client-log-level DEBUG
@@ -105,7 +105,7 @@ kill_glusterd 3
$CLI_1 volume status $V0
$CLI_2 volume status $V1
$CLI_1 peer status
-EXPECT_WITHIN 20 1 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
EXPECT 'Started' volinfo_field $V0 'Status';
EXPECT 'Started' volinfo_field $V1 'Status';
@@ -113,9 +113,9 @@ TEST $glusterd_3
$CLI_1 volume status $V0
$CLI_2 volume status $V1
$CLI_1 peer status
-#EXPECT_WITHIN 20 2 check_peers
+#EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
#EXPECT 'Started' volinfo_field $V0 'Status';
#EXPECT 'Started' volinfo_field $V1 'Status';
#two_diff_vols_stop_force
-#EXPECT_WITHIN 20 2 check_peers
+#EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
cleanup;
diff --git a/tests/basic/mount.t b/tests/basic/mount.t
index 8163975d679..c77317f37f5 100755
--- a/tests/basic/mount.t
+++ b/tests/basic/mount.t
@@ -50,7 +50,7 @@ TEST 'mount -t fuse.glusterfs | grep -E "^$H0:$V0 .+ \(ro,"';
TEST 'grep -E "^$H0:$V0 .+ ,?ro,.+" /proc/mounts';
## Wait for volume to register with rpc.mountd
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
## Mount NFS
TEST mount -t nfs -o nolock,soft,intr $H0:/$V0 $N0;
diff --git a/tests/basic/quota.t b/tests/basic/quota.t
index cfc4f06950d..d92b382c780 100755
--- a/tests/basic/quota.t
+++ b/tests/basic/quota.t
@@ -74,20 +74,20 @@ TEST ! dd if=/dev/urandom of=$M0/test_dir/1.txt bs=1M count=12
TEST rm $M0/test_dir/1.txt
# wait for marker's accounting to complete
-EXPECT_WITHIN 10 "0Bytes" usage "/test_dir"
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "0Bytes" usage "/test_dir"
TEST dd if=/dev/urandom of=$M0/test_dir/2.txt bs=1M count=8
-EXPECT_WITHIN 20 "8.0MB" usage "/test_dir"
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "8.0MB" usage "/test_dir"
TEST rm $M0/test_dir/2.txt
-EXPECT_WITHIN 10 "0Bytes" usage "/test_dir"
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "0Bytes" usage "/test_dir"
## rename tests
TEST dd if=/dev/urandom of=$M0/test_dir/2 bs=1M count=8
-EXPECT_WITHIN 20 "8.0MB" usage "/test_dir"
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "8.0MB" usage "/test_dir"
TEST mv $M0/test_dir/2 $M0/test_dir/0
-EXPECT_WITHIN 10 "8.0MB" usage "/test_dir"
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "8.0MB" usage "/test_dir"
TEST rm $M0/test_dir/0
-EXPECT_WITHIN 10 "0Bytes" usage "/test_dir"
+EXPECT_WITHIN $MARKER_UPDATE_TIMEOUT "0Bytes" usage "/test_dir"
## ---------------------------
diff --git a/tests/basic/self-heald.t b/tests/basic/self-heald.t
index 4468c881bac..913cfc80599 100644
--- a/tests/basic/self-heald.t
+++ b/tests/basic/self-heald.t
@@ -34,15 +34,11 @@ TEST ! $CLI volume heal $V0 info
TEST ! $CLI volume heal $V0
TEST $CLI volume start $V0 force
TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN 20 "Y" glustershd_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 2
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 4
-TEST $CLI volume heal $V0
-sleep 5 #Until the heal-statistics command implementation
-#check that this heals the contents partially
-TEST [ $HEAL_FILES -gt $(afr_get_pending_heal_count $V0) ]
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
TEST $CLI volume heal $V0 full
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
cleanup
diff --git a/tests/basic/volume-snapshot.t b/tests/basic/volume-snapshot.t
index 30dfbbca195..21d6ea3b939 100755
--- a/tests/basic/volume-snapshot.t
+++ b/tests/basic/volume-snapshot.t
@@ -76,7 +76,7 @@ TEST setup_lvm 3
TEST $CLI_1 peer probe $H2;
TEST $CLI_1 peer probe $H3;
-EXPECT_WITHIN 20 2 peer_count;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count;
create_volumes
EXPECT 'Created' volinfo_field $V0 'Status';
diff --git a/tests/basic/volume-status.t b/tests/basic/volume-status.t
index f4196ac30f0..a91db3cc50a 100644
--- a/tests/basic/volume-status.t
+++ b/tests/basic/volume-status.t
@@ -24,8 +24,8 @@ TEST mount -t nfs -o vers=3,nolock,soft,intr $H0:/$V0 $N0;
TEST $CLI volume status all
TEST $CLI volume status $V0
-EXPECT_WITHIN 10 'Y' nfs_up_status
-EXPECT_WITHIN 10 'Y' glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' nfs_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' glustershd_up_status
function test_nfs_cmds () {
local ret=0
declare -a nfs_cmds=("clients" "mem" "inode" "callpool")
diff --git a/tests/bugs/859927/repl.t b/tests/bugs/859927/repl.t
index 856b057fbcb..9ac524fc1bd 100755
--- a/tests/bugs/859927/repl.t
+++ b/tests/bugs/859927/repl.t
@@ -32,20 +32,20 @@ touch $M0/a
TEST $CLI volume set $V0 cluster.data-self-heal-algorithm full
EXPECT full volume_option $V0 cluster.data-self-heal-algorithm
create_setup_for_self_heal $M0/a
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
cat $file 2>&1 > /dev/null
TEST cmp $B0/${V0}1/a $B0/${V0}2/a
TEST $CLI volume set $V0 cluster.data-self-heal-algorithm diff
EXPECT diff volume_option $V0 cluster.data-self-heal-algorithm
create_setup_for_self_heal $M0/a
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
cat $file 2>&1 > /dev/null
TEST cmp $B0/${V0}1/a $B0/${V0}2/a
TEST $CLI volume reset $V0 cluster.data-self-heal-algorithm
create_setup_for_self_heal $M0/a
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
cat $file 2>&1 > /dev/null
TEST cmp $B0/${V0}1/a $B0/${V0}2/a
diff --git a/tests/bugs/886998/strict-readdir.t b/tests/bugs/886998/strict-readdir.t
index 0de953e8a52..57a8c1c32dc 100644
--- a/tests/bugs/886998/strict-readdir.t
+++ b/tests/bugs/886998/strict-readdir.t
@@ -25,16 +25,16 @@ TEST kill_brick $V0 $H0 $B0/r2d2_2
EXPECT "100" num_files_in_dir $M0
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
TEST kill_brick $V0 $H0 $B0/r2d2_1
TEST kill_brick $V0 $H0 $B0/r2d2_3
EXPECT "100" num_files_in_dir $M0
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 3
TEST $CLI volume set $V0 cluster.strict-readdir on
EXPECT "on" volinfo_field $V0 cluster.strict-readdir
@@ -43,8 +43,8 @@ TEST kill_brick $V0 $H0 $B0/r2d2_2
EXPECT "100" num_files_in_dir $M0
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
TEST kill_brick $V0 $H0 $B0/r2d2_1
TEST kill_brick $V0 $H0 $B0/r2d2_3
diff --git a/tests/bugs/bug-1004744.t b/tests/bugs/bug-1004744.t
index 1211002e299..089cb35a184 100644
--- a/tests/bugs/bug-1004744.t
+++ b/tests/bugs/bug-1004744.t
@@ -36,9 +36,7 @@ TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{3,4};
#perform rebalance fix-layout
TEST $CLI volume rebalance $V0 fix-layout start
-EXPECT_WITHIN 1 "fix-layout in progress" rebalance_status_field $V0;
-
-EXPECT_WITHIN 30 "fix-layout completed" rebalance_status_field $V0;
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0;
TEST umount $M0
TEST $CLI volume stop $V0
diff --git a/tests/bugs/bug-1022055.t b/tests/bugs/bug-1022055.t
index c2f4218bb20..07d0b1f2a6f 100755
--- a/tests/bugs/bug-1022055.t
+++ b/tests/bugs/bug-1022055.t
@@ -13,7 +13,7 @@ TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN 20 1 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0;
diff --git a/tests/bugs/bug-1032894.t b/tests/bugs/bug-1032894.t
index 2a2b77fece9..ecb5952d860 100644
--- a/tests/bugs/bug-1032894.t
+++ b/tests/bugs/bug-1032894.t
@@ -21,13 +21,13 @@ for i in {1..10}; do echo abc > $i; done
for i in {1..10}; do rm -f $i; done
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
#Since maximum depth of the directory structure that needs healing is 2
#Trigger two self-heals. That should make sure the heal is complete
TEST $CLI volume heal $V0
-EXPECT_WITHIN 20 "0" afr_get_index_count $B0/${V0}1
+EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_index_count $B0/${V0}1
cleanup
diff --git a/tests/bugs/bug-1035576.t b/tests/bugs/bug-1035576.t
index 938306a8503..62d431a703a 100644
--- a/tests/bugs/bug-1035576.t
+++ b/tests/bugs/bug-1035576.t
@@ -29,7 +29,7 @@ TEST mkdir $M0/a
TEST $CLI volume quota $V0 limit-usage /a 1GB
echo abc > $M0/a/f
$CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
quota_limit_val1=$(get_hex_xattr trusted.glusterfs.quota.limit-set $B0/${V0}1/a)
quota_size_val1=$(get_hex_xattr trusted.glusterfs.quota.size $B0/${V0}1/a)
diff --git a/tests/bugs/bug-1043886.t b/tests/bugs/bug-1043886.t
index b9ee320ae96..edebdc2cd2e 100755
--- a/tests/bugs/bug-1043886.t
+++ b/tests/bugs/bug-1043886.t
@@ -14,7 +14,7 @@ sleep 2;
## Mount FUSE with caching disabled
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
## Mount volume as NFS export
TEST mount_nfs $H0:/$V0 $N0 nolock;
@@ -32,7 +32,7 @@ TEST $CLI volume set $V0 server.anongid $gid;
sleep 2;
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
# create files and directories in the root of the glusterfs and nfs mount
# which is owned by root and hence the right behavior is getting EACCESS
diff --git a/tests/bugs/bug-1047955.t b/tests/bugs/bug-1047955.t
index e15f3ceef00..169333e9eb4 100644
--- a/tests/bugs/bug-1047955.t
+++ b/tests/bugs/bug-1047955.t
@@ -17,7 +17,7 @@ TEST launch_cluster 2;
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/${V0}{1,2,3,4}
TEST $CLI_1 volume start $V0;
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN 20 1 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
TEST $CLI_2 volume remove-brick $V0 $H1:$B1/${V0}{3,4} start;
TEST $CLI_2 volume info
cleanup;
diff --git a/tests/bugs/bug-1049834.t b/tests/bugs/bug-1049834.t
index eb9a7fbe875..7e16fde61aa 100755
--- a/tests/bugs/bug-1049834.t
+++ b/tests/bugs/bug-1049834.t
@@ -11,7 +11,7 @@ TEST launch_cluster 2
TEST setup_lvm 2
TEST $CLI_1 peer probe $H2
-EXPECT_WITHIN 20 1 peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
TEST $CLI_1 volume create $V0 $H1:$L1 $H2:$L2
EXPECT 'Created' volinfo_field $V0 'Status'
diff --git a/tests/bugs/bug-1053579.t b/tests/bugs/bug-1053579.t
index afbc4c9579a..7a8ea192adc 100755
--- a/tests/bugs/bug-1053579.t
+++ b/tests/bugs/bug-1053579.t
@@ -28,7 +28,7 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}1
TEST $CLI volume set $V0 nfs.server-aux-gids on
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 "1" is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
# mount the volume
TEST mount_nfs $H0:/$V0 $N0 nolock
diff --git a/tests/bugs/bug-1058797.t b/tests/bugs/bug-1058797.t
index 1e9f09af0a8..c48f19b9aa8 100644
--- a/tests/bugs/bug-1058797.t
+++ b/tests/bugs/bug-1058797.t
@@ -28,7 +28,7 @@ EXPECT "s" echo $setuid_bit1
#Restart volume and do lookup from mount to trigger heal
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST dd if=$M0/file of=/dev/null
#Get file permissions from healed brick1 and verify that S_ISUID is indeed set
diff --git a/tests/bugs/bug-1064768.t b/tests/bugs/bug-1064768.t
index b87168150d2..b0d04eb25ec 100644
--- a/tests/bugs/bug-1064768.t
+++ b/tests/bugs/bug-1064768.t
@@ -8,7 +8,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
TEST $CLI volume start $V0
-EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
TEST $CLI volume profile $V0 start
TEST $CLI volume profile $V0 info
@@ -16,5 +16,5 @@ TEST $CLI volume profile $V0 stop
TEST $CLI volume status
TEST $CLI volume stop $V0
-EXPECT_WITHIN 15 'Stopped' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Stopped' volinfo_field $V0 'Status';
cleanup;
diff --git a/tests/bugs/bug-1066798.t b/tests/bugs/bug-1066798.t
index 635b143f05a..445ec75c936 100755
--- a/tests/bugs/bug-1066798.t
+++ b/tests/bugs/bug-1066798.t
@@ -56,7 +56,7 @@ done
TEST $CLI volume remove-brick $V0 $H0:${HASHED} start
-EXPECT_WITHIN 20 "completed" remove_brick_status_completed_field "$V0" "$H0:${HASHED}";
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:${HASHED}";
#check consistency in mount point
#And also check all the links are migrated to OTHER
diff --git a/tests/bugs/bug-1070734.t b/tests/bugs/bug-1070734.t
index f35189eb871..fd7bbe01cf6 100755
--- a/tests/bugs/bug-1070734.t
+++ b/tests/bugs/bug-1070734.t
@@ -22,7 +22,7 @@ EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0;
############################################################################
@@ -54,7 +54,7 @@ fi
TEST rm -f $N0/DIR/file;
TEST rmdir $N0/DIR;
TEST $CLI volume remove-brick $V0 $H0:${HASHED} start;
-EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0" \
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \
"$H0:${HASHED}";
TEST mkdir $N0/DIR;
diff --git a/tests/bugs/bug-1077682.t b/tests/bugs/bug-1077682.t
index 2923c5f66dd..b47744e603c 100644
--- a/tests/bugs/bug-1077682.t
+++ b/tests/bugs/bug-1077682.t
@@ -24,7 +24,7 @@ TEST ! $CLI volume remove-brick $V0 $H0:$B0/${V0}1
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 start
-EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0" \
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" \
"$H0:$B0/${V0}3"
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}3 commit
diff --git a/tests/bugs/bug-1087198.t b/tests/bugs/bug-1087198.t
index 29a81c3abbb..6d3802ed088 100644
--- a/tests/bugs/bug-1087198.t
+++ b/tests/bugs/bug-1087198.t
@@ -30,7 +30,7 @@ EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 noac,nolock
diff --git a/tests/bugs/bug-765473.t b/tests/bugs/bug-765473.t
index 5fc0ec9d77e..d9af751f472 100755
--- a/tests/bugs/bug-765473.t
+++ b/tests/bugs/bug-765473.t
@@ -11,7 +11,7 @@ function clients_connected()
volname=$1
gluster volume status $volname clients | grep -i 'Clients connected' | sed -e 's/[^0-9]*\(.*\)/\1/g'
}
-
+
## Start and create a volume
TEST glusterd;
TEST pidof glusterd;
@@ -27,7 +27,7 @@ TEST $CLI volume stop $V0
# write some content which will result in marking fd bad
fd_write $fd "more content"
TEST $CLI volume start $V0
-EXPECT_WITHIN 30 2 clients_connected $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 2 clients_connected $V0
TEST ! fd_write $fd "still more content"
cleanup
diff --git a/tests/bugs/bug-765564.t b/tests/bugs/bug-765564.t
index 6e4087f805c..b21bade9478 100644
--- a/tests/bugs/bug-765564.t
+++ b/tests/bugs/bug-765564.t
@@ -76,7 +76,7 @@ TEST mv $M0/h $M0/1;
TEST $CLI volume start $V0 force;
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
find $M0 | xargs stat 2>/dev/null 1>/dev/null;
TEST rm_mv_correctness;
diff --git a/tests/bugs/bug-802417.t b/tests/bugs/bug-802417.t
index b596df30385..3a6db22edbf 100755
--- a/tests/bugs/bug-802417.t
+++ b/tests/bugs/bug-802417.t
@@ -51,9 +51,9 @@ TEST write_file $M0/a_file "new_data"
## Bring all the bricks up and kill one so we do a partial self-heal.
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
TEST kill_brick ${V0} ${H0} ${B0}/${V0}-2
TEST dd if=${M0}/a_file of=/dev/null
diff --git a/tests/bugs/bug-821056.t b/tests/bugs/bug-821056.t
index 5e81541ac3a..db87993ed76 100644
--- a/tests/bugs/bug-821056.t
+++ b/tests/bugs/bug-821056.t
@@ -27,14 +27,14 @@ realpath=$(gf_get_gfid_backend_file_path $B0/${V0}0 "a")
kill_brick $V0 $H0 $B0/${V0}0
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
EXPECT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
kill_brick $V0 $H0 $B0/${V0}0
TEST gf_rm_file_and_gfid_link $B0/${V0}0 "a"
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
ls -l $M0/a 2>&1 > /dev/null #Make sure the file is re-created
EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
@@ -43,10 +43,10 @@ for i in {1..1024}; do
echo "open sesame" >&5
done
-EXPECT_WITHIN 20 "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
#close the fd
exec 5>&-
#Check that anon-fd based file is not leaking.
-EXPECT_WITHIN 20 "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
+EXPECT_WITHIN $REOPEN_TIMEOUT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
cleanup;
diff --git a/tests/bugs/bug-830665.t b/tests/bugs/bug-830665.t
index e6804891e02..f5a5a67f8e9 100755
--- a/tests/bugs/bug-830665.t
+++ b/tests/bugs/bug-830665.t
@@ -43,7 +43,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
## Mount NFS
TEST mount_nfs $H0:/$V0 $N0 nolock;
@@ -69,7 +69,7 @@ setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0
## Restart and remount. Note that we use actimeo=0 so that the stat calls
## we need for self-heal don't get blocked by the NFS client.
TEST $CLI volume start $V0;
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock,actimeo=0;
## The Linux NFS client has a really charming habit of caching stuff right
@@ -89,8 +89,8 @@ TEST gluster volume heal $V0 full
## check, but we want to test whether self-heal already happened.
## Make sure everything's in order on the recreated brick.
-EXPECT_WITHIN 20 'test_data' cat $B0/${V0}-0/a_file;
-EXPECT_WITHIN 20 'more_test_data' cat $B0/${V0}-0/a_dir/another_file;
+EXPECT_WITHIN $HEAL_TIMEOUT 'test_data' cat $B0/${V0}-0/a_file;
+EXPECT_WITHIN $HEAL_TIMEOUT 'more_test_data' cat $B0/${V0}-0/a_dir/another_file;
if [ "$EXIT_EARLY" = "1" ]; then
exit 0;
diff --git a/tests/bugs/bug-847622.t b/tests/bugs/bug-847622.t
index ffc5b54194f..a3e04ca2047 100755
--- a/tests/bugs/bug-847622.t
+++ b/tests/bugs/bug-847622.t
@@ -10,7 +10,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/brick0
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock
cd $N0
diff --git a/tests/bugs/bug-847624.t b/tests/bugs/bug-847624.t
index 40dd13de399..fcd203c7422 100755
--- a/tests/bugs/bug-847624.t
+++ b/tests/bugs/bug-847624.t
@@ -11,7 +11,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/$V0
TEST $CLI volume set $V0 nfs.drc on
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock
cd $N0
#7
diff --git a/tests/bugs/bug-853258.t b/tests/bugs/bug-853258.t
index faa9d4465ed..3d4cfba00a2 100755
--- a/tests/bugs/bug-853258.t
+++ b/tests/bugs/bug-853258.t
@@ -16,11 +16,11 @@ mkdir -p $H0:$B0/${V0}3
# Create and start a volume.
TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
TEST $CLI volume start $V0
-EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
# Force assignment of initial ranges.
TEST $CLI volume rebalance $V0 fix-layout start
-EXPECT_WITHIN 15 "fix-layout completed" rebalance_status_field $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0
# Get the original values.
xattrs=""
@@ -32,7 +32,7 @@ done
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
# Force assignment of initial ranges.
TEST $CLI volume rebalance $V0 fix-layout start
-EXPECT_WITHIN 15 "fix-layout completed" rebalance_status_field $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "fix-layout completed" rebalance_status_field $V0
for i in $(seq 0 3); do
xattrs="$xattrs $(dht_get_layout $B0/${V0}$i)"
diff --git a/tests/bugs/bug-861015-index.t b/tests/bugs/bug-861015-index.t
index 4b148e6ccc5..4ca115f8263 100644
--- a/tests/bugs/bug-861015-index.t
+++ b/tests/bugs/bug-861015-index.t
@@ -10,7 +10,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5}
TEST $CLI volume set $V0 ensure-durability off
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST kill_brick $V0 $H0 $B0/${V0}2
diff --git a/tests/bugs/bug-864222.t b/tests/bugs/bug-864222.t
index 958fcb9dd78..4fd2f9235b1 100755
--- a/tests/bugs/bug-864222.t
+++ b/tests/bugs/bug-864222.t
@@ -10,7 +10,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/brick0
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock
cd $N0
diff --git a/tests/bugs/bug-865825.t b/tests/bugs/bug-865825.t
index 4b4b8427cd7..7ca076dcc84 100755
--- a/tests/bugs/bug-865825.t
+++ b/tests/bugs/bug-865825.t
@@ -62,7 +62,7 @@ sleep 10
gluster volume heal $V0 full
## Make sure brick 2 now has the correct contents.
-EXPECT_WITHIN 30 "test_data" cat $B0/${V0}-2/a_file
+EXPECT_WITHIN $HEAL_TIMEOUT "test_data" cat $B0/${V0}-2/a_file
if [ "$EXIT_EARLY" = "1" ]; then
exit 0;
diff --git a/tests/bugs/bug-872923.t b/tests/bugs/bug-872923.t
index 68d2e116769..e94fd905e3b 100755
--- a/tests/bugs/bug-872923.t
+++ b/tests/bugs/bug-872923.t
@@ -11,7 +11,7 @@ TEST $CLI volume info
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick0 $H0:$B0/brick1
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock
cd $N0
diff --git a/tests/bugs/bug-873962-spb.t b/tests/bugs/bug-873962-spb.t
index 62a8318ed42..2821952ac9a 100644
--- a/tests/bugs/bug-873962-spb.t
+++ b/tests/bugs/bug-873962-spb.t
@@ -24,12 +24,12 @@ exec 5<$M0/a
kill_brick $V0 $H0 $B0/${V0}0
echo "hi" > $M0/a
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
kill_brick $V0 $H0 $B0/${V0}1
echo "bye" > $M0/a
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST ! cat $M0/a #To mark split-brain
diff --git a/tests/bugs/bug-873962.t b/tests/bugs/bug-873962.t
index 0281417f07f..47b39984cd5 100755
--- a/tests/bugs/bug-873962.t
+++ b/tests/bugs/bug-873962.t
@@ -50,15 +50,15 @@ echo "1" > $M0/c
TEST setfattr -n trusted.mdata -v abc $M0/b
TEST setfattr -n trusted.mdata -v abc $M0/d
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST kill_brick $V0 $H0 $B0/${V0}1
echo "2" > $M0/a
echo "2" > $M0/c
TEST setfattr -n trusted.mdata -v def $M0/b
TEST setfattr -n trusted.mdata -v def $M0/d
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M1 --direct-io-mode=enable
diff --git a/tests/bugs/bug-874498.t b/tests/bugs/bug-874498.t
index 0b5991011d4..de58aef4d39 100644
--- a/tests/bugs/bug-874498.t
+++ b/tests/bugs/bug-874498.t
@@ -53,7 +53,7 @@ TEST $CLI volume heal $V0
##Expected number of entries are 0 in the .glusterfs/indices/xattrop directory
-EXPECT_WITHIN 60 '0' count_sh_entries $FILEN;
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $FILEN;
TEST $CLI volume stop $V0;
TEST $CLI volume delete $V0;
diff --git a/tests/bugs/bug-877885.t b/tests/bugs/bug-877885.t
index 05b4f1cdf2b..955d517876a 100755
--- a/tests/bugs/bug-877885.t
+++ b/tests/bugs/bug-877885.t
@@ -17,7 +17,7 @@ $M0;
TEST touch $M0/file
TEST mkdir $M0/dir
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock
cd $N0
diff --git a/tests/bugs/bug-884455.t b/tests/bugs/bug-884455.t
index 3b3a2241e14..e63af4334ae 100755
--- a/tests/bugs/bug-884455.t
+++ b/tests/bugs/bug-884455.t
@@ -66,7 +66,7 @@ TEST ls -l $M0 2>/dev/null;
TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 30 "0" rebalance_completed
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
## check for layout overlaps.
EXPECT "0" get_layout $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
diff --git a/tests/bugs/bug-887145.t b/tests/bugs/bug-887145.t
index b80f4bd392f..afd39da1616 100755
--- a/tests/bugs/bug-887145.t
+++ b/tests/bugs/bug-887145.t
@@ -15,7 +15,7 @@ sleep 2;
## Mount FUSE with caching disabled
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
useradd tmp_user 2>/dev/null 1>/dev/null;
@@ -34,7 +34,7 @@ TEST $CLI volume set $V0 server.root-squash on;
sleep 2;
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
# create files and directories in the root of the glusterfs and nfs mount
# which is owned by root and hence the right behavior is getting EACCESS
diff --git a/tests/bugs/bug-888752.t b/tests/bugs/bug-888752.t
index 56d3f9ffb2b..b82c0ddb33a 100644
--- a/tests/bugs/bug-888752.t
+++ b/tests/bugs/bug-888752.t
@@ -13,7 +13,7 @@ cleanup
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN 5 1 get_peer_count
+EXPECT_WITHIN $PROBE_TIMEOUT 1 get_peer_count
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
diff --git a/tests/bugs/bug-889630.t b/tests/bugs/bug-889630.t
index b04eb34076e..d2fcc10a4d4 100755
--- a/tests/bugs/bug-889630.t
+++ b/tests/bugs/bug-889630.t
@@ -21,7 +21,7 @@ cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN 20 1 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
diff --git a/tests/bugs/bug-904065.t b/tests/bugs/bug-904065.t
index 5d5045bb734..ff677453015 100755
--- a/tests/bugs/bug-904065.t
+++ b/tests/bugs/bug-904065.t
@@ -34,7 +34,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status'
# glusterfs/nfs needs some time to start up in the background
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
# before mounting the rmtab should be empty
EXPECT '0' count_lines /var/lib/glusterd/nfs/rmtab
@@ -73,7 +73,7 @@ EXPECT '2' count_lines $M0/rmtab
TEST gluster volume set $V0 nfs.mount-rmtab $M0/rmtab
# glusterfs/nfs needs some time to restart
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
# a new mount should be added to the rmtab, not overwrite existing ones
TEST mount_nfs $H0:/$V0 $N0 nolock
diff --git a/tests/bugs/bug-904300.t b/tests/bugs/bug-904300.t
index 1e6917463ec..95be2724282 100755
--- a/tests/bugs/bug-904300.t
+++ b/tests/bugs/bug-904300.t
@@ -11,7 +11,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/$V0;
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST mount_nfs $H0:/$V0 $N0 nolock
TEST mkdir $N0/dir1
@@ -21,7 +21,7 @@ TEST umount $N0
# Case 1: Allow "dir1" to be mounted only from 127.0.0.1
# 9-12
TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.1)"\"
-EXPECT_WITHIN 20 2 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
TEST mount_nfs localhost:/$V0/dir1 $N0 nolock
TEST umount $N0
@@ -31,7 +31,7 @@ TEST umount $N0
# a negative test case therefore the mount should fail.
# 13-16
TEST $CLI volume set $V0 export-dir \""/dir1(8.8.8.8)"\"
-EXPECT_WITHIN 20 2 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
TEST ! mount_nfs $H0:/$V0/dir1 $N0 nolock
TEST ! umount $N0
@@ -41,7 +41,7 @@ TEST ! umount $N0
# instead of ip address.
# 17-20
TEST $CLI volume set $V0 export-dir \""/dir1($H0)"\"
-EXPECT_WITHIN 20 2 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
TEST mount_nfs $H0:/$V0/dir1 $N0 nolock
TEST umount $N0
@@ -49,7 +49,7 @@ TEST umount $N0
# Case 4: Variation of test case1. Here we are checking with IP range
# 21-24
TEST $CLI volume set $V0 export-dir \""/dir1(127.0.0.0/24)"\"
-EXPECT_WITHIN 20 2 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 2 is_nfs_export_available
TEST mount_nfs localhost:/$V0/dir1 $N0 nolock
TEST umount $N0
diff --git a/tests/bugs/bug-906646.t b/tests/bugs/bug-906646.t
index b2cbf6bc32e..754f193beb8 100644
--- a/tests/bugs/bug-906646.t
+++ b/tests/bugs/bug-906646.t
@@ -82,7 +82,7 @@ EXPECT 1 xattr_query_check ${backend_paths_array[1]} "trusted.name"
# restart the brick process
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 `expr $brick_id - 1`
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 `expr $brick_id - 1`
cat $pth >/dev/null
diff --git a/tests/bugs/bug-912564.t b/tests/bugs/bug-912564.t
index b24268fbc9b..4fc548c695c 100755
--- a/tests/bugs/bug-912564.t
+++ b/tests/bugs/bug-912564.t
@@ -38,7 +38,7 @@ mkdir -p $H0:$B0/${V0}3
TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1 \
$H0:$B0/${V0}2 $H0:$B0/${V0}3
TEST $CLI volume start $V0
-EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
# Mount it.
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
diff --git a/tests/bugs/bug-913051.t b/tests/bugs/bug-913051.t
index 9a59424f412..cdf1bfcd1dc 100644
--- a/tests/bugs/bug-913051.t
+++ b/tests/bugs/bug-913051.t
@@ -35,7 +35,7 @@ TEST rfd=`fd_available`
TEST fd_open $rfd "r" $M0/dir/b
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
#check that the files are not opened on brick-0
realpatha=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/a")
@@ -57,8 +57,8 @@ TEST fd_write $wfd "open sesame"
#trigger readv for attempting open-fd-fix in afr
TEST fd_cat $rfd
-EXPECT_WITHIN 20 "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/a
-EXPECT_WITHIN 20 "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/b
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/a
+EXPECT_WITHIN $REOPEN_TIMEOUT "Y" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/b
TEST fd_close $wfd
TEST fd_close $rfd
diff --git a/tests/bugs/bug-913555.t b/tests/bugs/bug-913555.t
index f58d7bd6dd6..2393a16ad6f 100755
--- a/tests/bugs/bug-913555.t
+++ b/tests/bugs/bug-913555.t
@@ -26,7 +26,7 @@ TEST launch_cluster 3; # start 3-node virtual cluster
TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-EXPECT_WITHIN 20 2 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
TEST $CLI_1 volume set $V0 cluster.server-quorum-type server
@@ -35,20 +35,20 @@ TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
# Kill one pseudo-node, make sure the others survive and volume stays up.
TEST kill_node 3;
-EXPECT_WITHIN 20 1 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
EXPECT 0 check_fs $M0;
EXPECT 2 glusterfsd_count;
# Kill another pseudo-node, make sure the last one dies and volume goes down.
TEST kill_node 2;
-EXPECT_WITHIN 20 0 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 0 check_peers
EXPECT 1 check_fs $M0;
EXPECT 0 glusterfsd_count; # the two glusterfsds of the other two glusterds
# must be dead
TEST $glusterd_2;
TEST $glusterd_3;
-EXPECT_WITHIN 20 3 glusterfsd_count; # restore quorum, all ok
-EXPECT_WITHIN 5 0 check_fs $M0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 glusterfsd_count; # restore quorum, all ok
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 check_fs $M0;
cleanup
diff --git a/tests/bugs/bug-915280.t b/tests/bugs/bug-915280.t
index 218fa7baf89..2c3553be540 100755
--- a/tests/bugs/bug-915280.t
+++ b/tests/bugs/bug-915280.t
@@ -24,7 +24,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
MOUNTDIR=$N0;
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock,timeo=30,retrans=1
TEST touch $N0/testfile
diff --git a/tests/bugs/bug-915554.t b/tests/bugs/bug-915554.t
index beb669f8cef..0425117a42d 100755
--- a/tests/bugs/bug-915554.t
+++ b/tests/bugs/bug-915554.t
@@ -60,7 +60,7 @@ TEST $CLI volume rebalance $V0 start force
# check if rebalance has completed for upto 15 secs
-EXPECT_WITHIN 30 "0" rebalance_completed
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
# validate the file size after the migration
SIZE2=`stat -c %s $M0/$i`
diff --git a/tests/bugs/bug-916226.t b/tests/bugs/bug-916226.t
index 2abfa1fc604..50d1e312012 100644
--- a/tests/bugs/bug-916226.t
+++ b/tests/bugs/bug-916226.t
@@ -21,6 +21,6 @@ TEST touch $M0/dir{1..10}/files{1..10};
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}4 $H0:/$B0/${V0}5
TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
cleanup;
diff --git a/tests/bugs/bug-918437-sh-mtime.t b/tests/bugs/bug-918437-sh-mtime.t
index 11155ad1629..e541a81d642 100644
--- a/tests/bugs/bug-918437-sh-mtime.t
+++ b/tests/bugs/bug-918437-sh-mtime.t
@@ -35,15 +35,15 @@ TEST gf_rm_file_and_gfid_link $B0/gfs0/brick01 a
TEST gf_rm_file_and_gfid_link $B0/gfs0/brick02 b
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST $CLI volume set $V0 cluster.self-heal-daemon on
sleep 1
TEST gluster volume heal $V0 full
size=`stat -c '%s' /etc/passwd`
-EXPECT_WITHIN 60 $size stat -c '%s' $B0/gfs0/brick01/a
+EXPECT_WITHIN $HEAL_TIMEOUT $size stat -c '%s' $B0/gfs0/brick01/a
TEST modify_atstamp1=$(get_mtime $B0/gfs0/brick01/a)
TEST modify_atstamp2=$(get_mtime $B0/gfs0/brick02/a)
diff --git a/tests/bugs/bug-921072.t b/tests/bugs/bug-921072.t
index 8ccdea90399..ea5a91c2c24 100755
--- a/tests/bugs/bug-921072.t
+++ b/tests/bugs/bug-921072.t
@@ -11,33 +11,33 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/$V0
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST mount_nfs $H0:/$V0 $N0 nolock
TEST umount $N0
# based on ip addresses (1-4)
# case 1: allow only localhost ip
TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST mount_nfs localhost:/$V0 $N0 nolock
TEST umount $N0
# case 2: allow only non-localhost ip
TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
#11
TEST ! mount_nfs localhost:/$V0 $N0 nolock
TEST $CLI volume reset --mode=script $V0
# case 3: reject only localhost ip
TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST ! mount_nfs localhost:/$V0 $N0 nolock
# case 4: reject only non-localhost ip
TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST mount_nfs localhost:/$V0 $N0 nolock
TEST umount $N0
@@ -48,21 +48,21 @@ TEST umount $N0
# CASES WITH NFS.ADDR-NAMELOOKUP ON (5-12)
TEST $CLI volume reset --mode=script $V0
TEST $CLI volume set $V0 nfs.addr-namelookup on
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
#20
TEST mount_nfs localhost:/$V0 $N0 nolock
TEST umount $N0
# case 5: allow only localhost
TEST $CLI volume set $V0 nfs.rpc-auth-allow localhost
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST mount_nfs localhost:/$V0 $N0 nolock
TEST umount $N0
# case 6: allow only somehost
TEST $CLI volume set $V0 nfs.rpc-auth-allow somehost
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST ! mount_nfs localhost:/$V0 $N0 nolock
@@ -70,13 +70,13 @@ TEST ! mount_nfs localhost:/$V0 $N0 nolock
TEST $CLI volume reset --mode=script $V0
TEST $CLI volume set $V0 nfs.addr-namelookup on
TEST $CLI volume set $V0 nfs.rpc-auth-reject localhost
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
#30
TEST ! mount_nfs localhost:/$V0 $N0 nolock
# case 8: reject only somehost
TEST $CLI volume set $V0 nfs.rpc-auth-reject somehost
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST mount_nfs localhost:/$V0 $N0 nolock
TEST umount $N0
@@ -86,7 +86,7 @@ TEST umount $N0
TEST $CLI volume reset --mode=script $V0
TEST $CLI volume set $V0 nfs.addr-namelookup on
TEST $CLI volume set $V0 nfs.rpc-auth-allow 127.0.0.1
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST mount_nfs localhost:/$V0 $N0 nolock
TEST mkdir -p $N0/subdir
@@ -94,7 +94,7 @@ TEST umount $N0
# case 10: allow a non-localhost ip
TEST $CLI volume set $V0 nfs.rpc-auth-allow 192.168.1.1
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
#41
TEST ! mount_nfs localhost:/$V0 $N0 nolock
@@ -102,14 +102,14 @@ TEST ! mount_nfs localhost:/$V0 $N0 nolock
TEST $CLI volume reset --mode=script $V0
TEST $CLI volume set $V0 nfs.addr-namelookup on
TEST $CLI volume set $V0 nfs.rpc-auth-reject 127.0.0.1
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST ! mount_nfs localhost:/$V0 $N0 nolock
TEST ! mount_nfs localhost:/$V0/subdir $N0 nolock
# case 12: reject only non-localhost ip
TEST $CLI volume set $V0 nfs.rpc-auth-reject 192.168.1.1
-EXPECT_WITHIN 20 1 is_nfs_export_available
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT 1 is_nfs_export_available
TEST mount_nfs localhost:/$V0 $N0 nolock
TEST umount $N0
diff --git a/tests/bugs/bug-921408.t b/tests/bugs/bug-921408.t
index ef2b4fb21cd..483bc6b3d3f 100755
--- a/tests/bugs/bug-921408.t
+++ b/tests/bugs/bug-921408.t
@@ -31,7 +31,7 @@ addbr_rebal_till_layout_change()
do
$CLI volume add-brick $V0 $H0:$B0/${V0}$l &>/dev/null
$CLI volume rebalance $V0 fix-layout start &>/dev/null
- wait_check_status 15
+ wait_check_status $REBALANCE_TIMEOUT
if [ $? -eq 1 ]
then
break
diff --git a/tests/bugs/bug-924265.t b/tests/bugs/bug-924265.t
index 13491356dbc..b08c13fddcb 100755
--- a/tests/bugs/bug-924265.t
+++ b/tests/bugs/bug-924265.t
@@ -24,7 +24,7 @@ TEST $CLI volume set $V0 cluster.dht-xattr-name trusted.foo.bar
# Start and mount the volume.
TEST $CLI volume start $V0
-EXPECT_WITHIN 15 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status';
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
# Create a directory and make sure it has the right xattr.
diff --git a/tests/bugs/bug-927616.t b/tests/bugs/bug-927616.t
index d8fe7520d8c..d19327ca268 100755
--- a/tests/bugs/bug-927616.t
+++ b/tests/bugs/bug-927616.t
@@ -15,7 +15,7 @@ sleep 1;
## Mount FUSE with caching disabled
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock;
TEST mkdir $M0/dir;
diff --git a/tests/bugs/bug-948686.t b/tests/bugs/bug-948686.t
index db9c198a96f..c9afc2f0758 100755
--- a/tests/bugs/bug-948686.t
+++ b/tests/bugs/bug-948686.t
@@ -13,7 +13,7 @@ TEST launch_cluster 3; # start 3-node virtual cluster
TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
-EXPECT_WITHIN 20 2 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
TEST $CLI_1 volume start $V0
diff --git a/tests/bugs/bug-948729/bug-948729-force.t b/tests/bugs/bug-948729/bug-948729-force.t
index d14e9406182..b68b8b88bdd 100644
--- a/tests/bugs/bug-948729/bug-948729-force.t
+++ b/tests/bugs/bug-948729/bug-948729-force.t
@@ -19,7 +19,7 @@ TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN 20 1 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
B3=/d/backends/3
B4=/d/backends/4
diff --git a/tests/bugs/bug-948729/bug-948729-mode-script.t b/tests/bugs/bug-948729/bug-948729-mode-script.t
index 5a1fdd3b1db..5ac133a5bfe 100644
--- a/tests/bugs/bug-948729/bug-948729-mode-script.t
+++ b/tests/bugs/bug-948729/bug-948729-mode-script.t
@@ -17,7 +17,7 @@ TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN 20 1 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
B3=/d/backends/3
mkdir -p $B3
diff --git a/tests/bugs/bug-948729/bug-948729.t b/tests/bugs/bug-948729/bug-948729.t
index 3914a454c53..8a0620a7d60 100644
--- a/tests/bugs/bug-948729/bug-948729.t
+++ b/tests/bugs/bug-948729/bug-948729.t
@@ -17,7 +17,7 @@ TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN 20 1 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
B3=/d/backends/3
diff --git a/tests/bugs/bug-957877.t b/tests/bugs/bug-957877.t
index 23aefea2549..e2b74bf52f5 100644
--- a/tests/bugs/bug-957877.t
+++ b/tests/bugs/bug-957877.t
@@ -21,7 +21,7 @@ sleep 5
TEST $CLI volume heal $V0
# Wait for self-heal to complete
-EXPECT_WITHIN 30 '0' count_sh_entries $BRICK;
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $BRICK;
TEST getfattr -n "user.foo" $B0/${V0}0/f1;
diff --git a/tests/bugs/bug-958691.t b/tests/bugs/bug-958691.t
index 3f6b93eb1c3..cd38d46e1e4 100644
--- a/tests/bugs/bug-958691.t
+++ b/tests/bugs/bug-958691.t
@@ -11,7 +11,7 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
TEST $CLI volume start $V0;
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock;
sleep 2;
diff --git a/tests/bugs/bug-961615.t b/tests/bugs/bug-961615.t
index d183e6c5276..d10eeeabb3b 100644
--- a/tests/bugs/bug-961615.t
+++ b/tests/bugs/bug-961615.t
@@ -26,7 +26,7 @@ dd if=/dev/zero of=$M0/10 bs=1k &
bg_pid=$!
#Now rebalance force will migrate file '10'
TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
#If the bug exists mount would have crashed by now
TEST ls $M0
kill -9 $bg_pid > /dev/null 2>&1
diff --git a/tests/bugs/bug-964059.t b/tests/bugs/bug-964059.t
index df07f95ee99..e81e4d708bc 100755
--- a/tests/bugs/bug-964059.t
+++ b/tests/bugs/bug-964059.t
@@ -21,7 +21,7 @@ cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
-EXPECT_WITHIN 20 1 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
TEST $CLI_1 volume start $V0
diff --git a/tests/bugs/bug-966018.t b/tests/bugs/bug-966018.t
index 55a01b8b7b3..7127189c63b 100644
--- a/tests/bugs/bug-966018.t
+++ b/tests/bugs/bug-966018.t
@@ -19,7 +19,7 @@ TEST $CLI volume set $V0 cluster.post-op-delay-secs 3
TEST $CLI volume start $V0
TEST $CLI volume profile $V0 start
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 nolock;
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0
echo 1 > $N0/1 && chmod +x $N0/1
diff --git a/tests/bugs/bug-973073.t b/tests/bugs/bug-973073.t
index 619f1733114..3ea54132a83 100755
--- a/tests/bugs/bug-973073.t
+++ b/tests/bugs/bug-973073.t
@@ -34,13 +34,13 @@ TEST glusterfs -s $H0 --volfile-id $V0 $M0;
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 start
## remove-brick status == rebalance_status
-EXPECT_WITHIN 30 "0" remove_brick_completed
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" remove_brick_completed
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 stop
TEST $CLI volume rebalance $V0 fix-layout start
-EXPECT_WITHIN 30 "0" rebalance_completed
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
TEST mkdir $M0/dir 2>/dev/null;
diff --git a/tests/bugs/bug-974007.t b/tests/bugs/bug-974007.t
index c8c1c862b33..241aa6ba28e 100644
--- a/tests/bugs/bug-974007.t
+++ b/tests/bugs/bug-974007.t
@@ -31,7 +31,7 @@ function remove_brick_start_status {
EXPECT "success" remove_brick_start_status;
# Wait for rebalance to complete
-EXPECT_WITHIN 10 "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}6 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}5"
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/${V0}6 $H0:$B0/${V0}1 $H0:$B0/${V0}2 $H0:$B0/${V0}5"
# Check commit status
function remove_brick_commit_status {
diff --git a/tests/bugs/bug-974972.t b/tests/bugs/bug-974972.t
index 1b57307e76d..4d367a52cdf 100755
--- a/tests/bugs/bug-974972.t
+++ b/tests/bugs/bug-974972.t
@@ -12,22 +12,22 @@ TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume start $V0
-EXPECT_WITHIN 20 "1" is_nfs_export_available;
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0
TEST touch $N0/1
TEST kill_brick ${V0} ${H0} ${B0}/${V0}1
echo abc > $N0/1
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "Y" nfs_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_nfs $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_nfs $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 1
TEST kill_brick ${V0} ${H0} ${B0}/${V0}0
echo def > $N0/1
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "Y" nfs_up_status
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_nfs $V0 0
-EXPECT_WITHIN 20 "1" afr_child_up_status_in_nfs $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_nfs $V0 1
#Lookup should not fail
TEST ls $N0/1
diff --git a/tests/bugs/bug-977797.t b/tests/bugs/bug-977797.t
index f2252159a21..a8b1ee7b4e4 100755
--- a/tests/bugs/bug-977797.t
+++ b/tests/bugs/bug-977797.t
@@ -42,7 +42,7 @@ TEST `echo "GLUSTER-FILE-SYSTEM" > $M0/a/file`
TEST mkdir $M0/a/b
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0;
@@ -52,7 +52,7 @@ TEST chmod 757 $M0/a
TEST chmod 757 $M0/a/file
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
TEST dd if=$M0/a/file of=/dev/null bs=1M
diff --git a/tests/bugs/bug-978794.t b/tests/bugs/bug-978794.t
index d22d3cde33d..8cda83efe0a 100644
--- a/tests/bugs/bug-978794.t
+++ b/tests/bugs/bug-978794.t
@@ -18,12 +18,12 @@ TEST touch $M0/{1..100}
for i in {1..100}; do fd[$i]=`fd_available`; fd_open ${fd[$i]} 'w' $M0/$i; done
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{2,3}
TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 120 "completed" rebalance_status_field $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
TEST cat $M0/{1..100}
for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5}
TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 120 "completed" rebalance_status_field $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
for i in {1..100}; do fd_write ${fd[$i]} 'abc'; done
TEST cat $M0/{1..100}
cleanup
diff --git a/tests/bugs/bug-983477.t b/tests/bugs/bug-983477.t
index c19fa96c8bf..b9d3002be30 100755
--- a/tests/bugs/bug-983477.t
+++ b/tests/bugs/bug-983477.t
@@ -8,6 +8,7 @@
function get_use_readdirp_value {
local vol=$1
local statedump=$(generate_mount_statedump $vol)
+ sleep 1
local val=$(grep "use_readdirp=" $statedump | cut -f2 -d'=' | tail -1)
rm -f $statedump
echo $val
@@ -21,28 +22,28 @@ TEST $CLI volume start $V0
#If readdirp is enabled statedump should reflect it
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=yes
TEST cd $M0
-EXPECT_WITHIN 20 "1" get_use_readdirp_value $V0
+EXPECT "1" get_use_readdirp_value $V0
TEST cd -
TEST umount $M0
#If readdirp is disabled statedump should reflect it
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp=no
TEST cd $M0
-EXPECT_WITHIN 20 "0" get_use_readdirp_value $V0
+EXPECT "0" get_use_readdirp_value $V0
TEST cd -
TEST umount $M0
#Since the argument to "--use-readdirp" is optional, specifying it without a value should turn it `on` not `off`
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 --use-readdirp
TEST cd $M0
-EXPECT_WITHIN 20 "1" get_use_readdirp_value $V0
+EXPECT "1" get_use_readdirp_value $V0
TEST cd -
TEST umount $M0
#By default it is enabled.
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
TEST cd $M0
-EXPECT_WITHIN 20 "1" get_use_readdirp_value $V0
+EXPECT "1" get_use_readdirp_value $V0
TEST cd -
TEST umount $M0
diff --git a/tests/bugs/bug-986905.t b/tests/bugs/bug-986905.t
index 0fac40fb4e3..ed11bbbd03d 100755
--- a/tests/bugs/bug-986905.t
+++ b/tests/bugs/bug-986905.t
@@ -20,7 +20,7 @@ TEST kill_brick $V0 $H0 $B0/${V0}0
TEST touch $M0/a
TEST ln $M0/a $M0/link_a
TEST $CLI volume start $V0 force
-EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST ls -l $M0
inum=$(get_inum $B0/${V0}0/a)
EXPECT "$inum" get_inum $B0/${V0}0/link_a
diff --git a/tests/include.rc b/tests/include.rc
index 35b0ddd5d63..882785bd1b3 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -7,6 +7,16 @@ V1=${V1:=patchy1}; # volume name to use in tests
B0=${B0:=/d/backends}; # top level of brick directories
H0=${H0:=`hostname --fqdn`}; # hostname
DEBUG=${DEBUG:=0} # turn on debugging?
+
+PROCESS_UP_TIMEOUT=20
+NFS_EXPORT_TIMEOUT=20
+CHILD_UP_TIMEOUT=20
+PROBE_TIMEOUT=20
+REBALANCE_TIMEOUT=120
+REOPEN_TIMEOUT=20
+HEAL_TIMEOUT=60
+MARKER_UPDATE_TIMEOUT=20
+
statedumpdir=`gluster --print-statedumpdir`; # Default directory for statedump
CLI="gluster --mode=script --wignore";
diff --git a/tests/nfs.rc b/tests/nfs.rc
index ed80c1b426b..6e4f5c0ab49 100644
--- a/tests/nfs.rc
+++ b/tests/nfs.rc
@@ -1,6 +1,5 @@
#!/bin/bash
-
# Due to portmap registration NFS takes some time to
# export all volumes. Therefore tests should start only
# after exports are visible by showmount command. This