author     Pranith Kumar K <pkarampu@redhat.com>      2015-06-30 23:01:36 +0530
committer  Xavier Hernandez <xhernandez@datalab.es>   2015-07-21 14:37:38 -0700
commit     918b3aeae03b3aecc64fbc202f00a7c1955f6db7 (patch)
tree       61bfe5fa99f8492fb435054957a6e22096509cfe /tests/basic/afr
parent     05f7cc9815a9fe067584251fa89b1461938f41ea (diff)
cluster/ec: Make background healing optional behavior
Provide options to control the number of active background heals and the
heal queue length.

>Change-Id: Idc2419219d881f47e7d2e9bbc1dcdd999b372033
>BUG: 1237381
>Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
>Reviewed-on: http://review.gluster.org/11473
>Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
>Tested-by: Gluster Build System <jenkins@build.gluster.com>

BUG: 1238476
Change-Id: I22ba902d9911195656db9e458c01b54cf0afcd7a
Signed-off-by: Pranith Kumar K <pkarampu@redhat.com>
Reviewed-on: http://review.gluster.org/11680
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Xavier Hernandez <xhernandez@datalab.es>
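For reference, the ec patch cited above makes background healing tunable per
volume. A minimal usage sketch, assuming the option names
disperse.background-heals and disperse.heal-wait-qlength introduced by that
change (verify against your build with "gluster volume set help"):

    # Cap the number of heals the ec translator runs in the background
    # (option names assumed from review 11473; defaults may differ).
    gluster volume set <volname> disperse.background-heals 8
    # Bound the queue of heals waiting for a free background slot.
    gluster volume set <volname> disperse.heal-wait-qlength 128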
Diffstat (limited to 'tests/basic/afr')
-rw-r--r--  tests/basic/afr/arbiter.t                  |  2
-rw-r--r--  tests/basic/afr/client-side-heal.t         | 10
-rw-r--r--  tests/basic/afr/replace-brick-self-heal.t  |  2
-rw-r--r--  tests/basic/afr/root-squash-self-heal.t    |  2
-rw-r--r--  tests/basic/afr/self-heal.t                | 16
-rw-r--r--  tests/basic/afr/self-heald.t               | 24
-rw-r--r--  tests/basic/afr/sparse-file-self-heal.t    |  4
-rw-r--r--  tests/basic/afr/split-brain-resolution.t   |  4
8 files changed, 32 insertions, 32 deletions
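The changes below are mechanical: every call to the AFR-specific helper
afr_get_pending_heal_count is replaced with the volume-type-agnostic
get_pending_heal_count, so the same assertion works for both replicate and
disperse volumes. A minimal sketch of such a helper, assuming it lives in
tests/volume.rc and sums the "Number of entries" values from heal info (the
helper actually shipped in the tree may differ):

    # Hypothetical shared helper: add up the per-brick "Number of
    # entries" lines printed by `gluster volume heal <vol> info`.
    function get_pending_heal_count {
            local vol=$1
            gluster volume heal $vol info | grep "Number of entries" | awk '{ sum += $4 } END { print sum }'
    }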
diff --git a/tests/basic/afr/arbiter.t b/tests/basic/afr/arbiter.t
index 8a983fb0577..f06fdb1c49f 100644
--- a/tests/basic/afr/arbiter.t
+++ b/tests/basic/afr/arbiter.t
@@ -60,7 +60,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST $CLI volume heal $V0
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
# I/O can resume again.
TEST cat $M0/file
diff --git a/tests/basic/afr/client-side-heal.t b/tests/basic/afr/client-side-heal.t
index c9b3e355802..18f76265b29 100644
--- a/tests/basic/afr/client-side-heal.t
+++ b/tests/basic/afr/client-side-heal.t
@@ -33,7 +33,7 @@ TEST chmod +x $M0/mdatafile
#pending entry heal. Also causes pending metadata/data heals on file{1..5}
TEST touch $M0/dir/file{1..5}
-EXPECT 8 afr_get_pending_heal_count $V0
+EXPECT 8 get_pending_heal_count $V0
#After brick comes back up, access from client should not trigger heals
TEST $CLI volume start $V0 force
@@ -54,7 +54,7 @@ TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
TEST ls $M0/dir
#No heal must have happened
-EXPECT 8 afr_get_pending_heal_count $V0
+EXPECT 8 get_pending_heal_count $V0
#Enable heal client side heal options and trigger heals
TEST $CLI volume set $V0 cluster.data-self-heal on
@@ -63,7 +63,7 @@ TEST $CLI volume set $V0 cluster.entry-self-heal on
#Metadata heal is triggered by lookup without need for inode refresh.
TEST ls $M0/mdatafile
-EXPECT 7 afr_get_pending_heal_count $V0
+EXPECT 7 get_pending_heal_count $V0
#Inode refresh must trigger data and entry heals.
#To trigger inode refresh for sure, the volume is unmounted and mounted each time.
@@ -74,7 +74,7 @@ TEST cat $M0/datafile
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
TEST ls $M0/dir
-EXPECT 5 afr_get_pending_heal_count $V0
+EXPECT 5 get_pending_heal_count $V0
TEST cat $M0/dir/file1
TEST cat $M0/dir/file2
@@ -82,5 +82,5 @@ TEST cat $M0/dir/file3
TEST cat $M0/dir/file4
TEST cat $M0/dir/file5
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
cleanup;
diff --git a/tests/basic/afr/replace-brick-self-heal.t b/tests/basic/afr/replace-brick-self-heal.t
index 8ced7df3c76..fef671a3875 100644
--- a/tests/basic/afr/replace-brick-self-heal.t
+++ b/tests/basic/afr/replace-brick-self-heal.t
@@ -43,7 +43,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
# Wait for heal to complete
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
# Check if entry-heal has happened
TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1_new | sort)
diff --git a/tests/basic/afr/root-squash-self-heal.t b/tests/basic/afr/root-squash-self-heal.t
index fa9a163e623..8337432dbc9 100644
--- a/tests/basic/afr/root-squash-self-heal.t
+++ b/tests/basic/afr/root-squash-self-heal.t
@@ -20,6 +20,6 @@ echo abc > $M0/a
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
find $M0 | xargs stat > /dev/null
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
cleanup
diff --git a/tests/basic/afr/self-heal.t b/tests/basic/afr/self-heal.t
index dbd89619c09..e1ac17c2d79 100644
--- a/tests/basic/afr/self-heal.t
+++ b/tests/basic/afr/self-heal.t
@@ -53,7 +53,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
#check all files created/deleted on brick1 are also replicated on brick 0
#(i.e. no reverse heal has happened)
@@ -82,7 +82,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
#check heal has happened in the correct direction
TEST test -d $B0/brick0/file
@@ -105,7 +105,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
#check heal has happened in the correct direction
EXPECT "777" stat -c %a $B0/brick0/file
@@ -129,7 +129,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
#check heal has happened in the correct direction
EXPECT "$NEW_UID$NEW_GID" stat -c %u%g $B0/brick0/file
@@ -160,7 +160,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
#check heal has happened in the correct direction
EXPECT 0 stat -c %s $B0/brick1/file
@@ -183,7 +183,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
#check heal has happened in the correct direction
EXPECT "$GFID" gf_get_gfid_xattr $B0/brick0/file
@@ -207,7 +207,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
#check heal has happened in the correct direction
TEST test -f $B0/brick0/hard_link_to_file
@@ -233,7 +233,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
TEST diff <(echo "user.myattr_1=\"My_attribute_1_modified\"") <(getfattr -n user.myattr_1 $B0/brick1/file|grep user.myattr_1)
TEST diff <(echo "user.myattr_3=\"My_attribute_3\"") <(getfattr -n user.myattr_3 $B0/brick1/file|grep user.myattr_3)
diff --git a/tests/basic/afr/self-heald.t b/tests/basic/afr/self-heald.t
index ee0afaf9d4e..b8bee5cf0a1 100644
--- a/tests/basic/afr/self-heald.t
+++ b/tests/basic/afr/self-heald.t
@@ -68,7 +68,7 @@ done
HEAL_FILES=$(($HEAL_FILES + 3))
cd ~
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
#When bricks are down, it says Transport End point Not connected for them
EXPECT "3" disconnected_brick_count $V0
@@ -78,12 +78,12 @@ EXPECT "3" disconnected_brick_count $V0
#replica pair.
for i in {11..20}; do echo abc > $M0/$i; done
HEAL_FILES=$(($HEAL_FILES + 10)) #count extra 10 files
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
#delete the files now, so that stale indices will remain.
for i in {11..20}; do rm -f $M0/$i; done
#After deleting files they should not appear in heal info
HEAL_FILES=$(($HEAL_FILES - 10))
-EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
+EXPECT "$HEAL_FILES" get_pending_heal_count $V0
TEST ! $CLI volume heal $V0
@@ -99,10 +99,10 @@ check_bricks_up $V0
TEST $CLI volume heal $V0
sleep 5 #Until the heal-statistics command implementation
#check that this heals the contents partially
-TEST [ $HEAL_FILES -gt $(afr_get_pending_heal_count $V0) ]
+TEST [ $HEAL_FILES -gt $(get_pending_heal_count $V0) ]
TEST $CLI volume heal $V0 full
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
#Test that ongoing IO is not considered as Pending heal
(dd if=/dev/zero of=$M0/file1 bs=1k 2>/dev/null 1>/dev/null)&
@@ -115,7 +115,7 @@ back_pid3=$!;
back_pid4=$!;
(dd if=/dev/zero of=$M0/file5 bs=1k 2>/dev/null 1>/dev/null)&
back_pid5=$!;
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
kill -SIGTERM $back_pid1;
kill -SIGTERM $back_pid2;
kill -SIGTERM $back_pid3;
@@ -132,13 +132,13 @@ TEST $CLI volume set $V0 cluster.data-self-heal off
EXPECT "off" volume_option $V0 cluster.data-self-heal
kill_multiple_bricks $V0 $H0 $B0
echo abc > $M0/f
-EXPECT 1 afr_get_pending_heal_count $V0
+EXPECT 1 get_pending_heal_count $V0
TEST $CLI volume start $V0 force
EXPECT_WITHIN 20 "Y" glustershd_up_status
check_bricks_up $V0
TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
TEST $CLI volume set $V0 cluster.data-self-heal on
#METADATA
@@ -147,13 +147,13 @@ EXPECT "off" volume_option $V0 cluster.metadata-self-heal
kill_multiple_bricks $V0 $H0 $B0
TEST chmod 777 $M0/f
-EXPECT 1 afr_get_pending_heal_count $V0
+EXPECT 1 get_pending_heal_count $V0
TEST $CLI volume start $V0 force
EXPECT_WITHIN 20 "Y" glustershd_up_status
check_bricks_up $V0
TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
TEST $CLI volume set $V0 cluster.metadata-self-heal on
#ENTRY
@@ -163,13 +163,13 @@ kill_multiple_bricks $V0 $H0 $B0
TEST touch $M0/d/a
# 4 if mtime/ctime is modified for d in bricks without a
# 2 otherwise
-PENDING=$( afr_get_pending_heal_count $V0 )
+PENDING=$( get_pending_heal_count $V0 )
TEST test $PENDING -eq 2 -o $PENDING -eq 4
TEST $CLI volume start $V0 force
EXPECT_WITHIN 20 "Y" glustershd_up_status
check_bricks_up $V0
TEST $CLI volume heal $V0
-EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
TEST $CLI volume set $V0 cluster.entry-self-heal on
#Negative test cases
diff --git a/tests/basic/afr/sparse-file-self-heal.t b/tests/basic/afr/sparse-file-self-heal.t
index 1bc915e062c..4101e6d1db7 100644
--- a/tests/basic/afr/sparse-file-self-heal.t
+++ b/tests/basic/afr/sparse-file-self-heal.t
@@ -49,7 +49,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST gluster volume heal $V0 full
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
@@ -114,7 +114,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST gluster volume heal $V0 full
-EXPECT_WITHIN $HEAL_TIMEOUT "0" afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
big_md5sum_0=$(md5sum $B0/${V0}0/big | awk '{print $1}')
small_md5sum_0=$(md5sum $B0/${V0}0/small | awk '{print $1}')
diff --git a/tests/basic/afr/split-brain-resolution.t b/tests/basic/afr/split-brain-resolution.t
index fa1342e2cd5..84b2cc8db51 100644
--- a/tests/basic/afr/split-brain-resolution.t
+++ b/tests/basic/afr/split-brain-resolution.t
@@ -38,7 +38,7 @@ TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-EXPECT 4 afr_get_pending_heal_count $V0
+EXPECT 4 get_pending_heal_count $V0
TEST ! cat $M0/data-split-brain.txt
TEST ! getfattr -n user.test $M0/metadata-split-brain.txt
@@ -82,6 +82,6 @@ TEST setfattr -n replica.split-brain-heal-finalize -v $V0-client-1 $M0/data-spli
EXPECT "brick0" get_text_xattr user.test $M0/metadata-split-brain.txt
EXPECT "brick1_alive" cat $M0/data-split-brain.txt
-EXPECT 0 afr_get_pending_heal_count $V0
+EXPECT 0 get_pending_heal_count $V0
cleanup;