From 0c20b17c09b2eca82f3c79013fd3fe1c72a957fd Mon Sep 17 00:00:00 2001
From: Ravishankar N
Date: Thu, 27 Mar 2014 15:04:40 +0530
Subject: tests/afr: self-heal

Basic functional tests related to self-heal.

arequal-checksum.c is taken from https://github.com/raghavendrabhat/arequal
after consent from all authors.

Change-Id: I43facc31c61375f4dbe58bbb46238e15df5c9011
BUG: 1080759
Signed-off-by: Ravishankar N
Reviewed-on: http://review.gluster.org/7357
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 tests/basic/afr/self-heal.t | 237 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 237 insertions(+)
 create mode 100644 tests/basic/afr/self-heal.t

diff --git a/tests/basic/afr/self-heal.t b/tests/basic/afr/self-heal.t
new file mode 100644
index 000000000..df9526bcf
--- /dev/null
+++ b/tests/basic/afr/self-heal.t
@@ -0,0 +1,237 @@
+#!/bin/bash
+#Self-heal tests
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+#Init
+AREQUAL_PATH=$(dirname $0)/../../utils
+build_tester $AREQUAL_PATH/arequal-checksum.c
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0;
+
+###############################################################################
+#1.Test successful data, metadata and entry self-heal
+
+#Test
+TEST mkdir -p $M0/abc/def $M0/abc/ghi
+TEST dd if=/dev/urandom of=$M0/abc/file_abc.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/def/file_abc_def_1.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/def/file_abc_def_2.txt bs=1M count=3 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/abc/ghi/file_abc_ghi.txt bs=1M count=4 2>/dev/null
+
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST truncate -s 0 $M0/abc/def/file_abc_def_1.txt
+NEW_UID=36
+NEW_GID=36
+TEST chown $NEW_UID:$NEW_GID $M0/abc/def/file_abc_def_2.txt
+TEST rm -rf $M0/abc/ghi
+TEST mkdir -p $M0/def/ghi $M0/jkl/mno
+TEST dd if=/dev/urandom of=$M0/def/ghi/file1.txt bs=1M count=2 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/def/ghi/file2.txt bs=1M count=3 2>/dev/null
+TEST dd if=/dev/urandom of=$M0/jkl/mno/file.txt bs=1M count=4 2>/dev/null
+TEST chown $NEW_UID:$NEW_GID $M0/def/ghi/file2.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check all files created/deleted on brick1 are also replicated on brick 0
+#(i.e. no reverse heal has happened)
+TEST ls $B0/brick0/def/ghi/file1.txt
+TEST ls $B0/brick0/def/ghi/file2.txt
+TEST ls $B0/brick0/jkl/mno/file.txt
+TEST ! ls $B0/brick0/abc/ghi
+EXPECT "$NEW_UID$NEW_GID" stat --printf=%u%g $B0/brick0/abc/def/file_abc_def_2.txt
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#2.Test successful self-heal of different file types.
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/file
+TEST mkdir $M0/file
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+TEST test -d $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#3.Test successful self-heal of file permissions.
+
+#Test
+TEST touch $M0/file
+TEST chmod 666 $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST chmod 777 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "777" stat --printf=%a $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#4.Test successful self-heal of file ownership
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+NEW_UID=36
+NEW_GID=36
+TEST chown $NEW_UID:$NEW_GID $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "$NEW_UID$NEW_GID" stat --printf=%u%g $B0/brick0/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#5.File size test
+
+#Test
+TEST touch $M0/file
+TEST `echo "write1">$M0/file`
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST `echo "write2">>$M0/file`
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST truncate -s 0 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 1
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT 0 stat --printf=%s $B0/brick1/file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#6.GFID heal
+
+#Test
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/file
+TEST touch $M0/file
+GFID=$(gf_get_gfid_xattr $B0/brick1/file)
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+EXPECT "$GFID" gf_get_gfid_xattr $B0/brick0/file
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#7. Link/symlink heal
+
+#Test
+TEST touch $M0/file
+TEST ln $M0/file $M0/link_to_file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST rm -f $M0/link_to_file
+TEST ln -s $M0/file $M0/link_to_file
+TEST ln $M0/file $M0/hard_link_to_file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+#check heal has happened in the correct direction
+TEST test -f $B0/brick0/hard_link_to_file
+TEST test -h $B0/brick0/link_to_file
+TEST diff <($AREQUAL_PATH/arequal-checksum -p $B0/brick0 -i .glusterfs) <($AREQUAL_PATH/arequal-checksum -p $B0/brick1 -i .glusterfs)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+#8. Heal xattrs set by application
+
+#Test
+TEST touch $M0/file
+TEST setfattr -n user.myattr_1 -v My_attribute_1 $M0/file
+TEST setfattr -n user.myattr_2 -v "My_attribute_2" $M0/file
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST setfattr -n user.myattr_1 -v "My_attribute_1_modified" $M0/file
+TEST setfattr -n user.myattr_3 -v "My_attribute_3" $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN 20 "1" afr_child_up_status $V0 0
+EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN 20 "0" afr_get_pending_heal_count $V0
+
+TEST diff <(echo "user.myattr_1=\"My_attribute_1_modified\"") <(getfattr -n user.myattr_1 $B0/brick1/file|grep user.myattr_1)
+TEST diff <(echo "user.myattr_3=\"My_attribute_3\"") <(getfattr -n user.myattr_3 $B0/brick1/file|grep user.myattr_3)
+
+#Cleanup
+TEST rm -rf $M0/*
+###############################################################################
+
+TEST rm -rf $AREQUAL_PATH/arequal-checksum
+cleanup;
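For reference, a minimal sketch of how a .t test like this is normally run; the entry point is an assumption (the GlusterFS regression harness under tests/ and the Perl prove(1) TAP runner), not something stated in the patch itself:

    # assumption: run as root from the top of the glusterfs source tree,
    # with glusterfs built and installed so that include.rc's TEST/EXPECT_WITHIN
    # helpers can start glusterd and create the test volume
    prove -vf tests/basic/afr/self-heal.t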