From a45bef14b370fe82d4f3af41a35d2802a359c287 Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay
Date: Thu, 26 May 2016 08:25:37 +0530
Subject: tests: Add more tests for granular entry self-heal feature

Change-Id: I6f14e413c538e392c8ee5bf4bf9f283e8ac792b7
BUG: 1332566
Signed-off-by: Krutika Dhananjay
Reviewed-on: http://review.gluster.org/14542
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
Reviewed-by: Ravishankar N
Smoke: Gluster Build System
Reviewed-by: Pranith Kumar Karampuri
---
 tests/basic/afr/granular-esh/granular-esh.t | 169 ++++++++++++++++++++++++++++
 tests/include.rc                            |   2 +
 tests/volume.rc                             |   5 +
 3 files changed, 176 insertions(+)
 create mode 100644 tests/basic/afr/granular-esh/granular-esh.t

diff --git a/tests/basic/afr/granular-esh/granular-esh.t b/tests/basic/afr/granular-esh/granular-esh.t
new file mode 100644
index 00000000000..9aa4505a3c2
--- /dev/null
+++ b/tests/basic/afr/granular-esh/granular-esh.t
@@ -0,0 +1,169 @@
#!/bin/bash

. $(dirname $0)/../../../include.rc
. $(dirname $0)/../../../volume.rc
. $(dirname $0)/../../../afr.rc

cleanup

TESTS_EXPECTED_IN_LOOP=12

TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume start $V0
TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 self-heal-daemon off
TEST $CLI volume set $V0 granular-entry-heal on

TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0

# Create files under root
for i in {1..4}
do
        echo $i > $M0/f$i
done

# Create a directory and a few files under it
TEST mkdir $M0/dir
gfid_dir=$(get_gfid_string $M0/dir)

for i in {1..3}
do
        echo $i > $M0/dir/f$i
done

# Kill brick-0.
TEST kill_brick $V0 $H0 $B0/${V0}0

# Create more files
for i in {5..6}
do
        echo $i > $M0/f$i
done

# Test that the index associated with '/' is created on B1.
TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID

# Check for successful creation of granular entry indices
for i in {5..6}
do
        TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
done

# Delete an existing file
TEST unlink $M0/f1

# Rename an existing file
TEST mv $M0/f2 $M0/f2_renamed

# Create a hard link to f3
TEST ln $M0/f3 $M0/link

# Create a symlink to f4
TEST ln -s $M0/f4 $M0/symlink

# Check for successful creation of granular entry indices
for i in {1..2}
do
        TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f$i
done

TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2_renamed
TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/link
TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/symlink
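
# Aside (illustrative only, not part of the original patch): each name index
# checked above is an entry named after the file that was created, deleted,
# renamed or linked, placed under
# <brick>/.glusterfs/indices/entry-changes/<parent-dir-gfid>/ on the brick
# that witnessed the operation while its replica was down. A hypothetical
# helper (not defined in volume.rc) for listing such indices while debugging
# could look like this:
function list_name_indices {
        local brick=$1
        local parent_gfid=$2
        ls "$brick/.glusterfs/indices/entry-changes/$parent_gfid" 2>/dev/null
}
# Example (brick-1 holds the indices in this test):
#     list_name_indices $B0/${V0}1 $ROOT_GFID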

# Create a file and also delete it. This is to test deletion of stale indices during heal.
TEST touch $M0/file_stale
TEST unlink $M0/file_stale
TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/file_stale

# Create a directory and create its subdirs and files while a brick is down
TEST mkdir -p $M0/newdir/newsubdir

for i in {1..3}
do
        echo $i > $M0/newdir/f$i
        echo $i > $M0/newdir/newsubdir/f$i
done

TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/newdir
gfid_newdir=$(get_gfid_string $M0/newdir)
gfid_newsubdir=$(get_gfid_string $M0/newdir/newsubdir)
TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir/newsubdir

# Check that the 'data' segment of the changelog is set for the newly created directories 'newdir' and 'newsubdir'
EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}1/newdir trusted.afr.$V0-client-0 data
EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}1/newdir/newsubdir trusted.afr.$V0-client-0 data

# Test that removal of an entire sub-tree in the hierarchy works.
TEST rm -rf $M0/dir

TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/dir
TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f1
TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f2
TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f3

TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1

TEST $CLI volume set $V0 cluster.self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0

# Wait for heal to complete
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0

# Test that data was healed
for i in {5..6}
do
        TEST_IN_LOOP diff $B0/${V0}0/f$i $B0/${V0}1/f$i
done

for i in {1..3}
do
        TEST_IN_LOOP diff $B0/${V0}0/newdir/f$i $B0/${V0}1/newdir/f$i
        TEST_IN_LOOP diff $B0/${V0}0/newdir/newsubdir/f$i $B0/${V0}1/newdir/newsubdir/f$i
done

# Verify that self-heal has also removed all the deleted names from the sink brick.
TEST ! stat $B0/${V0}0/f1
TEST ! stat $B0/${V0}0/f2
TEST stat $B0/${V0}0/f2_renamed
TEST stat $B0/${V0}0/symlink
EXPECT "3" get_hard_link_count $B0/${V0}0/f3
EXPECT "f4" readlink $B0/${V0}0/symlink
TEST ! stat $B0/${V0}0/dir
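
# Aside (illustrative only, not part of the original patch): get_hard_link_count
# is the helper added to volume.rc by this very patch and simply wraps
# 'stat -c %h'. The expected count of 3 for f3 covers f3 itself, the 'link'
# hard link created earlier, and the gfid-based hard link the brick keeps
# under .glusterfs/. An equivalent inline check could read:
#     EXPECT "3" stat -c %h $B0/${V0}0/f3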

# Now verify that there are no name indices left after self-heal
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f1
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/f2_renamed
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/link
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/symlink
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/dir
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/newdir
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID/file_stale

TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f1
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f2
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir/f3

TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir/f1
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir/f2
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir/f3

TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newsubdir/f1
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newsubdir/f2
TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newsubdir/f3

# To be uncommented once the index xl is made to purge the parent gfid dir
# TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$ROOT_GFID
# TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newdir
# TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_newsubdir
# TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$gfid_dir

cleanup

diff --git a/tests/include.rc b/tests/include.rc
index 13a5188a34e..e122a0ed627 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -10,6 +10,8 @@ GSV0=${GSV0:=slave}; # slave volume name to use in geo-rep tests
B0=${B0:=/d/backends};   # top level of brick directories
WORKDIRS="$B0 $M0 $M1 $M2 $N0 $N1"

+ROOT_GFID="00000000-0000-0000-0000-000000000001"
+
META_VOL=${META_VOL:=gluster_shared_storage}; # shared gluster storage volume used by snapshot scheduler, nfs ganesha and geo-rep.
META_MNT=${META_MNT:=/var/run/gluster/shared_storage}; # Mount point of shared gluster volume.

diff --git a/tests/volume.rc b/tests/volume.rc
index f46f8a19e62..6ff25cc79a3 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -677,3 +677,8 @@ function get_gfid_string {
function file_all_zeroes {
        < $1 tr -d '\0' | read -n 1 || echo 1
}
+
+function get_hard_link_count {
+        local path=$1;
+        stat -c %h $path
+}
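
For reference, a minimal sketch of how the two harness additions above (ROOT_GFID in include.rc and get_hard_link_count in volume.rc) could be used from another .t test. The file below is hypothetical and not part of this change; it assumes it lives alongside granular-esh.t so the same relative include paths apply, and it uses only helpers visible in this patch or the standard harness.

    #!/bin/bash
    . $(dirname $0)/../../../include.rc
    . $(dirname $0)/../../../volume.rc

    cleanup
    TEST glusterd
    TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
    TEST $CLI volume start $V0
    TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0

    # get_hard_link_count wraps 'stat -c %h'; the expected count of 3 is the
    # file itself, its extra hard link, and the gfid link under .glusterfs/
    TEST touch $M0/a
    TEST ln $M0/a $M0/b
    EXPECT "3" get_hard_link_count $B0/${V0}0/a

    # ROOT_GFID names the fixed GFID of '/', so per-parent index paths can be built from it
    echo "root name-index dir: $B0/${V0}0/.glusterfs/indices/entry-changes/$ROOT_GFID"

    cleanup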