author     Shyam <srangana@redhat.com>          2014-09-04 14:10:02 -0400
committer  Vijay Bellur <vbellur@redhat.com>    2014-09-09 10:52:32 -0700
commit     7fa8f593e1375e6a917de0a24efa91f82aab05a4 (patch)
tree       de20ada2327fb4c1869fae02e79fbe3f8f6cdf5f /tests
parent     13a044ab4d643a39d8138ab33226162ef125dbd3 (diff)
cluster/dht: Fix dht_access treating directory like files
When the cluster topology changes due to add-brick, not all subvolumes of DHT will contain the directories until a rebalance is completed. Until the rebalance is run, if a caller bypasses lookup and calls access based on saved/cached inode information (as the NFS server does), then dht_access misreads the error (ESTALE/ENOENT) returned by the new subvolumes and incorrectly tries to handle the inode as a file. This corrupts the in-memory directory state in DHT, and the corruption does not heal even after a rebalance.

This commit fixes the problem in dht_access, thereby preventing DHT from misrepresenting a directory as a file in the case described above.

Change-Id: Idcdaa3837db71c8fe0a40ec0084a6c3dbe27e772
BUG: 1138393
Signed-off-by: Shyam <srangana@redhat.com>
Reviewed-on-master: http://review.gluster.org/8462
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Reviewed-on: http://review.gluster.org/8608
Reviewed-by: Jeff Darcy <jdarcy@redhat.com>
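For context, the commands below are a minimal sketch of the scenario the new test automates: cache directory inodes over NFS, add bricks, then access the directories before rebalancing. The volume, brick, and mount names are illustrative and are not taken from the patch.

# Minimal reproduction sketch (illustrative names, single node):
gluster volume create repro replica 2 host1:/bricks/repro{1,2,3,4}
gluster volume start repro
mount -t nfs -o vers=3 host1:/repro /mnt/repro
mkdir /mnt/repro/dir0001
stat /mnt/repro/dir0001            # NFS caches the directory inode
gluster volume add-brick repro replica 2 host1:/bricks/repro{5,6,7,8}
# Before the rebalance, the new bricks do not have dir0001; an access()
# driven by the cached handle used to make dht_access treat the
# directory like a file.
ls -lR /mnt/repro
gluster volume rebalance repro start force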
Diffstat (limited to 'tests')
-rwxr-xr-x  tests/bugs/bug-1125824.t  100
1 file changed, 100 insertions(+), 0 deletions(-)
diff --git a/tests/bugs/bug-1125824.t b/tests/bugs/bug-1125824.t
new file mode 100755
index 00000000000..fb4fb00cf88
--- /dev/null
+++ b/tests/bugs/bug-1125824.t
@@ -0,0 +1,100 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+create_files () {
+ for i in {1..10}; do
+ orig=$(printf %s/file%04d $1 $i)
+ echo "This is file $i" > $orig
+ done
+ for i in {1..10}; do
+ mkdir $(printf %s/dir%04d $1 $i)
+ done
+ sync
+}
+
+create_dirs () {
+ for i in {1..10}; do
+ mkdir $(printf %s/dir%04d $1 $i)
+ create_files $(printf %s/dir%04d $1 $i)
+ done
+ sync
+}
+
+stat_files () {
+ for i in {1..10}; do
+ orig=$(printf %s/file%04d $1 $i)
+ stat $orig
+ done
+ for i in {1..10}; do
+ stat $(printf %s/dir%04d $1 $i)
+ done
+ sync
+}
+
+stat_dirs () {
+ for i in {1..10}; do
+ stat $(printf %s/dir%04d $1 $i)
+ stat_files $(printf %s/dir%04d $1 $i)
+ done
+ sync
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '4' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+TEST mount_nfs $H0:/$V0 $N0
+
+# Create and populate the NFS inode tables
+TEST create_dirs $N0
+TEST stat_dirs $N0
+
+# Add bricks, changing the state of the volume so that some bricks
+# have the directories and others do not
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}{5,6,7,8}
+
+# At this point, before the fix in this commit, dht_access mishandled
+# directories. The resulting issues ranged from ENOENT or ESTALE errors
+# and missing entries to directories without complete layouts.
+TEST cd $N0
+TEST ls -lR
+
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+# Post rebalance, check that layouts and entries are intact and
+# accessible over NFS, then clear the volume
+TEST ls -lR
+rm -rf ./*
+# There are additional bugs where NFS+DHT does not delete all entries
+# on an rm -rf, so run rm -rf once more to confirm that everything is
+# gone and that we are hitting that transient issue, rather than a bad
+# directory layout cached in memory
+TEST rm -rf ./*
+
+# Get out of the mount, so that umount can work
+TEST cd /
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+TEST $CLI volume stop $V0;
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+TEST ! $CLI volume info $V0;
+
+cleanup;
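As a usage note, assuming the standard GlusterFS regression-test harness (the same one that provides the include.rc sourced above) and a machine where the test framework may run as root, the new case can be invoked on its own from the repository root with prove:

# Run only this regression test (assumes the in-tree test environment):
prove -vf tests/bugs/bug-1125824.t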