Diffstat (limited to 'tests/bugs/shard')
-rw-r--r--  tests/bugs/shard/bug-1245547.t  35
-rw-r--r--  tests/bugs/shard/bug-1248887.t  39
-rw-r--r--  tests/bugs/shard/bug-1250855.t  34
-rw-r--r--  tests/bugs/shard/bug-1251824.t  109
-rw-r--r--  tests/bugs/shard/bug-1256580.t  34
-rw-r--r--  tests/bugs/shard/bug-1258334.t  40
-rw-r--r--  tests/bugs/shard/bug-1259651.t  40
-rw-r--r--  tests/bugs/shard/bug-1260637.t  42
-rw-r--r--  tests/bugs/shard/bug-1261773.t  14
-rw-r--r--  tests/bugs/shard/bug-1272986.t  35
-rw-r--r--  tests/bugs/shard/bug-1342298.t  23
-rw-r--r--  tests/bugs/shard/bug-1468483.t  58
-rw-r--r--  tests/bugs/shard/bug-1488546.t  25
-rw-r--r--  tests/bugs/shard/bug-1568521-EEXIST.t  91
-rw-r--r--  tests/bugs/shard/bug-1568521.t  53
-rw-r--r--  tests/bugs/shard/bug-1605056-2.t  34
-rw-r--r--  tests/bugs/shard/bug-1605056.t  63
-rw-r--r--  tests/bugs/shard/bug-1669077.t  29
-rw-r--r--  tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t  34
-rw-r--r--  tests/bugs/shard/bug-1696136.c  122
-rw-r--r--  tests/bugs/shard/bug-1696136.t  33
-rw-r--r--  tests/bugs/shard/bug-1705884.t  32
-rw-r--r--  tests/bugs/shard/bug-1738419.t  29
-rw-r--r--  tests/bugs/shard/bug-shard-discard.c  70
-rw-r--r--  tests/bugs/shard/bug-shard-discard.t  65
-rw-r--r--  tests/bugs/shard/bug-shard-zerofill.c  60
-rw-r--r--  tests/bugs/shard/bug-shard-zerofill.t  46
-rw-r--r--  tests/bugs/shard/configure-lru-limit.t  52
-rw-r--r--  tests/bugs/shard/issue-1243.t  43
-rw-r--r--  tests/bugs/shard/issue-1281.t  34
-rw-r--r--  tests/bugs/shard/issue-1425.t  45
-rw-r--r--  tests/bugs/shard/parallel-truncate-read.t  48
-rw-r--r--  tests/bugs/shard/shard-append-test.c  183
-rw-r--r--  tests/bugs/shard/shard-append-test.t  32
-rw-r--r--  tests/bugs/shard/shard-fallocate.c  113
-rw-r--r--  tests/bugs/shard/shard-inode-refcount-test.t  30
-rw-r--r--  tests/bugs/shard/unlinks-and-renames.t  333
-rw-r--r--  tests/bugs/shard/zero-flag.t  76
38 files changed, 2278 insertions, 0 deletions
diff --git a/tests/bugs/shard/bug-1245547.t b/tests/bugs/shard/bug-1245547.t
new file mode 100644
index 00000000000..3c46785d10f
--- /dev/null
+++ b/tests/bugs/shard/bug-1245547.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+#Create a file.
+TEST touch $M0/foo
+#Write some data into it.
+TEST `echo "abc" > $M0/foo`
+
+#This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+TEST stat $B0/${V0}1/.shard
+
+#Create a file 'bar' with holes.
+TEST touch $M0/bar
+TEST truncate -s 10G $M0/bar
+#Unlink on such a file should succeed.
+TEST unlink $M0/bar
+
+#Create a file 'baz' with holes.
+TEST touch $M0/baz
+TEST truncate -s 10G $M0/baz
+#A rename onto an existing sharded destination with holes must succeed.
+TEST mv -f $M0/foo $M0/baz
+
+cleanup
diff --git a/tests/bugs/shard/bug-1248887.t b/tests/bugs/shard/bug-1248887.t
new file mode 100644
index 00000000000..2c51f7ce0e8
--- /dev/null
+++ b/tests/bugs/shard/bug-1248887.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+#Create a file.
+TEST touch $M0/foo
+#Write some data into it.
+TEST `echo "abc" > $M0/foo`
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}0/foo
+EXPECT "0000000000000004000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}0/foo
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}1/foo
+EXPECT "0000000000000004000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}1/foo
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST `echo "abc" >> $M0/foo`
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}1/foo
+EXPECT "0000000000000008000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}1/foo
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}0/foo
+EXPECT "0000000000000008000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}0/foo
+EXPECT "0000000000400000" get_hex_xattr trusted.glusterfs.shard.block-size $B0/${V0}1/foo
+EXPECT "0000000000000008000000000000000000000000000000010000000000000000" get_hex_xattr trusted.glusterfs.shard.file-size $B0/${V0}1/foo
+
+cleanup;
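
For reference, the hex values checked above decode directly: the block-size xattr stores the
configured shard size, and the first 8 bytes of the file-size xattr appear to track the
current file size, as the before/after values above suggest. A quick decoding sketch:

    printf '%d\n' 0x0000000000400000   # 4194304, i.e. the 4MB shard-block-size
    printf '%d\n' 0x0000000000000004   # 4 bytes, the size after 'echo "abc" > foo'
    printf '%d\n' 0x0000000000000008   # 8 bytes, the size after the append
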
diff --git a/tests/bugs/shard/bug-1250855.t b/tests/bugs/shard/bug-1250855.t
new file mode 100644
index 00000000000..b8bc3b42513
--- /dev/null
+++ b/tests/bugs/shard/bug-1250855.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=40
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST mkdir $M0/dir
+
+for i in {1..20}; do
+ TEST_IN_LOOP touch $M0/dir/$i;
+done
+
+TEST $CLI volume set $V0 features.shard on
+
+TEST ls $M0
+TEST ls $M0/dir
+
+for i in {1..10}; do
+ TEST_IN_LOOP mv $M0/dir/$i $M0/dir/$i-sharded;
+done
+
+for i in {11..20}; do
+ TEST_IN_LOOP unlink $M0/dir/$i;
+done
+
+cleanup;
diff --git a/tests/bugs/shard/bug-1251824.t b/tests/bugs/shard/bug-1251824.t
new file mode 100644
index 00000000000..d81685d01de
--- /dev/null
+++ b/tests/bugs/shard/bug-1251824.t
@@ -0,0 +1,109 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../common-utils.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST useradd -M test_user 2>/dev/null
+
+# Create 3 files as root.
+TEST touch $M0/foo
+TEST touch $M0/bar
+TEST touch $M0/baz
+TEST touch $M0/qux
+TEST mkdir $M0/dir
+
+# Change ownership to non-root on foo and bar.
+TEST chown test_user:test_user $M0/foo
+TEST chown test_user:test_user $M0/bar
+
+# Write 6M of data on foo as non-root, 2M overflowing into block-1.
+TEST run_cmd_as_user test_user "dd if=/dev/zero of=$M0/foo bs=1M count=6"
+
+# Ensure owner and group are root on the block-1 shard.
+gfid_foo=$(get_gfid_string $M0/foo)
+
+EXPECT "root" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %U`
+EXPECT "root" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %G`
+
+#Ensure /.shard is owned by root.
+EXPECT "root" echo `find $B0/${V0}0 -name .shard | xargs stat -c %U`
+EXPECT "root" echo `find $B0/${V0}0 -name .shard | xargs stat -c %G`
+EXPECT "root" echo `find $B0/${V0}1 -name .shard | xargs stat -c %U`
+EXPECT "root" echo `find $B0/${V0}1 -name .shard | xargs stat -c %G`
+EXPECT "root" echo `find $B0/${V0}2 -name .shard | xargs stat -c %U`
+EXPECT "root" echo `find $B0/${V0}2 -name .shard | xargs stat -c %G`
+EXPECT "root" echo `find $B0/${V0}3 -name .shard | xargs stat -c %U`
+EXPECT "root" echo `find $B0/${V0}3 -name .shard | xargs stat -c %G`
+
+# Write 6M of data on bar as root.
+TEST dd if=/dev/zero of=$M0/bar bs=1M count=6
+
+# Ensure owner and group are root on the block-1 shard.
+gfid_bar=$(get_gfid_string $M0/bar)
+
+EXPECT "root" echo `find $B0 -name $gfid_bar.1 | xargs stat -c %U`
+EXPECT "root" echo `find $B0 -name $gfid_bar.1 | xargs stat -c %G`
+
+# Write 6M of data on baz as root.
+TEST dd if=/dev/zero of=$M0/baz bs=1M count=6
+
+gfid_baz=$(get_gfid_string $M0/baz)
+
+# Ensure owner and group are root on the block-1 shard.
+EXPECT "root" echo `find $B0 -name $gfid_baz.1 | xargs stat -c %U`
+EXPECT "root" echo `find $B0 -name $gfid_baz.1 | xargs stat -c %G`
+
+# Test to ensure that an unlink from an unauthorized user does not lead to only
+# the shards under /.shard getting unlinked while the unlink of the base file
+# fails with EPERM/EACCES.
+
+TEST ! run_cmd_as_user test_user "unlink $M0/baz"
+TEST find $B0/*/.shard/$gfid_baz.1
+
+# Test to ensure that a rename by an unauthorized user, where the destination
+# file exists and is sharded, does not lead to only the shards under /.shard
+# getting unlinked while the rename of the base file fails with EPERM/EACCES.
+
+TEST ! run_cmd_as_user test_user "mv -f $M0/qux $M0/baz"
+TEST find $B0/*/.shard/$gfid_baz.1
+TEST stat $M0/qux
+
+# Shard translator executes steps in the following order while doing a truncate
+# to a lower size:
+# 1) unlink the shards under /.shard first, with frame->root->{uid,gid} set to 0,
+# 2) truncate the original file by the right amount.
+# The following two tests ensure that a truncate attempt from an unauthorised
+# user doesn't result in only the shards under /.shard getting removed (since
+# those unlinks are performed as root) while step 2) above fails, leaving the
+# file in an inconsistent state.
+
+TEST ! run_cmd_as_user test_user "truncate -s 1M $M0/baz"
+TEST find $B0/*/.shard/$gfid_baz.1
+
+# Perform a cp as non-root user. This should trigger readv() which will trigger
+# reads on first shard of "foo" under /.shard, and this must not fail if shard
+# translator correctly sets frame->root->uid,gid to 0 before reading off the
+# first shard, since it's owned by root.
+TEST chown test_user:test_user $M0/dir
+TEST run_cmd_as_user test_user "cp $M0/foo $M0/dir/quux"
+
+md5sum_foo=$(md5sum $M0/foo | awk '{print $1}')
+EXPECT "$md5sum_foo" echo `md5sum $M0/dir/quux | awk '{print $1}'`
+
+userdel test_user
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
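
A note on the on-disk layout these checks rely on: every block of a sharded file other than
block 0 lives under the hidden .shard directory on each brick and is named
<gfid-of-base-file>.<block-number>, which is why the tests above locate shards with
"find $B0 -name $gfid_baz.1". A small sketch using the get_gfid_string helper already
sourced by this test:

    gfid=$(get_gfid_string $M0/foo)
    find $B0/*/.shard -name "$gfid.*" 2>/dev/null   # lists all non-base shards of foo
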
diff --git a/tests/bugs/shard/bug-1256580.t b/tests/bugs/shard/bug-1256580.t
new file mode 100644
index 00000000000..279fcc54e48
--- /dev/null
+++ b/tests/bugs/shard/bug-1256580.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+
+# Create "file" with holes.
+TEST truncate -s 6M $M0/dir/file
+EXPECT '6291456' stat -c %s $M0/dir/file
+
+# Perform writes that do not cross the 6M boundary
+TEST dd if=/dev/zero of=$M0/dir/file bs=1024 seek=3072 count=2048 conv=notrunc
+
+# Ensure that the file size is 6M (as opposed to 8M that would appear in the
+# presence of this bug).
+EXPECT '6291456' stat -c %s $M0/dir/file
+
+#Extend the write beyond EOF such that it again creates a hole of 1M size
+TEST dd if=/dev/zero of=$M0/dir/file bs=1024 seek=7168 count=2048 conv=notrunc
+
+# Ensure that the file size is not greater than 9M.
+EXPECT '9437184' stat -c %s $M0/dir/file
+cleanup
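
For reference, the two size expectations above are plain byte arithmetic (dd uses bs=1024,
so seek and count are in KiB):

    echo $((6 * 1024 * 1024))        # 6291456 - the size set by the truncate
    echo $(((7168 + 2048) * 1024))   # 9437184 - the end offset of the second write
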
diff --git a/tests/bugs/shard/bug-1258334.t b/tests/bugs/shard/bug-1258334.t
new file mode 100644
index 00000000000..94ed822aae8
--- /dev/null
+++ b/tests/bugs/shard/bug-1258334.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/foo
+TEST touch $M0/dir/bar
+TEST touch $M0/dir/new
+
+TEST truncate -s 14M $M0/dir/foo
+TEST truncate -s 14M $M0/dir/bar
+
+# Perform writes that fall on the 2nd block of "foo" (counting from 0)
+TEST dd if=/dev/zero of=$M0/dir/foo bs=1024 seek=10240 count=2048 conv=notrunc
+
+# Perform writes that fall on the 2nd block of "bar" (counting from 0)
+TEST dd if=/dev/zero of=$M0/dir/bar bs=1024 seek=10240 count=2048 conv=notrunc
+
+# Now unlink "foo". If the bug exists, it should fail with EINVAL.
+TEST unlink $M0/dir/foo
+
+# Now rename "new" to "bar". If the bug exists, it should fail with EINVAL.
+TEST mv -f $M0/dir/new $M0/dir/bar
+
+TEST dd if=/dev/zero of=$M0/dir/new bs=1024 count=5120
+
+# Now test that this fix does not break unlink of files without holes
+TEST unlink $M0/dir/new
+
+cleanup
diff --git a/tests/bugs/shard/bug-1259651.t b/tests/bugs/shard/bug-1259651.t
new file mode 100644
index 00000000000..72856fdbaad
--- /dev/null
+++ b/tests/bugs/shard/bug-1259651.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file_plain
+
+# Create "file_plain" with holes.
+TEST truncate -s 12M $M0/dir/file_plain
+
+# Perform writes on it that would create holes.
+TEST dd if=/dev/zero of=$M0/dir/file_plain bs=1024 seek=10240 count=1024 conv=notrunc
+
+md5sum_file_plain=$(md5sum $M0/dir/file_plain | awk '{print $1}')
+
+# Now enable sharding on the volume.
+TEST $CLI volume set $V0 features.shard on
+
+# Create a sharded file called "file_sharded"
+TEST touch $M0/dir/file_sharded
+
+# Truncate it to make it sparse
+TEST truncate -s 12M $M0/dir/file_sharded
+
+# Perform writes on it that would create holes in block-0 and block-1.
+TEST dd if=/dev/zero of=$M0/dir/file_sharded bs=1024 seek=10240 count=1024 conv=notrunc
+
+# If this bug is fixed, md5sum of file_sharded and file_plain should be same.
+EXPECT "$md5sum_file_plain" echo `md5sum $M0/dir/file_sharded | awk '{print $1}'`
+
+cleanup
diff --git a/tests/bugs/shard/bug-1260637.t b/tests/bugs/shard/bug-1260637.t
new file mode 100644
index 00000000000..21008ee19dd
--- /dev/null
+++ b/tests/bugs/shard/bug-1260637.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Create a file.
+TEST touch $M0/foo
+
+# Check that the shard xattrs are set in the backend.
+TEST getfattr -n trusted.glusterfs.shard.block-size $B0/${V0}0/foo
+TEST getfattr -n trusted.glusterfs.shard.file-size $B0/${V0}0/foo
+
+# Verify that shard xattrs are not exposed on the mount.
+TEST ! getfattr -n trusted.glusterfs.shard.block-size $M0/foo
+TEST ! getfattr -n trusted.glusterfs.shard.file-size $M0/foo
+
+# Verify that shard xattrs cannot be set from the mount.
+TEST ! setfattr -n trusted.glusterfs.shard.block-size -v "123" $M0/foo
+TEST ! setfattr -n trusted.glusterfs.shard.file-size -v "123" $M0/foo
+
+# Verify that shard xattrs cannot be removed from the mount.
+TEST ! setfattr -x trusted.glusterfs.shard.block-size $M0/foo
+TEST ! setfattr -x trusted.glusterfs.shard.file-size $M0/foo
+
+# Verify that shard xattrs are not listed when listxattr is triggered.
+TEST ! "getfattr -d -m . $M0/foo | grep shard"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1261773.t b/tests/bugs/shard/bug-1261773.t
new file mode 100644
index 00000000000..46d5a8b91c9
--- /dev/null
+++ b/tests/bugs/shard/bug-1261773.t
@@ -0,0 +1,14 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST check_option_help_presence "features.shard"
+TEST check_option_help_presence "features.shard-block-size"
+
+cleanup
diff --git a/tests/bugs/shard/bug-1272986.t b/tests/bugs/shard/bug-1272986.t
new file mode 100644
index 00000000000..66e896ad0c4
--- /dev/null
+++ b/tests/bugs/shard/bug-1272986.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+# $M0 is where the reads will be done and $M1 is where files will be created,
+# written to, etc.
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1
+
+# Write some data into a file, such that its size crosses the shard block size.
+TEST dd if=/dev/urandom of=$M1/file bs=1M count=5 conv=notrunc oflag=direct
+
+md5sum1_reader=$(md5sum $M0/file | awk '{print $1}')
+
+EXPECT "$md5sum1_reader" echo `md5sum $M1/file | awk '{print $1}'`
+
+# Append some more data into the file.
+TEST dd if=/dev/urandom of=$M1/file bs=256k count=1 conv=notrunc oflag=direct
+
+md5sum2_reader=$(dd if=$M0/file iflag=direct bs=256k| md5sum | awk '{print $1}')
+
+# Test to see if the reader refreshes its cache correctly as part of the reads
+# triggered through md5sum. If it does, then the md5sum on the reader and writer
+# must match.
+EXPECT "$md5sum2_reader" echo `md5sum $M1/file | awk '{print $1}'`
+
+cleanup
diff --git a/tests/bugs/shard/bug-1342298.t b/tests/bugs/shard/bug-1342298.t
new file mode 100644
index 00000000000..ecd7720e8db
--- /dev/null
+++ b/tests/bugs/shard/bug-1342298.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+echo a > $M0/a
+TEST dd if=$M0/a of=/dev/null bs=4096 count=1 iflag=direct
+
+cleanup;
diff --git a/tests/bugs/shard/bug-1468483.t b/tests/bugs/shard/bug-1468483.t
new file mode 100644
index 00000000000..e462b8d54d5
--- /dev/null
+++ b/tests/bugs/shard/bug-1468483.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../common-utils.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 16MB
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST dd if=/dev/zero conv=fsync of=$M0/foo bs=1M count=100
+
+#This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+
+gfid_foo=$(get_gfid_string $M0/foo)
+
+TEST stat $B0/${V0}0/.shard/$gfid_foo.1
+TEST stat $B0/${V0}0/.shard/$gfid_foo.2
+TEST stat $B0/${V0}0/.shard/$gfid_foo.3
+TEST stat $B0/${V0}0/.shard/$gfid_foo.4
+TEST stat $B0/${V0}0/.shard/$gfid_foo.5
+TEST stat $B0/${V0}0/.shard/$gfid_foo.6
+
+# For a file with 7 shards, there should be 7 fsyncs on the brick. Without this
+# fix, I was seeing only 1 fsync (on the base shard alone).
+
+EXPECT "7" echo `$CLI volume profile $V0 info incremental | grep -w FSYNC | awk '{print $8}'`
+
+useradd -M test_user 2>/dev/null
+
+TEST touch $M0/bar
+
+# Change ownership to non-root on bar.
+TEST chown test_user:test_user $M0/bar
+
+TEST $CLI volume profile $V0 stop
+TEST $CLI volume profile $V0 start
+
+# Write 100M of data on bar as non-root.
+TEST run_cmd_as_user test_user "dd if=/dev/zero conv=fsync of=$M0/bar bs=1M count=100"
+
+EXPECT "7" echo `$CLI volume profile $V0 info incremental | grep -w FSYNC | awk '{print $8}'`
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+userdel test_user
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
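
For reference, the expected FSYNC count follows from the shard geometry: a 100MB write with
a 16MB shard-block-size touches ceil(100/16) = 7 blocks (the base file plus shards .1
through .6), so the fsync issued by dd's conv=fsync is expected to fan out to all 7 inodes:

    echo $(( (100 + 16 - 1) / 16 ))   # 7
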
diff --git a/tests/bugs/shard/bug-1488546.t b/tests/bugs/shard/bug-1488546.t
new file mode 100644
index 00000000000..60480dc55e5
--- /dev/null
+++ b/tests/bugs/shard/bug-1488546.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 md-cache-timeout 60
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST dd if=/dev/zero of=$M0/file bs=1M count=20
+TEST ln $M0/file $M0/linkey
+
+EXPECT "20971520" stat -c %s $M0/linkey
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/bugs/shard/bug-1568521-EEXIST.t b/tests/bugs/shard/bug-1568521-EEXIST.t
new file mode 100644
index 00000000000..2f9f165aa63
--- /dev/null
+++ b/tests/bugs/shard/bug-1568521-EEXIST.t
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function get_file_count {
+ ls $1* | wc -l
+}
+
+FILE_COUNT_TIME=5
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+# Unlink a temporary file to trigger creation of .remove_me
+TEST touch $M0/tmp
+TEST unlink $M0/tmp
+
+TEST stat $B0/${V0}0/.shard/.remove_me
+TEST stat $B0/${V0}1/.shard/.remove_me
+
+TEST dd if=/dev/zero of=$M0/dir/file bs=1024 count=9216
+gfid_file=$(get_gfid_string $M0/dir/file)
+
+# Create marker file from the backend to simulate ENODATA.
+touch $B0/${V0}0/.shard/.remove_me/$gfid_file
+touch $B0/${V0}1/.shard/.remove_me/$gfid_file
+
+# Set block and file size to incorrect values of 64MB and 5MB to simulate "stale xattrs" case
+# and confirm that the correct values are set when the actual unlink takes place
+
+TEST setfattr -n trusted.glusterfs.shard.block-size -v 0x0000000004000000 $B0/${V0}0/.shard/.remove_me/$gfid_file
+TEST setfattr -n trusted.glusterfs.shard.block-size -v 0x0000000004000000 $B0/${V0}1/.shard/.remove_me/$gfid_file
+
+TEST setfattr -n trusted.glusterfs.shard.file-size -v 0x0000000000500000000000000000000000000000000000000000000000000000 $B0/${V0}0/.shard/.remove_me/$gfid_file
+TEST setfattr -n trusted.glusterfs.shard.file-size -v 0x0000000000500000000000000000000000000000000000000000000000000000 $B0/${V0}1/.shard/.remove_me/$gfid_file
+
+# Sleep for 2 seconds to prevent posix_gfid_heal() from believing marker file is "fresh" and failing lookup with ENOENT
+sleep 2
+
+TEST unlink $M0/dir/file
+TEST ! stat $B0/${V0}0/dir/file
+TEST ! stat $B0/${V0}1/dir/file
+
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_file
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_file
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_file
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_file
+
+##############################
+### Repeat test for rename ###
+##############################
+
+TEST touch $M0/src
+TEST dd if=/dev/zero of=$M0/dir/dst bs=1024 count=9216
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+
+# Create marker file from the backend to simulate ENODATA.
+touch $B0/${V0}0/.shard/.remove_me/$gfid_dst
+touch $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+# Set block and file size to incorrect values of 64MB and 5MB to simulate "stale xattrs" case
+# and confirm that the correct values are set when the actual unlink takes place
+
+TEST setfattr -n trusted.glusterfs.shard.block-size -v 0x0000000004000000 $B0/${V0}0/.shard/.remove_me/$gfid_dst
+TEST setfattr -n trusted.glusterfs.shard.block-size -v 0x0000000004000000 $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+TEST setfattr -n trusted.glusterfs.shard.file-size -v 0x0000000000500000000000000000000000000000000000000000000000000000 $B0/${V0}0/.shard/.remove_me/$gfid_dst
+TEST setfattr -n trusted.glusterfs.shard.file-size -v 0x0000000000500000000000000000000000000000000000000000000000000000 $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+# Sleep for 2 seconds to prevent posix_gfid_heal() from believing marker file is "fresh" and failing lookup with ENOENT
+sleep 2
+
+TEST mv -f $M0/src $M0/dir/dst
+TEST ! stat $B0/${V0}0/src
+TEST ! stat $B0/${V0}1/src
+
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_dst
+
+cleanup
diff --git a/tests/bugs/shard/bug-1568521.t b/tests/bugs/shard/bug-1568521.t
new file mode 100644
index 00000000000..167fb635ac8
--- /dev/null
+++ b/tests/bugs/shard/bug-1568521.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+
+function delete_files {
+ local mountpoint=$1;
+ local success=0;
+ local value=$2
+ for i in {1..500}; do
+ unlink $mountpoint/file-$i 2>/dev/null 1>/dev/null
+ if [ $? -eq 0 ]; then
+ echo $2 >> $B0/output.txt
+ fi
+ done
+ echo $success
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M1
+
+for i in {1..500}; do
+ dd if=/dev/urandom of=$M0/file-$i bs=1M count=2
+done
+
+for i in {1..500}; do
+ stat $M1/file-$i > /dev/null
+done
+
+delete_files $M0 0 &
+delete_files $M1 1 &
+wait
+
+success1=$(grep 0 $B0/output.txt | wc -l);
+success2=$(grep 1 $B0/output.txt | wc -l);
+
+echo "Success1 is $success1";
+echo "Success2 is $success2";
+
+success_total=$((success1 + success2));
+
+EXPECT 500 echo $success_total
+
+cleanup
diff --git a/tests/bugs/shard/bug-1605056-2.t b/tests/bugs/shard/bug-1605056-2.t
new file mode 100644
index 00000000000..a9c10fec3ea
--- /dev/null
+++ b/tests/bugs/shard/bug-1605056-2.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 25
+TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Perform a write that would cause 25 shards to be created under .shard
+TEST dd if=/dev/zero of=$M0/foo bs=1M count=104
+
+# Write into another file bar to ensure all of foo's shards are evicted from lru list of $M0
+TEST dd if=/dev/zero of=$M0/bar bs=1M count=104
+
+# Delete foo from $M0. If there's a bug, the mount will crash.
+TEST unlink $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1605056.t b/tests/bugs/shard/bug-1605056.t
new file mode 100644
index 00000000000..c2329ea79f8
--- /dev/null
+++ b/tests/bugs/shard/bug-1605056.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+SHARD_COUNT_TIME=5
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 25
+TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M1
+
+# Perform a write that would cause 25 shards to be created under .shard
+TEST dd if=/dev/zero of=$M0/foo bs=1M count=104
+
+# Read the file from $M1, indirectly filling up the lru list.
+TEST `cat $M1/foo > /dev/null`
+statedump=$(generate_mount_statedump $V0 $M1)
+sleep 1
+EXPECT "25" echo $(grep "inode-count" $statedump | cut -f2 -d'=' | tail -1)
+rm -f $statedump
+
+# Delete foo from $M0.
+TEST unlink $M0/foo
+
+# Send stat on foo from $M1 to force $M1 to "forget" inode associated with foo.
+# Now the ghost shards associated with "foo" are still in lru list of $M1.
+TEST ! stat $M1/foo
+
+# Let's force the ghost shards of "foo" out of lru list by looking up more shards
+# through I/O on a file named "bar" from $M1. This should crash if the base inode
+# had been destroyed by now.
+
+TEST dd if=/dev/zero of=$M1/bar bs=1M count=104
+
+###############################################
+#### Now for some inode ref-leak tests ... ####
+###############################################
+
+# Expect there to be 29 active inodes - 26 belonging to "bar", 1 for .shard,
+# 1 for .shard/.remove_me and 1 for '/'
+EXPECT_WITHIN $SHARD_COUNT_TIME `expr 26 + 3` get_mount_active_size_value $V0 $M1
+
+TEST rm -f $M1/bar
+EXPECT_WITHIN $SHARD_COUNT_TIME 3 get_mount_active_size_value $V0 $M1
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
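
For reference, the active-inode accounting above follows from the shard geometry: "bar" is
104MB with 4MB shards, i.e. 26 blocks (its base inode plus 25 shards under .shard), and the
three extra inodes are .shard, .shard/.remove_me and the root directory:

    echo $((104 / 4))   # 26 blocks for bar
    echo $((26 + 3))    # 29 active inodes expected while bar exists
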
diff --git a/tests/bugs/shard/bug-1669077.t b/tests/bugs/shard/bug-1669077.t
new file mode 100644
index 00000000000..8d3a67a36be
--- /dev/null
+++ b/tests/bugs/shard/bug-1669077.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+SHARD_COUNT_TIME=5
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 25
+
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# If the bug still exists, client should crash during fallocate below
+TEST fallocate -l 200M $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t b/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t
new file mode 100644
index 00000000000..3e4a65af19a
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 120
+TEST $CLI volume set $V0 features.shard-deletion-rate 120
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST build_tester $(dirname $0)/bug-1696136.c -lgfapi -Wall -O2
+
+# Create a file
+TEST touch $M0/file1
+
+# Fallocate 512MB (536870912 bytes). This ensures the number of participant shards is > lru-limit
+TEST $(dirname $0)/bug-1696136 $H0 $V0 "0" "0" "536870912" /file1 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+rm -f $(dirname $0)/bug-1696136
+
+cleanup
diff --git a/tests/bugs/shard/bug-1696136.c b/tests/bugs/shard/bug-1696136.c
new file mode 100644
index 00000000000..cb650535b09
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136.c
@@ -0,0 +1,122 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+enum fallocate_flag {
+ TEST_FALLOCATE_NONE,
+ TEST_FALLOCATE_KEEP_SIZE,
+ TEST_FALLOCATE_ZERO_RANGE,
+ TEST_FALLOCATE_PUNCH_HOLE,
+ TEST_FALLOCATE_MAX,
+};
+
+int
+get_fallocate_flag(int opcode)
+{
+ int ret = 0;
+
+ switch (opcode) {
+ case TEST_FALLOCATE_NONE:
+ ret = 0;
+ break;
+ case TEST_FALLOCATE_KEEP_SIZE:
+ ret = FALLOC_FL_KEEP_SIZE;
+ break;
+ case TEST_FALLOCATE_ZERO_RANGE:
+ ret = FALLOC_FL_ZERO_RANGE;
+ break;
+ case TEST_FALLOCATE_PUNCH_HOLE:
+ ret = FALLOC_FL_PUNCH_HOLE;
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 1;
+ int opcode = -1;
+ off_t offset = 0;
+ size_t len = 0;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+
+ if (argc != 8) {
+ fprintf(stderr,
+ "Syntax: %s <host> <volname> <opcode> <offset> <len> "
+ "<file-path> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, argv[7], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ opcode = atoi(argv[3]);
+ opcode = get_fallocate_flag(opcode);
+ if (opcode < 0) {
+ fprintf(stderr, "get_fallocate_flag: invalid flag \n");
+ goto out;
+ }
+
+ /* Note that off_t is signed but size_t isn't. */
+ offset = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
+
+ fd = glfs_open(fs, argv[6], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ ret = glfs_fallocate(fd, opcode, offset, len);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fallocate: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_unlink(fs, argv[6]);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_unlink: returned %d\n", ret);
+ goto out;
+ }
+ /* Sleep for 3s to give enough time for background deletion to complete
+ * during which if the bug exists, the process will crash.
+ */
+ sleep(3);
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/bug-1696136.t b/tests/bugs/shard/bug-1696136.t
new file mode 100644
index 00000000000..b6dc858f083
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 120
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST build_tester $(dirname $0)/bug-1696136.c -lgfapi -Wall -O2
+
+# Create a file
+TEST touch $M0/file1
+
+# Fallocate 512MB (536870912 bytes). This ensures the number of participant shards is > lru-limit
+TEST $(dirname $0)/bug-1696136 $H0 $V0 "0" "0" "536870912" /file1 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+rm -f $(dirname $0)/bug-1696136
+
+cleanup
diff --git a/tests/bugs/shard/bug-1705884.t b/tests/bugs/shard/bug-1705884.t
new file mode 100644
index 00000000000..f6e50376a58
--- /dev/null
+++ b/tests/bugs/shard/bug-1705884.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+require_fallocate -l 1m $M0/file
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST fallocate -l 200M $M0/foo
+EXPECT `echo "$(( ( 200 * 1024 * 1024 ) / 512 ))"` stat -c %b $M0/foo
+TEST truncate -s 0 $M0/foo
+EXPECT "0" stat -c %b $M0/foo
+TEST fallocate -l 100M $M0/foo
+EXPECT `echo "$(( ( 100 * 1024 * 1024 ) / 512 ))"` stat -c %b $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1738419.t b/tests/bugs/shard/bug-1738419.t
new file mode 100644
index 00000000000..8d0a31d9754
--- /dev/null
+++ b/tests/bugs/shard/bug-1738419.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 network.remote-dio off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST dd if=/dev/zero of=$M0/metadata bs=501 count=1
+
+EXPECT "501" echo $("dd" if=$M0/metadata bs=4096 count=1 of=/dev/null iflag=direct 2>&1 | awk '/bytes/ {print $1}')
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-shard-discard.c b/tests/bugs/shard/bug-shard-discard.c
new file mode 100644
index 00000000000..6fa93fb89d1
--- /dev/null
+++ b/tests/bugs/shard/bug-shard-discard.c
@@ -0,0 +1,70 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ off_t off = 0;
+ size_t len = 0;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+
+ if (argc != 7) {
+ fprintf(
+ stderr,
+ "Syntax: %s <host> <volname> <file-path> <off> <len> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, argv[6], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ fd = glfs_open(fs, argv[3], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ /* Note that off_t is signed but size_t isn't. */
+ off = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
+
+ ret = glfs_discard(fd, off, len);
+    if (ret < 0) {
+ fprintf(stderr, "glfs_discard: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/bug-shard-discard.t b/tests/bugs/shard/bug-shard-discard.t
new file mode 100644
index 00000000000..910ade14801
--- /dev/null
+++ b/tests/bugs/shard/bug-shard-discard.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+FILE_COUNT_TIME=5
+
+function get_shard_count {
+ ls $1/$2.* | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Create a file.
+TEST touch $M0/foo
+TEST dd if=/dev/urandom of=$M0/foo bs=1M count=10
+
+# This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+TEST stat $B0/${V0}1/.shard
+TEST stat $B0/${V0}2/.shard
+TEST stat $B0/${V0}3/.shard
+
+#Note the size of the file, it should be 10M
+EXPECT '10485760' stat -c %s $M0/foo
+
+gfid_foo=$(get_gfid_string $M0/foo)
+
+TEST build_tester $(dirname $0)/bug-shard-discard.c -lgfapi -Wall -O2
+#Call discard on the file at off=7M and len=3M
+TEST $(dirname $0)/bug-shard-discard $H0 $V0 /foo 7340032 3145728 `gluster --print-logdir`/glfs-$V0.log
+
+#Ensure that discard doesn't change the original size of the file.
+EXPECT '10485760' stat -c %s $M0/foo
+
+# Ensure that the last shard is all zero'd out
+EXPECT "1" file_all_zeroes `find $B0 -name $gfid_foo.2`
+EXPECT_NOT "1" file_all_zeroes `find $B0 -name $gfid_foo.1`
+
+# Now unlink the file. And ensure that all shards associated with the file are cleaned up
+TEST unlink $M0/foo
+
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_shard_count $B0/${V0}0/.shard $gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_shard_count $B0/${V0}1/.shard $gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_shard_count $B0/${V0}2/.shard $gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_shard_count $B0/${V0}3/.shard $gfid_foo
+TEST ! stat $M0/foo
+
+#clean up everything
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+TEST rm -f $(dirname $0)/bug-shard-discard
+
+cleanup
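
For reference, the zeroed-shard expectations above follow from how the discard range maps
onto the 4MB shards of the 10MB file: block 0 is the base file (bytes 0-4MB), shard .1
covers 4-8MB and shard .2 covers 8-10MB. A discard at offset 7340032 (7MB) of length
3145728 (3MB) spans bytes 7-10MB, so shard .2 is zeroed in full while shard .1 is only
partially zeroed:

    echo $((7340032 / 4194304))                   # 1 - the discard begins inside shard .1
    echo $(((7340032 + 3145728 - 1) / 4194304))   # 2 - and ends inside shard .2
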
diff --git a/tests/bugs/shard/bug-shard-zerofill.c b/tests/bugs/shard/bug-shard-zerofill.c
new file mode 100644
index 00000000000..ed4c8c54dc2
--- /dev/null
+++ b/tests/bugs/shard/bug-shard-zerofill.c
@@ -0,0 +1,60 @@
+#include <stdio.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+ int ret = 1;
+
+ if (argc != 5) {
+ fprintf(stderr, "Syntax: %s <host> <volname> <file-path> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_set_logging(fs, argv[4], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ fd = glfs_open(fs, argv[3], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ /* Zero-fill "foo" with 10MB of data */
+ ret = glfs_zerofill(fd, 0, 10485760);
+    if (ret < 0) {
+ fprintf(stderr, "glfs_zerofill: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/bug-shard-zerofill.t b/tests/bugs/shard/bug-shard-zerofill.t
new file mode 100644
index 00000000000..4a919a24b99
--- /dev/null
+++ b/tests/bugs/shard/bug-shard-zerofill.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Create a file.
+TEST touch $M0/foo
+
+gfid_foo=$(get_gfid_string $M0/foo)
+
+TEST build_tester $(dirname $0)/bug-shard-zerofill.c -lgfapi -Wall -O2
+TEST $(dirname $0)/bug-shard-zerofill $H0 $V0 /foo `gluster --print-logdir`/glfs-$V0.log
+
+# This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+TEST stat $B0/${V0}1/.shard
+TEST stat $B0/${V0}2/.shard
+TEST stat $B0/${V0}3/.shard
+
+EXPECT "4194304" echo `find $B0 -name $gfid_foo.1 | xargs stat -c %s`
+EXPECT "2097152" echo `find $B0 -name $gfid_foo.2 | xargs stat -c %s`
+
+EXPECT "1" file_all_zeroes $M0/foo
+
+TEST `echo "abc" >> $M0/foo`
+
+EXPECT_NOT "1" file_all_zeroes $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+TEST rm -f $(dirname $0)/bug-shard-zerofill
+
+cleanup
diff --git a/tests/bugs/shard/configure-lru-limit.t b/tests/bugs/shard/configure-lru-limit.t
new file mode 100644
index 00000000000..923a4d8d747
--- /dev/null
+++ b/tests/bugs/shard/configure-lru-limit.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 25
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Perform a write that would cause 25 shards to be created, 24 of them under .shard
+TEST dd if=/dev/zero of=$M0/foo bs=1M count=100
+
+statedump=$(generate_mount_statedump $V0)
+sleep 1
+EXPECT "25" echo $(grep "lru-max-limit" $statedump | cut -f2 -d'=' | tail -1)
+
+# The base shard is never added to this list, so the remaining shards account for the 24 inodes in the lru list
+EXPECT "24" echo $(grep "inode-count" $statedump | cut -f2 -d'=' | tail -1)
+
+rm -f $statedump
+
+# Test to ensure there's no "reconfiguration" of the value once set.
+TEST $CLI volume set $V0 features.shard-lru-limit 30
+statedump=$(generate_mount_statedump $V0)
+sleep 1
+EXPECT "25" echo $(grep "lru-max-limit" $statedump | cut -f2 -d'=' | tail -1)
+rm -f $statedump
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+statedump=$(generate_mount_statedump $V0)
+sleep 1
+EXPECT "30" echo $(grep "lru-max-limit" $statedump | cut -f2 -d'=' | tail -1)
+rm -f $statedump
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
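
For reference, the inode-count of 24 checked above comes from the write size: 100MB with a
4MB shard-block-size is 25 blocks, and since block 0 (the base file) is never placed on the
lru list, 24 shard inodes remain on it:

    echo $((100 / 4))   # 25 blocks in total
    echo $((25 - 1))    # 24 inodes expected on the lru list
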
diff --git a/tests/bugs/shard/issue-1243.t b/tests/bugs/shard/issue-1243.t
new file mode 100644
index 00000000000..ba22d2b74fe
--- /dev/null
+++ b/tests/bugs/shard/issue-1243.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST $CLI volume set $V0 md-cache-timeout 10
+
+# Write data into a file such that its size crosses shard-block-size
+TEST dd if=/dev/zero of=$M0/foo bs=1048576 count=8 oflag=direct
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Execute a setxattr on the file.
+TEST setfattr -n trusted.libvirt -v some-value $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Execute a removexattr on the file.
+TEST setfattr -x trusted.libvirt $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+cleanup
diff --git a/tests/bugs/shard/issue-1281.t b/tests/bugs/shard/issue-1281.t
new file mode 100644
index 00000000000..9704caa8944
--- /dev/null
+++ b/tests/bugs/shard/issue-1281.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+#Open a file and store descriptor in fd = 5
+exec 5>$M0/foo
+
+#Unlink the same file which is opened in prev step
+TEST unlink $M0/foo
+
+#Write something on the file using the open fd = 5
+echo "issue-1281" >&5
+
+#Write on the descriptor should be successful
+EXPECT 0 echo $?
+
+#Close the fd = 5
+exec 5>&-
+
+cleanup
diff --git a/tests/bugs/shard/issue-1425.t b/tests/bugs/shard/issue-1425.t
new file mode 100644
index 00000000000..bbe82c0e5b2
--- /dev/null
+++ b/tests/bugs/shard/issue-1425.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+FILE_COUNT_TIME=5
+
+function get_file_count {
+ ls $1* | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST fallocate -l 20M $M0/foo
+gfid_new=$(get_gfid_string $M0/foo)
+
+# Check for the base shard
+TEST stat $M0/foo
+TEST stat $B0/${V0}0/foo
+
+# There should be 4 associated shards
+EXPECT_WITHIN $FILE_COUNT_TIME 4 get_file_count $B0/${V0}0/.shard/$gfid_new
+
+# There should be 1+4 shards, and we expect 4 fewer lookups than on a build without this patch
+EXPECT "21" echo `$CLI volume profile $V0 info incremental | grep -w LOOKUP | awk '{print $8}'`
+
+# Delete the base shard and check shards get cleaned up
+TEST unlink $M0/foo
+
+TEST ! stat $M0/foo
+TEST ! stat $B0/${V0}0/foo
+
+# There should be no shards now
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_new
+cleanup
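
For reference, the "1+4" above is again shard geometry: a 20MB fallocate with a 4MB
shard-block-size creates 5 blocks, the base file plus 4 shards under .shard, which is what
the file-count check waits for:

    echo $((20 / 4))   # 5 blocks: the base file + 4 shards
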
diff --git a/tests/bugs/shard/parallel-truncate-read.t b/tests/bugs/shard/parallel-truncate-read.t
new file mode 100644
index 00000000000..4de876f58f6
--- /dev/null
+++ b/tests/bugs/shard/parallel-truncate-read.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+#This test will crash if shard's LRU list still contains a shard's inode even after
+#the inode is forgotten. The minimum observed time for the crash to happen was 180 seconds.
+
+. $(dirname $0)/../../include.rc
+
+function keep_writing {
+ cd $M0;
+ while [ -f /tmp/parallel-truncate-read ]
+ do
+ dd if=/dev/zero of=file1 bs=1M count=16
+ done
+ cd
+}
+
+function keep_reading {
+ cd $M0;
+ while [ -f /tmp/parallel-truncate-read ]
+ do
+ cat file1 > /dev/null
+ done
+ cd
+}
+
+cleanup;
+
+TEST touch /tmp/parallel-truncate-read
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+keep_writing &
+keep_reading &
+sleep 180
+TEST rm -f /tmp/parallel-truncate-read
+wait
+#test that the mount is operational
+TEST stat $M0
+
+cleanup;
diff --git a/tests/bugs/shard/shard-append-test.c b/tests/bugs/shard/shard-append-test.c
new file mode 100644
index 00000000000..c7debb2b182
--- /dev/null
+++ b/tests/bugs/shard/shard-append-test.c
@@ -0,0 +1,183 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+/* This test verifies that the shard xlator handles offsets in appending writes
+ * correctly. It performs 1025 writes of 1025 bytes each from 5 threads, where
+ * each thread fills its buffer with its own distinct character. At the end it
+ * reads the file back in 1025-byte chunks and checks that every chunk contains
+ * a single repeated character and that the total amount of data read is
+ * 5*1025*1025 bytes. 1025 bytes is chosen because it guarantees that, as the
+ * file grows past the initial shard, some writes straddle a shard boundary. */
+pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+int thread_data = '1';
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile_server failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+void *
+write_data(void *data)
+{
+ char buf[1025] = {0};
+ glfs_fd_t *glfd = NULL;
+ glfs_t *fs = data;
+ int i = 0;
+
+ pthread_mutex_lock(&lock);
+ {
+ memset(buf, thread_data, sizeof(buf));
+ thread_data++;
+ }
+ pthread_mutex_unlock(&lock);
+
+ for (i = 0; i < 1025; i++) {
+        glfd = glfs_creat(fs, "parallel-write.txt",
+                          O_WRONLY | O_APPEND | O_SYNC, S_IRUSR | S_IWUSR);
+ if (!glfd) {
+ LOG_ERR("Failed to create file");
+ exit(1);
+ }
+
+ if (glfs_write(glfd, buf, sizeof(buf), 0) < 0) {
+ LOG_ERR("Failed to write to file");
+ exit(1);
+ }
+ if (glfs_close(glfd) != 0) {
+ LOG_ERR("Failed to close file");
+ exit(1);
+ }
+ }
+ return NULL;
+}
+
+int
+main(int argc, char *argv[])
+{
+ pthread_t tid[5] = {0};
+ char buf[1025] = {0};
+ char cmp_buf[1025] = {0};
+ int ret = 0;
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ ssize_t bytes_read = 0;
+ ssize_t total_bytes_read = 0;
+ int i = 0;
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = init_glfs(hostname, volname, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ for (i = 0; i < 5; i++) {
+ pthread_create(&tid[i], NULL, write_data, fs);
+ }
+
+ for (i = 0; i < 5; i++) {
+ pthread_join(tid[i], NULL);
+ }
+ glfd = glfs_open(fs, "parallel-write.txt", O_RDONLY);
+ if (!glfd) {
+ LOG_ERR("Failed to open file for reading");
+ exit(1);
+ }
+
+ while ((bytes_read = glfs_read(glfd, buf, sizeof(buf), 0)) > 0) {
+ if (bytes_read != sizeof(buf)) {
+ fprintf(stderr,
+ "Didn't read complete data read: %zd "
+ "expected: %lu",
+ bytes_read, sizeof(buf));
+ exit(1);
+ }
+
+ total_bytes_read += bytes_read;
+ if (buf[0] < '1' || buf[0] >= thread_data) {
+ fprintf(stderr, "Invalid character found: %c", buf[0]);
+ exit(1);
+ }
+ memset(cmp_buf, buf[0], sizeof(cmp_buf));
+ if (memcmp(cmp_buf, buf, sizeof(cmp_buf))) {
+ LOG_ERR("Data corrupted");
+ exit(1);
+ }
+ memset(cmp_buf, 0, sizeof(cmp_buf));
+ }
+
+ if (total_bytes_read != 5 * 1025 * 1025) {
+ fprintf(stderr,
+ "Failed to read what is written, read; %zd, "
+ "expected %zu",
+ total_bytes_read, 5 * 1025 * 1025);
+ exit(1);
+ }
+
+ if (glfs_close(glfd) != 0) {
+ LOG_ERR("Failed to close");
+ exit(1);
+ }
+ return 0;
+}
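
For reference, the total-size check in the reader loop above is simple arithmetic over the
writer threads: 5 threads x 1025 writes x 1025 bytes each:

    echo $((5 * 1025 * 1025))   # 5253125 bytes expected in parallel-write.txt
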
diff --git a/tests/bugs/shard/shard-append-test.t b/tests/bugs/shard/shard-append-test.t
new file mode 100644
index 00000000000..f8719f2a2c1
--- /dev/null
+++ b/tests/bugs/shard/shard-append-test.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{1,2,3};
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+
+#Uncomment the following line after shard-queuing is implemented
+#TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0;
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/shard-append-test.c -lgfapi -lpthread
+
+TEST ./$(dirname $0)/shard-append-test ${H0} $V0 $logdir/shard-append-test.log
+
+cleanup_tester $(dirname $0)/shard-append-test
+
+cleanup;
diff --git a/tests/bugs/shard/shard-fallocate.c b/tests/bugs/shard/shard-fallocate.c
new file mode 100644
index 00000000000..cb0714e8564
--- /dev/null
+++ b/tests/bugs/shard/shard-fallocate.c
@@ -0,0 +1,113 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
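+/* The test script passes an opcode index (0-3); get_fallocate_flag() maps it
+ * to the corresponding FALLOC_FL_* mode bits passed to glfs_fallocate(). */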
+enum fallocate_flag {
+ TEST_FALLOCATE_NONE,
+ TEST_FALLOCATE_KEEP_SIZE,
+ TEST_FALLOCATE_ZERO_RANGE,
+ TEST_FALLOCATE_PUNCH_HOLE,
+ TEST_FALLOCATE_MAX,
+};
+
+int
+get_fallocate_flag(int opcode)
+{
+ int ret = 0;
+
+ switch (opcode) {
+ case TEST_FALLOCATE_NONE:
+ ret = 0;
+ break;
+ case TEST_FALLOCATE_KEEP_SIZE:
+ ret = FALLOC_FL_KEEP_SIZE;
+ break;
+ case TEST_FALLOCATE_ZERO_RANGE:
+ ret = FALLOC_FL_ZERO_RANGE;
+ break;
+ case TEST_FALLOCATE_PUNCH_HOLE:
+ ret = FALLOC_FL_PUNCH_HOLE;
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 1;
+ int opcode = -1;
+ off_t offset = 0;
+ size_t len = 0;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+
+ if (argc != 8) {
+ fprintf(stderr,
+ "Syntax: %s <host> <volname> <opcode> <offset> <len> "
+ "<file-path> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, argv[7], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ opcode = atoi(argv[3]);
+ opcode = get_fallocate_flag(opcode);
+ if (opcode < 0) {
+ fprintf(stderr, "get_fallocate_flag: invalid flag\n");
+ goto out;
+ }
+
+ /* Note that off_t is signed but size_t isn't. */
+ offset = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
+
+ fd = glfs_open(fs, argv[6], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ ret = glfs_fallocate(fd, opcode, offset, len);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fallocate: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/shard-inode-refcount-test.t b/tests/bugs/shard/shard-inode-refcount-test.t
new file mode 100644
index 00000000000..3fd181be690
--- /dev/null
+++ b/tests/bugs/shard/shard-inode-refcount-test.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+SHARD_COUNT_TIME=5
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
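+# A 23MB file with a 4MB shard-block-size is stored as the base file plus
+# five shards, hence the file name below.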
+TEST dd if=/dev/zero conv=fsync of=$M0/one-plus-five-shards bs=1M count=23
+
+ACTIVE_INODES_BEFORE=$(get_mount_active_size_value $V0)
+TEST rm -f $M0/one-plus-five-shards
+# Expect 5 fewer active inodes once the shards are deleted, offset by one
+# extra inode for the .remove_me entry that gets created.
+EXPECT_WITHIN $SHARD_COUNT_TIME `expr $ACTIVE_INODES_BEFORE - 5 + 1` get_mount_active_size_value $V0 $M0
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/unlinks-and-renames.t b/tests/bugs/shard/unlinks-and-renames.t
new file mode 100644
index 00000000000..990ca69a8b1
--- /dev/null
+++ b/tests/bugs/shard/unlinks-and-renames.t
@@ -0,0 +1,333 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+# The aim of this test script is to exercise the various codepaths of unlink
+# and rename fops in sharding and make sure they work fine.
+#
+
+FILE_COUNT_TIME=5
+
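+# Count the entries whose names begin with the given prefix, i.e. a gfid's
+# shards under /.shard or its marker under /.shard/.remove_me.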
+function get_file_count {
+ ls $1* | wc -l
+}
+
+#################################################
+################### UNLINK ######################
+#################################################
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/foo
+TEST touch $M0/dir/new
+
+##########################################
+##### 01. Unlink with /.shard absent #####
+##########################################
+
+TEST truncate -s 5M $M0/dir/foo
+TEST ! stat $B0/${V0}0/.shard
+TEST ! stat $B0/${V0}1/.shard
+# Test to ensure that unlink doesn't fail due to absence of /.shard
+gfid_foo=$(get_gfid_string $M0/dir/foo)
+TEST unlink $M0/dir/foo
+TEST stat $B0/${V0}0/.shard/.remove_me
+TEST stat $B0/${V0}1/.shard/.remove_me
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_foo
+
+######################################################
+##### 02. Unlink of a sharded file without holes #####
+######################################################
+
+# Create a 9M sharded file
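+# (with a 4MB shard-block-size this produces the base file plus shards .1 and .2)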
+TEST dd if=/dev/zero of=$M0/dir/new bs=1024 count=9216
+gfid_new=$(get_gfid_string $M0/dir/new)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_new.1
+TEST stat $B0/${V0}1/.shard/$gfid_new.1
+TEST stat $B0/${V0}0/.shard/$gfid_new.2
+TEST stat $B0/${V0}1/.shard/$gfid_new.2
+TEST unlink $M0/dir/new
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_new
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_new
+TEST ! stat $M0/dir/new
+TEST ! stat $B0/${V0}0/dir/new
+TEST ! stat $B0/${V0}1/dir/new
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_new
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_new
+
+###########################################
+##### 03. Unlink with /.shard present #####
+###########################################
+
+TEST truncate -s 5M $M0/dir/foo
+gfid_foo=$(get_gfid_string $M0/dir/foo)
+# Ensure its shards are absent.
+TEST ! stat $B0/${V0}0/.shard/$gfid_foo.1
+TEST ! stat $B0/${V0}1/.shard/$gfid_foo.1
+# Test to ensure that unlink of a sparse file works fine.
+TEST unlink $M0/dir/foo
+TEST ! stat $B0/${V0}0/dir/foo
+TEST ! stat $B0/${V0}1/dir/foo
+TEST ! stat $M0/dir/foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_foo
+
+#################################################################
+##### 04. Unlink of a file with only one block (the zeroth) #####
+#################################################################
+
+TEST touch $M0/dir/foo
+gfid_foo=$(get_gfid_string $M0/dir/foo)
+TEST dd if=/dev/zero of=$M0/dir/foo bs=1024 count=1024
+# Test to ensure that unlink of a file with only base shard works fine.
+TEST unlink $M0/dir/foo
+TEST ! stat $B0/${V0}0/dir/foo
+TEST ! stat $B0/${V0}1/dir/foo
+TEST ! stat $M0/dir/foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_foo
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_foo
+
+########################################################
+##### 05. Unlink of a sharded file with hard-links #####
+########################################################
+
+# Create a 9M sharded file
+TEST dd if=/dev/zero of=$M0/dir/original bs=1024 count=9216
+gfid_original=$(get_gfid_string $M0/dir/original)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_original.1
+TEST stat $B0/${V0}1/.shard/$gfid_original.1
+TEST stat $B0/${V0}0/.shard/$gfid_original.2
+TEST stat $B0/${V0}1/.shard/$gfid_original.2
+# Create a hard link.
+TEST ln $M0/dir/original $M0/link
+# Now delete the original file.
+TEST unlink $M0/dir/original
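+# Another hard link still exists, so no .remove_me entry should appear yet.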
+TEST ! stat $B0/${V0}0/.shard/.remove_me/$gfid_original
+TEST ! stat $B0/${V0}1/.shard/.remove_me/$gfid_original
+# Ensure the shards are still intact.
+TEST stat $B0/${V0}0/.shard/$gfid_original.1
+TEST stat $B0/${V0}1/.shard/$gfid_original.1
+TEST stat $B0/${V0}0/.shard/$gfid_original.2
+TEST stat $B0/${V0}1/.shard/$gfid_original.2
+TEST ! stat $M0/dir/original
+TEST stat $M0/link
+TEST stat $B0/${V0}0/link
+TEST stat $B0/${V0}1/link
+# Now delete the last link.
+TEST unlink $M0/link
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_original
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_original
+# Ensure that the shards are all cleaned up.
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_original
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_original
+TEST ! stat $M0/link
+TEST ! stat $B0/${V0}0/link
+TEST ! stat $B0/${V0}1/link
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
+
+#################################################
+################### RENAME ######################
+#################################################
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST touch $M0/dir/src
+TEST touch $M0/dir/dst
+
+##########################################
+##### 06. Rename with /.shard absent #####
+##########################################
+
+TEST truncate -s 5M $M0/dir/dst
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+TEST ! stat $B0/${V0}0/.shard
+TEST ! stat $B0/${V0}1/.shard
+# Test to ensure that rename doesn't fail due to absence of /.shard
+TEST mv -f $M0/dir/src $M0/dir/dst
+TEST ! stat $M0/dir/src
+TEST stat $M0/dir/dst
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $B0/${V0}0/dir/dst
+TEST stat $B0/${V0}1/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+######################################################
+##### 07. Rename to a sharded file without holes #####
+######################################################
+
+TEST unlink $M0/dir/dst
+TEST touch $M0/dir/src
+# Create a 9M sharded file
+TEST dd if=/dev/zero of=$M0/dir/dst bs=1024 count=9216
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_dst.1
+TEST stat $B0/${V0}1/.shard/$gfid_dst.1
+TEST stat $B0/${V0}0/.shard/$gfid_dst.2
+TEST stat $B0/${V0}1/.shard/$gfid_dst.2
+TEST mv -f $M0/dir/src $M0/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_dst
+TEST ! stat $M0/dir/src
+TEST stat $M0/dir/dst
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $B0/${V0}0/dir/dst
+TEST stat $B0/${V0}1/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+#######################################################
+##### 08. Rename of dst file with /.shard present #####
+#######################################################
+
+TEST unlink $M0/dir/dst
+TEST touch $M0/dir/src
+TEST truncate -s 5M $M0/dir/dst
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+# Test to ensure that rename into a sparse file works fine.
+TEST mv -f $M0/dir/src $M0/dir/dst
+TEST ! stat $M0/dir/src
+TEST stat $M0/dir/dst
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $B0/${V0}0/dir/dst
+TEST stat $B0/${V0}1/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+###################################################################
+##### 09. Rename of dst file with only one block (the zeroth) #####
+###################################################################
+
+TEST unlink $M0/dir/dst
+TEST touch $M0/dir/src
+TEST dd if=/dev/zero of=$M0/dir/dst bs=1024 count=1024
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+# Test to ensure that rename into a file with only base shard works fine.
+TEST mv -f $M0/dir/src $M0/dir/dst
+TEST ! stat $M0/dir/src
+TEST stat $M0/dir/dst
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $B0/${V0}0/dir/dst
+TEST stat $B0/${V0}1/dir/dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+############################################################
+##### 10. Rename to a dst sharded file with hard-links #####
+############################################################
+
+TEST unlink $M0/dir/dst
+TEST touch $M0/dir/src
+# Create a 9M sharded file
+TEST dd if=/dev/zero of=$M0/dir/dst bs=1024 count=9216
+gfid_dst=$(get_gfid_string $M0/dir/dst)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_dst.1
+TEST stat $B0/${V0}1/.shard/$gfid_dst.1
+TEST stat $B0/${V0}0/.shard/$gfid_dst.2
+TEST stat $B0/${V0}1/.shard/$gfid_dst.2
+# Create a hard link.
+TEST ln $M0/dir/dst $M0/link
+# Now rename src to the dst.
+TEST mv -f $M0/dir/src $M0/dir/dst
+# Ensure the shards are still intact.
+TEST stat $B0/${V0}0/.shard/$gfid_dst.1
+TEST stat $B0/${V0}1/.shard/$gfid_dst.1
+TEST stat $B0/${V0}0/.shard/$gfid_dst.2
+TEST stat $B0/${V0}1/.shard/$gfid_dst.2
+TEST ! stat $M0/dir/src
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST ! stat $B0/${V0}0/.shard/.remove_me/$gfid_dst
+TEST ! stat $B0/${V0}1/.shard/.remove_me/$gfid_dst
+# Now rename another file to the last link.
+TEST touch $M0/dir/src2
+TEST mv -f $M0/dir/src2 $M0/link
+# Ensure that the shards are all cleaned up.
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/$gfid_dst
+TEST ! stat $B0/${V0}0/.shard/$gfid_dst.1
+TEST ! stat $B0/${V0}1/.shard/$gfid_dst.1
+TEST ! stat $B0/${V0}0/.shard/$gfid_dst.2
+TEST ! stat $B0/${V0}1/.shard/$gfid_dst.2
+TEST ! stat $M0/dir/src2
+TEST ! stat $B0/${V0}0/dir/src2
+TEST ! stat $B0/${V0}1/dir/src2
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/.remove_me/$gfid_dst
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}1/.shard/.remove_me/$gfid_dst
+
+##############################################################
+##### 11. Rename with non-existent dst and a sharded src #####
+##############################################################
+
+TEST touch $M0/dir/src
+TEST dd if=/dev/zero of=$M0/dir/src bs=1024 count=9216
+gfid_src=$(get_gfid_string $M0/dir/src)
+# Ensure its shards are created.
+TEST stat $B0/${V0}0/.shard/$gfid_src.1
+TEST stat $B0/${V0}1/.shard/$gfid_src.1
+TEST stat $B0/${V0}0/.shard/$gfid_src.2
+TEST stat $B0/${V0}1/.shard/$gfid_src.2
+# Now rename src to the dst.
+TEST mv $M0/dir/src $M0/dir/dst2
+
+TEST stat $B0/${V0}0/.shard/$gfid_src.1
+TEST stat $B0/${V0}1/.shard/$gfid_src.1
+TEST stat $B0/${V0}0/.shard/$gfid_src.2
+TEST stat $B0/${V0}1/.shard/$gfid_src.2
+TEST ! stat $M0/dir/src
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $M0/dir/dst2
+TEST stat $B0/${V0}0/dir/dst2
+TEST stat $B0/${V0}1/dir/dst2
+
+#############################################################################
+##### 12. Rename with non-existent dst and a sharded src with no shards #####
+#############################################################################
+
+TEST touch $M0/dir/src
+TEST dd if=/dev/zero of=$M0/dir/src bs=1024 count=1024
+gfid_src=$(get_gfid_string $M0/dir/src)
+TEST ! stat $B0/${V0}0/.shard/$gfid_src.1
+TEST ! stat $B0/${V0}1/.shard/$gfid_src.1
+# Now rename src to the dst.
+TEST mv $M0/dir/src $M0/dir/dst1
+TEST ! stat $M0/dir/src
+TEST ! stat $B0/${V0}0/dir/src
+TEST ! stat $B0/${V0}1/dir/src
+TEST stat $M0/dir/dst1
+TEST stat $B0/${V0}0/dir/dst1
+TEST stat $B0/${V0}1/dir/dst1
+
+cleanup
diff --git a/tests/bugs/shard/zero-flag.t b/tests/bugs/shard/zero-flag.t
new file mode 100644
index 00000000000..1f39787ab9f
--- /dev/null
+++ b/tests/bugs/shard/zero-flag.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+require_fallocate -l 1m $M0/file
+require_fallocate -p -l 512k $M0/file && rm -f $M0/file
+require_fallocate -z -l 512k $M0/file && rm -f $M0/file
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST build_tester $(dirname $0)/shard-fallocate.c -lgfapi -Wall -O2
+
+# On file1, confirm that when fallocate's offset + len exceeds the current
+# file size, the file grows to the new size.
+TEST touch $M0/tmp
+TEST `echo 'abcdefghijklmnopqrstuvwxyz' > $M0/tmp`
+TEST touch $M0/file1
+
+gfid_file1=$(get_gfid_string $M0/file1)
+
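+# shard-fallocate arguments: <host> <volname> <opcode> <offset> <len> <file-path> <log-file>;
+# opcode "0" requests a plain fallocate with no special flags.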
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "0" "6291456" /file1 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT '6291456' stat -c %s $M0/file1
+
+# This should ensure /.shard is created on the bricks.
+TEST stat $B0/${V0}0/.shard
+TEST stat $B0/${V0}1/.shard
+TEST stat $B0/${V0}2/.shard
+TEST stat $B0/${V0}3/.shard
+
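+# file1 is 6MB, so shard .1 (covering the 4MB-8MB range) holds the remaining 2MB.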
+EXPECT "2097152" echo `find $B0 -name $gfid_file1.1 | xargs stat -c %s`
+EXPECT "1" file_all_zeroes $M0/file1
+
+
+# On file2, confirm that fallocate over an already-allocated region of the
+# file does not change the file's content.
+TEST truncate -s 6M $M0/file2
+TEST dd if=$M0/tmp of=$M0/file2 bs=1 seek=3145728 count=26 conv=notrunc
+md5sum_file2=$(md5sum $M0/file2 | awk '{print $1}')
+
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "3145728" "26" /file2 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT '6291456' stat -c %s $M0/file2
+EXPECT "$md5sum_file2" echo `md5sum $M0/file2 | awk '{print $1}'`
+
+# On file3, confirm that fallocate over a region of the file consisting of
+# holes creates a new shard in its place, fallocates it, and leaves the file
+# content seen by the application unchanged.
+TEST touch $M0/file3
+
+gfid_file3=$(get_gfid_string $M0/file3)
+
+TEST dd if=$M0/tmp of=$M0/file3 bs=1 seek=9437184 count=26 conv=notrunc
+TEST ! stat $B0/$V0*/.shard/$gfid_file3.1
+TEST stat $B0/$V0*/.shard/$gfid_file3.2
+md5sum_file3=$(md5sum $M0/file3 | awk '{print $1}')
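+# The 26 bytes landed at offset 9MB, i.e. 1MB into shard .2, so that shard's
+# size is 1048576 + 26 = 1048602 bytes.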
+EXPECT "1048602" echo `find $B0 -name $gfid_file3.2 | xargs stat -c %s`
+
+TEST $(dirname $0)/shard-fallocate $H0 $V0 "0" "5242880" "1048576" /file3 `gluster --print-logdir`/glfs-$V0.log
+EXPECT "$md5sum_file3" echo `md5sum $M0/file3 | awk '{print $1}'`
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+rm -f $(dirname $0)/shard-fallocate
+cleanup