summaryrefslogtreecommitdiffstats
path: root/tests/bugs/distribute/bug-1099890.t
diff options
context:
space:
mode:
authorvmallika <vmallika@redhat.com>2015-05-25 13:35:48 +0530
committerRaghavendra G <rgowdapp@redhat.com>2015-05-25 11:34:22 -0700
commit225ff553106396066d68d8c757e5c001f5d9ab15 (patch)
tree34acc904eb69ec0ee5507ab3cb9e2632bb34a426 /tests/bugs/distribute/bug-1099890.t
parentb51ee5f8d1f80d66effffc06c1e49099c04014a4 (diff)
Quota: fix testcases not to send parallel writes for accurate
quota enforcement Currently quota enforcer doesn't consider parallel writes and allows quota to exceed limit where there are high rate of parallel writes. Bug# 1223658 tracks the issue. This patch fixes the spurious failures by not sending parallel writes. Using O_SYNC and O_APPEND flags and block size not more that 256k (For higher block size NFS client splits the block into 256k chinks and does parallel writes) Change-Id: I297c164b030cecb87ce5b494c02b09e8b073b276 BUG: 1223798 Signed-off-by: vmallika <vmallika@redhat.com> Reviewed-on: http://review.gluster.org/10878 Tested-by: NetBSD Build System Tested-by: Gluster Build System <jenkins@build.gluster.com> Reviewed-by: Raghavendra G <rgowdapp@redhat.com> Tested-by: Raghavendra G <rgowdapp@redhat.com>
Diffstat (limited to 'tests/bugs/distribute/bug-1099890.t')
-rw-r--r--tests/bugs/distribute/bug-1099890.t15
1 file changed, 10 insertions, 5 deletions
diff --git a/tests/bugs/distribute/bug-1099890.t b/tests/bugs/distribute/bug-1099890.t
index f48162c972b..40f70d4938b 100644
--- a/tests/bugs/distribute/bug-1099890.t
+++ b/tests/bugs/distribute/bug-1099890.t
@@ -10,6 +10,10 @@
cleanup;
+QDD=$(dirname $0)/quota
+# compile the test write program and run it
+build_tester $(dirname $0)/../../basic/quota.c -o $QDD
+
TEST glusterd;
TEST pidof glusterd;
@@ -49,14 +53,14 @@ EXPECT "150M" echo `df -h $M0 -P | tail -1 | awk {'print $2'}`
# Create a new file 'foo' under the root of the volume, which hashes to subvol-0
# of DHT, that consumes 40M
-TEST dd if=/dev/zero of=$M0/foo bs=5120k count=8
+TEST $QDD $M0/foo 256 160
TEST stat $B0/${V0}1/foo
TEST ! stat $B0/${V0}2/foo
# Create a new file 'bar' under the root of the volume, which hashes to subvol-1
# of DHT, that consumes 40M
-TEST dd if=/dev/zero of=$M0/bar bs=5120k count=8
+TEST $QDD $M0/bar 256 160
TEST ! stat $B0/${V0}1/bar
TEST stat $B0/${V0}2/bar
@@ -84,7 +88,7 @@ TEST touch $M0/empty1;
# If this bug is fixed, then DHT should be routing the creation to subvol-1 only
# as it has more than min-free-disk space available.
-TEST dd if=/dev/zero of=$M0/file bs=1k count=1
+TEST $QDD $M0/file 1 1
sleep 1;
TEST ! stat $B0/${V0}1/file
TEST stat $B0/${V0}2/file
@@ -96,7 +100,7 @@ TEST touch $M0/empty2;
# Now I create a new file that hashes to subvol-0, at the end of which, there
# will be less than min-free-disk space available on it.
-TEST dd if=/dev/zero of=$M0/fil bs=5120k count=4
+TEST $QDD $M0/fil 256 80
sleep 1;
TEST stat $B0/${V0}1/fil
TEST ! stat $B0/${V0}2/fil
@@ -108,7 +112,7 @@ TEST touch $M0/empty3;
# Now I create a file that hashes to subvol-0 but since it has less than
# min-free-disk space available, its data will be cached on subvol-1.
-TEST dd if=/dev/zero of=$M0/zz bs=5120k count=1
+TEST $QDD $M0/zz 256 20
TEST stat $B0/${V0}1/zz
TEST stat $B0/${V0}2/zz
@@ -123,4 +127,5 @@ EXPECT "1" get_aux
UMOUNT_LOOP ${B0}/${V0}{1,2}
rm -f ${B0}/brick{1,2}
+rm -f $QDD
cleanup