Diffstat (limited to 'tests')
-rw-r--r--  tests/basic/accept-v6v4.t                                         | 122
-rwxr-xr-x  tests/basic/dht-min-free-space.t                                  |  78
-rw-r--r--  tests/basic/ec/ec-common                                          |   2
-rw-r--r--  tests/basic/ec/self-heal.t                                        |   2
-rw-r--r--  tests/basic/exports_parsing.t                                     |  15
-rw-r--r--  tests/basic/fop-sampling.t                                        |  78
-rwxr-xr-x  tests/basic/fops-sanity-gfproxy.t                                 |  32
-rw-r--r--  tests/basic/gfproxy.t                                             |  74
-rw-r--r--  tests/basic/glusterd/volfile_server_switch.t                      |   3
-rw-r--r--  tests/basic/halo-failover-disabled.t                              |  77
-rw-r--r--  tests/basic/halo-failover-enabled.t                               |  87
-rw-r--r--  tests/basic/halo-hybrid.t                                         |  70
-rw-r--r--  tests/basic/halo.t                                                |  51
-rwxr-xr-x  tests/basic/mount-nfs-auth.t                                      |  17
-rw-r--r--  tests/basic/write-behind.t                                        |  53
-rw-r--r--  tests/bugs/distribute/bug-1099890.t                               |   2
-rwxr-xr-x  tests/bugs/distribute/bug-1161311.t                               |  10
-rw-r--r--  tests/bugs/fuse/bug-858488-min-free-disk.t                        |   1
-rw-r--r--  tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t |  22
-rwxr-xr-x  tests/bugs/glusterd/bug-859927.t                                  |   8
-rw-r--r--  tests/bugs/quota/bug-1292020.t                                    |   7
-rw-r--r--  tests/cluster.rc                                                  |   9
-rw-r--r--  tests/configfiles/exports-v6                                      |   1
-rw-r--r--  tests/env.rc.in                                                   |   3
-rwxr-xr-x  tests/features/brick-min-free-space.t                             | 113
-rw-r--r--  tests/features/lock_revocation.t                                  |  52
-rw-r--r--  tests/halo.rc                                                     |  52
-rw-r--r--  tests/include.rc                                                  |   3
28 files changed, 1009 insertions, 35 deletions
diff --git a/tests/basic/accept-v6v4.t b/tests/basic/accept-v6v4.t
new file mode 100644
index 00000000000..7128c12c6be
--- /dev/null
+++ b/tests/basic/accept-v6v4.t
@@ -0,0 +1,122 @@
+#!/bin/bash
+
+. $(dirname $0)/../nfs.rc
+
+#
+# This test ensures that GlusterFS provides its NFS, mount, and management
+# daemons over both IPv4 and IPv6. It checks the services listening on both
+# IPv4 & IPv6 addresses, then performs a mount to test that mount & NFS work.
+#
+
+IPV4_SUPPORT=false
+IPV6_SUPPORT=false
+
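+# Detect which address families this host supports: "host" prints
+# "has address" for A records and "has IPv6 address" for AAAA records.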
+host $HOSTNAME | grep -q "has address" && IPV4_SUPPORT=true
+host $HOSTNAME | grep -q "has IPv6 address" && IPV6_SUPPORT=true
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+mkdir -p $B0/b{0,1,2}
+
+# make sure no registered rpcbind services are running
+service rpcbind restart
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI vol create $V0 replica 3 $H0:$B0/b0 $H0:$B0/b1 $H0:$B0/b2
+
+TEST $CLI vol set $V0 cluster.self-heal-daemon off
+TEST $CLI vol set $V0 nfs.disable off
+TEST $CLI vol set $V0 cluster.choose-local off
+TEST $CLI vol start $V0
+
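+# Well-known ports: Gluster's mount daemon, the glusterd management
+# port, and the standard NFS port.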
+MOUNTD_PORT=38465
+MGMTD_PORT=24007
+NFSD_PORT=2049
+
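+# Probe a TCP service using bash's /dev/tcp. Prints "Y" if the port is
+# reachable, or trivially when this host has no address of the requested
+# family (ip == "NONE").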
+function check_ip_port {
+ ip=$1
+ port=$2
+ type=$3
+
+    nc_flags=""
+    if [ "$type" == "v6" ]; then
+        nc_flags="-6"
+    fi
+
+    # Pass trivially when this host has no address of the requested family.
+    if [ "$ip" == "NONE" ]; then
+        echo "Y"
+        return
+    fi
+
+ if exec 3<>/dev/tcp/$ip/$port; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
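+# Attempt an NFS mount of the volume at the given address (bracketed
+# for IPv6). Prints "Y" on success, or trivially when the address
+# family is unavailable.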
+function check_nfs {
+ ip=$1
+ type=$2
+
+ if [ "$ip" == "NONE" ]; then
+ echo "Y"
+ return
+ fi
+
+ if [ "$type" == "v6" ]; then
+ addr="[$ip]"
+ else
+ addr="$ip"
+ fi
+
+ if mount_nfs $addr:/$V0 $N0; then
+ umount_nfs $N0
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+if ! $IPV4_SUPPORT && ! $IPV6_SUPPORT; then
+    exit 1
+fi
+
+# Get the V4 & V6 addresses of this host
+if $IPV4_SUPPORT; then
+ V4=$(host $HOSTNAME | head -n1 | awk -F ' ' '{print $4}')
+else
+ V4="NONE"
+fi
+
+if $IPV6_SUPPORT; then
+ V6=$(host $HOSTNAME | tail -n1 | awk -F ' ' '{print $5}')
+else
+ V6="NONE"
+fi
+
+# First check the management daemon
+EXPECT "Y" check_ip_port $V6 $MGMTD_PORT "v6"
+EXPECT "Y" check_ip_port $V4 $MGMTD_PORT "v4"
+
+# Give the MOUNT/NFS Daemon some time to start up
+sleep 4
+
+EXPECT "Y" check_ip_port $V4 $MOUNTD_PORT "v6"
+EXPECT "Y" check_ip_port $V6 $MOUNTD_PORT "v4"
+
+EXPECT "Y" check_ip_port $V4 $NFSD_PORT "v6"
+EXPECT "Y" check_ip_port $V6 $NFSD_PORT "v4"
+
+# Mount the file system
+EXPECT "Y" check_nfs $V6 "v6"
+EXPECT "Y" check_nfs $V4 "v4"
+
+cleanup;
diff --git a/tests/basic/dht-min-free-space.t b/tests/basic/dht-min-free-space.t
new file mode 100755
index 00000000000..17d10cc39a5
--- /dev/null
+++ b/tests/basic/dht-min-free-space.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+grep $B0/patchy1 /proc/mounts &> /dev/null && umount $B0/patchy1
+grep $B0/patchy2 /proc/mounts &> /dev/null && umount $B0/patchy2
+losetup -d /dev/loop0 2> /dev/null
+losetup -d /dev/loop1 2> /dev/null
+mkdir $B0/${V0}{1..2}
+
+TEST glusterd
+
+TEST dd if=/dev/zero of=/tmp/${V0}-dev1 bs=1M count=30
+TEST dd if=/dev/zero of=/tmp/${V0}-dev2 bs=1M count=30
+
+TEST losetup /dev/loop0 /tmp/${V0}-dev1
+TEST losetup /dev/loop1 /tmp/${V0}-dev2
+
+TEST mkfs.xfs /dev/loop0
+TEST mkfs.xfs /dev/loop1
+
+TEST mount /dev/loop0 $B0/${V0}1
+TEST mount /dev/loop1 $B0/${V0}2
+
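+# min-free-disk is the free-space threshold at which DHT starts
+# redirecting new files to other subvolumes; strict mode additionally
+# fails writes once the whole cluster is below the threshold, and a
+# refresh interval of 0 presumably forces the cached disk-usage stats
+# to be refreshed on every check.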
+TEST $CLI volume create $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}2
+TEST $CLI volume set $V0 cluster.min-free-disk 2MB
+TEST $CLI volume set $V0 cluster.min-free-strict-mode on
+TEST $CLI volume set $V0 cluster.du-refresh-interval-sec 0
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+####################################
+# Test re-directs of file creation #
+####################################
+
+# This should work, no redirects
+TEST dd if=/dev/zero of=$M0/testfile1 bs=1M count=8
+TEST [ -f /d/backends/${V0}2/testfile1 -a ! -k /d/backends/${V0}1/testfile1 ]
+
+TEST $CLI volume set $V0 cluster.min-free-disk 19MB
+
+# This should work, & the file redirected
+# Subvolume 2 should have the linkto &
+# Subvolume 1 should have the original
+TEST dd if=/dev/zero of=$M0/testfile3 bs=1M count=4
+TEST [ -f /d/backends/${V0}1/testfile3 -a ! -k /d/backends/${V0}1/testfile3 ]
+TEST [ -k /d/backends/${V0}2/testfile3 ]
+
+# This should fail, cluster is full
+TEST ! dd if=/dev/zero of=$M0/testfile2 bs=1M count=23
+
+###################
+# Strict mode off #
+###################
+TEST $CLI volume set $V0 cluster.min-free-strict-mode off
+TEST dd if=/dev/zero of=$M0/testfile1 bs=1M count=20
+TEST rm -f $M0/testfile1
+
+###################
+# Strict mode on #
+###################
+TEST $CLI volume set $V0 cluster.min-free-strict-mode on
+TEST ! dd if=/dev/zero of=$M0/testfile1 bs=1M count=16
+TEST rm -f $M0/testfile1
+
+killall gluster{fs,fsd,d}
+
+umount -lf $B0/${V0}1
+umount -lf $B0/${V0}2
+
+losetup -d /dev/loop0
+losetup -d /dev/loop1
+
+cleanup;
diff --git a/tests/basic/ec/ec-common b/tests/basic/ec/ec-common
index 83c4463a912..152e3b51236 100644
--- a/tests/basic/ec/ec-common
+++ b/tests/basic/ec/ec-common
@@ -45,7 +45,7 @@ for size in $SIZE_LIST; do
eval cs_big_truncate[$size]=$(sha1sum $tmp/big1 | awk '{ print $1 }')
done
-TEST df -h
+TEST df -h $M0
TEST stat $M0
for idx in `seq 0 $LAST_BRICK`; do
diff --git a/tests/basic/ec/self-heal.t b/tests/basic/ec/self-heal.t
index 98dd9232c73..3e3467535fb 100644
--- a/tests/basic/ec/self-heal.t
+++ b/tests/basic/ec/self-heal.t
@@ -136,7 +136,7 @@ TEST dd if=/dev/urandom of=$tmp/test bs=1024 count=1024
cs=$(sha1sum $tmp/test | awk '{ print $1 }')
-TEST df -h
+TEST df -h $M0
TEST stat $M0
for idx in {0..5}; do
diff --git a/tests/basic/exports_parsing.t b/tests/basic/exports_parsing.t
index fdaf9c2822e..da88bbcb2cc 100644
--- a/tests/basic/exports_parsing.t
+++ b/tests/basic/exports_parsing.t
@@ -32,7 +32,20 @@ function test_bad_opt ()
glusterfsd --print-exports $1 2>&1 | sed -n 1p
}
-EXPECT_KEYWORD "/test @test(rw,anonuid=0,sec=sys,) 10.35.11.31(rw,anonuid=0,sec=sys,)" test_good_file $EXP_FILES/exports
+function check_export_line() {
+ if [ "$1" == "$2" ]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+ return
+}
+
+export_result=$(test_good_file $EXP_FILES/exports)
+EXPECT "Y" check_export_line '/test @test(rw,anonuid=0,sec=sys,) 10.35.11.31(rw,anonuid=0,sec=sys,) ' "$export_result"
+
+export_result=$(test_good_file $EXP_FILES/exports-v6)
+EXPECT "Y" check_export_line '/test @test(rw,anonuid=0,sec=sys,) 2401:db00:11:1:face:0:3d:0(rw,anonuid=0,sec=sys,) ' "$export_result"
EXPECT_KEYWORD "Error parsing netgroups for:" test_bad_line $EXP_FILES/bad_exports
EXPECT_KEYWORD "Error parsing netgroups for:" test_long_netgroup $EXP_FILES/bad_exports
diff --git a/tests/basic/fop-sampling.t b/tests/basic/fop-sampling.t
index cea8aa737c0..713c7e27579 100644
--- a/tests/basic/fop-sampling.t
+++ b/tests/basic/fop-sampling.t
@@ -2,13 +2,27 @@
#
. $(dirname $0)/../include.rc
+. $(dirname $0)/../nfs.rc
. $(dirname $0)/../volume.rc
-SAMPLE_FILE="$(gluster --print-logdir)/samples/glusterfs_${V0}.samp"
+BRICK_SAMPLES="$(gluster --print-logdir)/samples/glusterfsd__d_backends_${V0}0.samp"
+NFS_SAMPLES="$(gluster --print-logdir)/samples/glusterfs_nfsd.samp"
+
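+# The sample files are CSV; field 11 carries the path of the sampled
+# FOP. Print "Y" if a sample of the given op against the given path
+# was recorded.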
+function check_path {
+ op=$1
+ path=$2
+ file=$3
+    grep $op $file | awk -F, '{print $11}' | grep $path > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
function print_cnt() {
local FOP_TYPE=$1
- local FOP_CNT=$(grep ,${FOP_TYPE} ${SAMPLE_FILE} | wc -l)
+ local FOP_CNT=$(grep ,${FOP_TYPE} ${BRICK_SAMPLES} | wc -l)
echo $FOP_CNT
}
@@ -42,12 +56,18 @@ TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume set $V0 nfs.disable off
TEST $CLI volume set $V0 diagnostics.latency-measurement on
TEST $CLI volume set $V0 diagnostics.count-fop-hits on
-TEST $CLI volume set $V0 diagnostics.stats-dump-interval 2
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 5
TEST $CLI volume set $V0 diagnostics.fop-sample-buf-size 65535
TEST $CLI volume set $V0 diagnostics.fop-sample-interval 1
TEST $CLI volume set $V0 diagnostics.stats-dnscache-ttl-sec 3600
-
TEST $CLI volume start $V0
+
+>${NFS_SAMPLES}
+>${BRICK_SAMPLES}
+
+#################
+# Basic Samples #
+#################
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
for i in {1..5}
@@ -58,4 +78,52 @@ done
TEST ls -l $M0
EXPECT_WITHIN 6 "OK" check_samples
-cleanup
+sleep 2
+
+################################
+# Paths in the samples #
+################################
+
+TEST mount_nfs $H0:$V0 $N0
+
+ls $N0 &> /dev/null
+touch $N0/file1
+stat $N0/file1 &> /dev/null
+echo "some data" > $N0/file1
+dd if=/dev/zero of=$N0/file2 bs=1M count=10 conv=fsync
+dd if=/dev/zero of=$N0/file1 bs=1M count=1
+cat $N0/file2 &> /dev/null
+mkdir -p $N0/dir1
+rmdir $N0/dir1
+rm $N0/file1
+rm $N0/file2
+
+EXPECT_WITHIN 10 "Y" check_path CREATE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path LOOKUP /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path SETATTR /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path WRITE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FINODELK /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ENTRYLK / $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FLUSH /file2 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path TRUNCATE /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path MKDIR /dir1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path RMDIR /dir1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file1 $BRICK_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file2 $BRICK_SAMPLES
+
+
+EXPECT_WITHIN 10 "Y" check_path CREATE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path LOOKUP /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ACCESS /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path SETATTR /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path WRITE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path FLUSH /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path ACCESS /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path READ /file2 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path TRUNCATE /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path MKDIR /dir1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path RMDIR /dir1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file1 $NFS_SAMPLES
+EXPECT_WITHIN 10 "Y" check_path UNLINK /file2 $NFS_SAMPLES
+
+cleanup;
diff --git a/tests/basic/fops-sanity-gfproxy.t b/tests/basic/fops-sanity-gfproxy.t
new file mode 100755
index 00000000000..b3bb8a502cc
--- /dev/null
+++ b/tests/basic/fops-sanity-gfproxy.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+#gfproxy server
+TEST glusterfs --volfile-id=gfproxy/$V0 --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy.log
+
+#mount on a random dir
+TEST glusterfs --entry-timeout=3600 --attribute-timeout=3600 -s $H0 --volfile-id=gfproxy-client/$V0 $M0 --direct-io-mode=yes
+TEST grep gfproxy-client /proc/mounts
+
+build_tester $(dirname $0)/fops-sanity.c
+
+TEST cp $(dirname $0)/fops-sanity $M0
+cd $M0
+TEST ./fops-sanity $V0
+cd -
+rm -f $(dirname $0)/fops-sanity
+
+cleanup;
diff --git a/tests/basic/gfproxy.t b/tests/basic/gfproxy.t
new file mode 100644
index 00000000000..71c6788db76
--- /dev/null
+++ b/tests/basic/gfproxy.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
+
+cleanup;
+
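+# gfproxyd is a server-side proxy daemon: clients mount a thin volfile
+# and the proxy performs the clustering work on their behalf (see the
+# volfile checks below).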
+function start_gfproxyd {
+ glusterfs --volfile-id=gfproxy/${V0} --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy.log
+}
+
+function restart_gfproxyd {
+ pkill -f gfproxy/${V0}
+ start_gfproxyd
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 config.gfproxyd-remote-host $H0
+TEST $CLI volume start $V0
+
+sleep 2
+
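+# glusterd generates three volfiles here: the regular FUSE client
+# volfile, a thin gfproxy client volfile, and the gfproxyd volfile
+# that carries the cluster translators.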
+REGULAR_CLIENT_VOLFILE="/var/lib/glusterd/vols/${V0}/trusted-${V0}.tcp-fuse.vol"
+GFPROXY_CLIENT_VOLFILE="/var/lib/glusterd/vols/${V0}/trusted-${V0}.tcp-gfproxy-fuse.vol"
+GFPROXYD_VOLFILE="/var/lib/glusterd/vols/${V0}/${V0}.gfproxyd.vol"
+
+# Client volfile must exist
+TEST [ -f $GFPROXY_CLIENT_VOLFILE ]
+
+# AHA & write-behind translators must exist
+TEST grep "cluster/aha" $GFPROXY_CLIENT_VOLFILE
+TEST grep "performance/write-behind" $GFPROXY_CLIENT_VOLFILE
+
+# Make sure we didn't screw up the existing client
+TEST grep "performance/write-behind" $REGULAR_CLIENT_VOLFILE
+TEST grep "cluster/replicate" $REGULAR_CLIENT_VOLFILE
+TEST grep "cluster/distribute" $REGULAR_CLIENT_VOLFILE
+
+TEST [ -f $GFPROXYD_VOLFILE ]
+
+TEST grep "cluster/replicate" $GFPROXYD_VOLFILE
+TEST grep "cluster/distribute" $GFPROXYD_VOLFILE
+
+# AHA & write-behind must *not* exist
+TEST ! grep "cluster/aha" $GFPROXYD_VOLFILE
+TEST ! grep "performance/write-behind" $GFPROXYD_VOLFILE
+
+# Test that we can start the server and the client
+TEST start_gfproxyd
+TEST glusterfs --volfile-id=gfproxy-client/${V0} --volfile-server=$H0 -l /var/log/glusterfs/${V0}-gfproxy-client.log $M0
+sleep 2
+TEST grep gfproxy-client/${V0} /proc/mounts
+
+# Write data to the mount and checksum it
+TEST dd if=/dev/urandom bs=1M count=10 of=/tmp/testfile1
+md5=$(md5sum /tmp/testfile1 | awk '{print $1}')
+TEST cp -v /tmp/testfile1 $M0/testfile1
+TEST [ "$(md5sum $M0/testfile1 | awk '{print $1}')" == "$md5" ]
+
+rm /tmp/testfile1
+
+# Stress the gfproxy mount with a large write while restarting gfproxyd.
+dd if=/dev/zero of=$M0/bigfile bs=1M count=3072 &
+BG_STRESS_PID=$!
+
+sleep 3
+
+restart_gfproxyd
+
+TEST wait $BG_STRESS_PID
+
+cleanup;
diff --git a/tests/basic/glusterd/volfile_server_switch.t b/tests/basic/glusterd/volfile_server_switch.t
index 0b0e6470244..0b01398215c 100644
--- a/tests/basic/glusterd/volfile_server_switch.t
+++ b/tests/basic/glusterd/volfile_server_switch.t
@@ -1,5 +1,8 @@
#!/bin/bash
+#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
+
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../cluster.rc
diff --git a/tests/basic/halo-failover-disabled.t b/tests/basic/halo-failover-disabled.t
new file mode 100644
index 00000000000..f3655eaef3b
--- /dev/null
+++ b/tests/basic/halo-failover-disabled.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+#
+# Tests that fail-over does NOT happen for Halo Geo-replication when
+# cluster.halo-failover-enabled is off.
+#
+# 1. Create a volume @ 3x replication w/ halo + quorum enabled and
+#    fail-over disabled.
+# 2. Write some data, then kill the brick which is marked up.
+# 3. The expected result is that the spare brick is NOT swapped in,
+#    so quorum is lost and the mount becomes read-only.
+# 4. After force-starting the volume, quorum is restored and the
+#    mount is writable again.
+#
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../halo.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-max-latency 9999
+TEST $CLI volume set $V0 cluster.halo-shd-max-latency 9999
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.halo-failover-enabled off
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 2
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+TEST $CLI volume set $V0 nfs.log-level DEBUG
+
+# Use a large ping time here so the spare brick is not marked up
+# based on the ping time. The only way it can get marked up is
+# by being swapped in via the down event (which is what we are disabling).
+TEST $CLI volume set $V0 network.ping-timeout 1000
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# Make sure two children are up and one is down.
+EXPECT_WITHIN 10 "2 1" halo_sum_child_states 3
+
+# Write some data to the mount
+TEST dd if=/dev/urandom of=$M0/test bs=1k count=200 conv=fsync
+
+UP_IDX=$(cat /var/log/glusterfs/$M0LOG | grep "halo state: UP" | tail -n1 | grep -Eo "Child [0-9]+" | grep -Eo "[0-9]+")
+TEST kill_brick $V0 $H0 $B0/${V0}${UP_IDX}
+
+# Make sure two children are down and one is up.
+EXPECT_WITHIN 10 "1 2" halo_sum_child_states 3
+
+# Test that quorum fails and the mount is RO. Although there _is_
+# another brick running which _could_ take the failed brick's place,
+# it is not marked "up", so quorum will not be fulfilled. If we waited
+# 1000 seconds the brick would indeed be activated based on ping time,
+# but for this test we want the decision to be solely "down event"
+# driven, not ping driven.
+TEST ! dd if=/dev/urandom of=$M0/test_rw bs=1M count=1 conv=fsync
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 $UP_IDX
+
+# Test that quorum should be restored and the file is writable
+TEST dd if=/dev/urandom of=$M0/test_rw bs=1M count=1
+
+cleanup
diff --git a/tests/basic/halo-failover-enabled.t b/tests/basic/halo-failover-enabled.t
new file mode 100644
index 00000000000..2dddf9951fa
--- /dev/null
+++ b/tests/basic/halo-failover-enabled.t
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+# Tests that fail-over works correctly for Halo Geo-replication
+#
+# 1. Create a volume @ 3x replication w/ halo + quorum enabled
+# 2. Write some data, background it & fail a brick
+# 3. The expected result is that the writes fail-over to the 3rd
+#    brick immediately, and md5s will show they are equal once
+# the write completes.
+# 4. The mount should also be RW after the brick is killed as
+# quorum will be immediately restored by swapping in the
+# other brick.
+#
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../halo.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-failover-enabled on
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 2
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 network.ping-timeout 20
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+TEST $CLI volume set $V0 nfs.log-level DEBUG
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+# Make sure two children are up and one is down.
+EXPECT_WITHIN 10 "2 1" halo_sum_child_states 3
+
+# Write some data to the mount
+TEST dd if=/dev/urandom of=$M0/test bs=1k count=200 conv=fsync
+
+KILL_IDX=$(cat /var/log/glusterfs/$M0LOG | grep "halo state: UP" | tail -n1 | grep -Eo "Child [0-9]+" | grep -Eo "[0-9]+")
+TEST [ -n "$KILL_IDX" ]
+# NB: UP_CHILDREN is the set of children that should be up after we kill
+# the brick indicated by KILL_IDX, *not* the set of children which are
+# currently up!
+UP_CHILDREN=($(echo "0 1 2" | sed "s/${KILL_IDX}//g"))
+UP1_HAS_TEST="$(ls $B0/${V0}${UP_CHILDREN[0]}/test 2>/dev/null)"
+UP2_HAS_TEST="$(ls $B0/${V0}${UP_CHILDREN[1]}/test 2>/dev/null)"
+VICTIM_HAS_TEST="$(ls $B0/${V0}${KILL_IDX}/test 2>/dev/null)"
+
+# The victim brick should have a copy of the file.
+TEST [ -n "$VICTIM_HAS_TEST" ]
+
+# Of the bricks which will remain standing, only one should have the
+# file called test. If both have the first test file, the test is
+# invalid: all the bricks are up and halo-max-replicas is not being
+# honored, i.e. a bug exists.
+ONLY_ONE=false
+if ([ -z "$UP2_HAS_TEST" ] || [ -z "$UP1_HAS_TEST" ]) &&
+   ([ -n "$UP2_HAS_TEST" ] || [ -n "$UP1_HAS_TEST" ]); then
+    ONLY_ONE=true
+fi
+TEST [ "x$ONLY_ONE" == "xtrue" ]
+
+echo "Failing child ${KILL_IDX}..."
+TEST kill_brick $V0 $H0 $B0/${V0}${KILL_IDX}
+
+# Test the mount is still RW (i.e. quorum works)
+TEST dd if=/dev/urandom of=$M0/test_failover bs=1M count=1 conv=fsync
+
+# Calculate the MD5s
+MD5_UP1=$(md5sum $B0/${V0}${UP_CHILDREN[0]}/test_failover | cut -d' ' -f1)
+MD5_UP2=$(md5sum $B0/${V0}${UP_CHILDREN[1]}/test_failover | cut -d' ' -f1)
+
+# Verify the two up bricks have identical MD5s, if both are identical
+# then we must have successfully failed-over to the brick which was
+# previously proven to be down (via the ONLY_ONE test).
+TEST [ "$MD5_UP1" == "$MD5_UP2" ]
+
+cleanup
diff --git a/tests/basic/halo-hybrid.t b/tests/basic/halo-hybrid.t
new file mode 100644
index 00000000000..4574fdfe41e
--- /dev/null
+++ b/tests/basic/halo-hybrid.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# Test for the Halo hybrid feature
+#
+# 1. Create volume w/ 3x replication w/ max-replicas = 2 for clients,
+# heal daemon is off to start.
+# 2. Write some data
+# 3. Verify hybrid code chose children for lookups
+# 4. Verify hybrid code chose child for reads
+# 5. Verify hybrid code wrote synchronously to all replicas
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# Callers pass only the message to look for; report "Y" once it
+# appears in the FUSE mount's log.
+function found_fuse_log_msg {
+    local msg="$1"
+    local cnt=$(grep "$msg" /var/log/glusterfs/$M0LOG | tail -n1 | wc -l)
+    if (( $cnt == 1 )); then
+        echo "Y"
+    else
+        echo "N"
+    fi
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-hybrid-mode True
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume set $V0 diagnostics.client-log-level TRACE
+TEST $CLI volume start $V0
+
+# Start a synchronous mount
+TEST glusterfs --volfile-id=/$V0 \
+ --xlator-option *replicate*.halo-max-latency=9999 \
+ --volfile-server=$H0 $M0 \
+ --attribute-timeout=0 --entry-timeout=0
+sleep 2
+cd $M0
+
+TEST mkdir testdir
+TEST cd testdir
+for i in {1..5}
+do
+ dd if=/dev/urandom of=testfile$i bs=1M count=1 2>/dev/null
+done
+TEST ls -l
+
+EXPECT_WITHIN "60" "Y" found_fuse_log_msg "children for LOOKUPs"
+EXPECT_WITHIN "60" "Y" found_fuse_log_msg "Selected hybrid child"
+
+B0_CNT=$(ls $B0/${V0}0/testdir | wc -l)
+B1_CNT=$(ls $B0/${V0}1/testdir | wc -l)
+B2_CNT=$(ls $B0/${V0}2/testdir | wc -l)
+
+# Writes should be synchronous, all should have same
+# file count
+TEST "(($B0_CNT == 5 && $B1_CNT == 5 && $B2_CNT == 5))"
+
+cleanup
diff --git a/tests/basic/halo.t b/tests/basic/halo.t
new file mode 100644
index 00000000000..25aca3442ab
--- /dev/null
+++ b/tests/basic/halo.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+#
+# Test for the Halo geo-replication feature
+#
+# 1. Create volume w/ 3x replication w/ max-replicas = 2 for clients,
+# heal daemon is off to start.
+# 2. Write some data
+# 3. Verify at least one of the bricks did not receive the writes.
+# 4. Turn the heal daemon on
+# 5. Within 30 seconds the SHD should async heal the data over
+# to the 3rd brick.
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.shd-max-threads 1
+TEST $CLI volume set $V0 cluster.halo-enabled True
+TEST $CLI volume set $V0 cluster.halo-max-replicas 2
+TEST $CLI volume set $V0 cluster.halo-min-samples 1
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 cluster.choose-local off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+cd $M0
+
+for i in {1..5}
+do
+ dd if=/dev/urandom of=f bs=1M count=1 2>/dev/null
+ mkdir a; cd a;
+done
+
+B0_CNT=$(ls $B0/${V0}0 | wc -l)
+B1_CNT=$(ls $B0/${V0}1 | wc -l)
+B2_CNT=$(ls $B0/${V0}2 | wc -l)
+
+# One of the brick dirs should be empty
+TEST "(($B0_CNT == 0 || $B1_CNT == 0 || $B2_CNT == 0))"
+
+# Ok, turn the heal daemon on and verify it heals it up
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN 30 "0" get_pending_heal_count $V0
+cleanup
diff --git a/tests/basic/mount-nfs-auth.t b/tests/basic/mount-nfs-auth.t
index 9df5cb45c3b..99f032cbd44 100755
--- a/tests/basic/mount-nfs-auth.t
+++ b/tests/basic/mount-nfs-auth.t
@@ -15,6 +15,9 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
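+# Resolve this host's IPv4 and IPv6 addresses for building export rules.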
+H0IP=$(ip addr show |grep -w inet |grep -v 127.0.0.1|awk '{ print $2 }'| cut -d "/" -f 1)
+H0IP6=$(host $HOSTNAME | grep IPv6 | awk '{print $NF}')
+
# Export variables for allow & deny
EXPORT_ALLOW="/$V0 $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
EXPORT_ALLOW_SLASH="/$V0/ $H0(sec=sys,rw,anonuid=0) @ngtop(sec=sys,rw,anonuid=0)"
@@ -37,6 +40,10 @@ function build_dirs () {
mkdir -p $B0/b{0,1,2}/L1/L2/L3
}
+function export_allow_this_host_ipv6 () {
+ printf "$EXPORT_ALLOW6\n" > ${NFSDIR}/exports
+}
+
function export_allow_this_host () {
printf "$EXPORT_ALLOW\n" > ${NFSDIR}/exports
}
@@ -150,10 +157,7 @@ setup_cluster
TEST $CLI vol set $V0 nfs.disable off
TEST $CLI vol start $V0
-# Get NFS state directory
-NFSDIR=$( $CLI volume get patchy nfs.mount-rmtab | \
- awk '/^nfs.mount-rmtab/{print $2}' | \
- xargs dirname )
+NFSDIR=/var/lib/glusterd/nfs
## Wait for volume to register with rpc.mountd
EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
@@ -186,6 +190,11 @@ EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available
## Mount NFS
EXPECT "Y" check_mount_success $V0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
+
+## Mount NFS using the IPv6 export
+export_allow_this_host_ipv6
+EXPECT "Y" check_mount_success $V0
## Disallow host
TEST export_deny_this_host
diff --git a/tests/basic/write-behind.t b/tests/basic/write-behind.t
new file mode 100644
index 00000000000..edad59786af
--- /dev/null
+++ b/tests/basic/write-behind.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function clear_stats {
+    > /var/lib/glusterd/stats/glusterfsd__d_backends_${V0}0.dump
+}
+
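+# Parse the io-stats dump of the first brick and print "Y" if the
+# aggregated write-size counter (e.g. aggr.write_1mb) shows the
+# expected number of writes.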
+function got_expected_write_count {
+ expected_size=$1
+ expected_value=$2
+ grep aggr.write_${expected_size} "/var/lib/glusterd/stats/glusterfsd__d_backends_${V0}0.dump" | grep $expected_value
+ if [ $? == 0 ]; then
+ echo "Y";
+ else
+ echo "N";
+ fi
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+
+# These are needed for our tracking of write sizes
+TEST $CLI volume set $V0 diagnostics.latency-measurement on
+TEST $CLI volume set $V0 diagnostics.count-fop-hits on
+TEST $CLI volume set $V0 diagnostics.stats-dump-interval 2
+
+# Disable this in testing to get deterministic results
+TEST $CLI volume set $V0 performance.write-behind-trickling-writes off
+
+TEST $CLI volume start $V0
+
+sleep 2;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+# Write a 100MB file with a window-size 1MB, we should get 100 writes of 1MB each
+TEST dd if=/dev/zero of=$M0/100mb_file bs=1M count=100
+EXPECT_WITHIN 5 "Y" got_expected_write_count "1mb" 100
+
+TEST $CLI volume set $V0 performance.write-behind-window-size 512KB
+
+# Write a 100MB file with a window-size 512KB, we should get 200 writes of 512KB each
+TEST dd if=/dev/zero of=$M0/100mb_file_2 bs=1M count=100
+EXPECT_WITHIN 5 "Y" got_expected_write_count "512kb" 200
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1099890.t b/tests/bugs/distribute/bug-1099890.t
index 40f70d4938b..29ceccf2309 100644
--- a/tests/bugs/distribute/bug-1099890.t
+++ b/tests/bugs/distribute/bug-1099890.t
@@ -44,6 +44,8 @@ TEST $CLI volume set $V0 features.quota-deem-statfs on
TEST $CLI volume quota $V0 limit-usage / 150MB;
+TEST $CLI volume set $V0 cluster.du-refresh-interval-sec 1
+
TEST $CLI volume set $V0 cluster.min-free-disk 50%
TEST glusterfs -s $H0 --volfile-id=$V0 $M0
diff --git a/tests/bugs/distribute/bug-1161311.t b/tests/bugs/distribute/bug-1161311.t
index c5a7f041ac8..8cf905a8f0b 100755
--- a/tests/bugs/distribute/bug-1161311.t
+++ b/tests/bugs/distribute/bug-1161311.t
@@ -53,8 +53,14 @@ TEST glusterfs -s $H0 --volfile-id $V0 $M0;
TEST mkdir $M0/dir1
TEST mkdir -p $M0/dir2/dir3
-# Create a large file (1GB), so that rebalance takes time
-dd if=/dev/urandom of=$M0/dir1/FILE2 bs=64k count=10240
+# Create a large file (6.4 GB), so that rebalance takes time
+# Reading from /dev/urandom is slow, so we'll cat it together
+dd if=/dev/urandom of=/tmp/FILE2 bs=64k count=10240
+for i in {1..10}; do
+ cat /tmp/FILE2 >> $M0/dir1/FILE2
+done
+
# Rename the file to create a linkto, for rebalance to
# act on the file
diff --git a/tests/bugs/fuse/bug-858488-min-free-disk.t b/tests/bugs/fuse/bug-858488-min-free-disk.t
index 635dc04d1e6..ab636575d3f 100644
--- a/tests/bugs/fuse/bug-858488-min-free-disk.t
+++ b/tests/bugs/fuse/bug-858488-min-free-disk.t
@@ -23,6 +23,7 @@ TEST MOUNT_LOOP $LO2 $B0/${V0}2
## Lets create volume
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+TEST $CLI volume set $V0 cluster.du-refresh-interval-sec 1
## Verify volume is created
EXPECT "$V0" volinfo_field $V0 'Volume Name';
diff --git a/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t b/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
index 9fc7ac3b845..3bc80ab9dab 100644
--- a/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
+++ b/tests/bugs/glusterd/bug-1163108-min-free-disk-option-validation.t
@@ -1,6 +1,6 @@
#!/bin/bash
-## Test case for cluster.min-free-disk option validation.
+## Test case for cluster.min-free-disk option validation.
. $(dirname $0)/../../include.rc
@@ -17,21 +17,21 @@ TEST $CLI volume create $V0 $H0:$B0/brick1 $H0:$B0/brick2
TEST $CLI volume start $V0
## Setting invalid value for option cluster.min-free-disk should fail
-TEST ! $CLI volume set $V0 min-free-disk ""
-TEST ! $CLI volume set $V0 min-free-disk 143.!/12
-TEST ! $CLI volume set $V0 min-free-disk 123%
-TEST ! $CLI volume set $V0 min-free-disk 194.34%
+TEST ! $CLI volume set $V0 cluster.min-free-disk ""
+TEST ! $CLI volume set $V0 cluster.min-free-disk 143.!/12
+TEST ! $CLI volume set $V0 cluster.min-free-disk 123%
+TEST ! $CLI volume set $V0 cluster.min-free-disk 194.34%
## Setting fractional value as a size (unit is byte) for option
## cluster.min-free-disk should fail
-TEST ! $CLI volume set $V0 min-free-disk 199.051
-TEST ! $CLI volume set $V0 min-free-disk 111.999
+TEST ! $CLI volume set $V0 cluster.min-free-disk 199.051
+TEST ! $CLI volume set $V0 cluster.min-free-disk 111.999
## Setting valid value for option cluster.min-free-disk should pass
-TEST $CLI volume set $V0 min-free-disk 12%
-TEST $CLI volume set $V0 min-free-disk 56.7%
-TEST $CLI volume set $V0 min-free-disk 120
-TEST $CLI volume set $V0 min-free-disk 369.0000
+TEST $CLI volume set $V0 cluster.min-free-disk 12%
+TEST $CLI volume set $V0 cluster.min-free-disk 56.7%
+TEST $CLI volume set $V0 cluster.min-free-disk 120
+TEST $CLI volume set $V0 cluster.min-free-disk 369.0000
cleanup;
diff --git a/tests/bugs/glusterd/bug-859927.t b/tests/bugs/glusterd/bug-859927.t
index c30d2b852d4..1b9ca18c08a 100755
--- a/tests/bugs/glusterd/bug-859927.t
+++ b/tests/bugs/glusterd/bug-859927.t
@@ -44,12 +44,12 @@ TEST ! $CLI volume set $V0 min-free-inodes " "
TEST $CLI volume set $V0 min-free-inodes 60%
EXPECT "60%" volume_option $V0 cluster.min-free-inodes
-TEST ! $CLI volume set $V0 min-free-disk ""
-TEST ! $CLI volume set $V0 min-free-disk " "
-TEST $CLI volume set $V0 min-free-disk 60%
+TEST ! $CLI volume set $V0 cluster.min-free-disk ""
+TEST ! $CLI volume set $V0 cluster.min-free-disk " "
+TEST $CLI volume set $V0 cluster.min-free-disk 60%
EXPECT "60%" volume_option $V0 cluster.min-free-disk
-TEST $CLI volume set $V0 min-free-disk 120
+TEST $CLI volume set $V0 cluster.min-free-disk 120
EXPECT "120" volume_option $V0 cluster.min-free-disk
TEST ! $CLI volume set $V0 frame-timeout ""
diff --git a/tests/bugs/quota/bug-1292020.t b/tests/bugs/quota/bug-1292020.t
index 14b311c9d76..f713c74859b 100644
--- a/tests/bugs/quota/bug-1292020.t
+++ b/tests/bugs/quota/bug-1292020.t
@@ -4,10 +4,12 @@
. $(dirname $0)/../../volume.rc
function write_sample_data () {
- dd if=/dev/zero of=$M0/f1 bs=256k count=400 2>&1 | grep -i exceeded
+ dd if=/dev/zero of=$M0/f1 bs=256k count=400 2>&1 |
+ egrep -i 'exceeded|no space' && echo 'passed'
}
cleanup;
+rm -f /tmp/kbv.log
TEST glusterd;
TEST pidof glusterd;
@@ -18,7 +20,8 @@ TEST $CLI volume quota $V0 enable;
TEST $CLI volume quota $V0 limit-usage / 1
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
-EXPECT "exceeded" write_sample_data
+
+EXPECT "passed" write_sample_data
TEST $CLI volume stop $V0
TEST $CLI volume delete $V0
diff --git a/tests/cluster.rc b/tests/cluster.rc
index 467bbcb06e1..42547f09e37 100644
--- a/tests/cluster.rc
+++ b/tests/cluster.rc
@@ -46,17 +46,18 @@ function define_glusterds() {
bopt="management.transport.socket.bind-address=${!h}";
popt="--pid-file=${!b}/glusterd.pid";
sopt="management.glusterd-sockfile=${!b}/glusterd/gd.sock"
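+        # Pin each glusterd's management transport to IPv4 (inet) so
+        # the multi-daemon cluster tests stay on a deterministic family.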
+ aopt="*.transport.address-family=inet"
#Get the logdir
logdir=`gluster --print-logdir`
#Fetch the testcases name and prefix the glusterd log with it
logfile=`echo ${0##*/}`_glusterd$i.log
lopt="--log-file=$logdir/$logfile"
if [ "$2" == "-LDEBUG" ]; then
- eval "glusterd_$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'";
- eval "glusterd$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'";
+ eval "glusterd_$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt --xlator-option $aopt $lopt $popt'";
+ eval "glusterd$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt --xlator-option $aopt $lopt $popt'";
else
- eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'";
- eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt $lopt $popt'";
+ eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt --xlator-option $aopt $lopt $popt'";
+ eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $sopt --xlator-option $aopt $lopt $popt'";
fi
done
}
diff --git a/tests/configfiles/exports-v6 b/tests/configfiles/exports-v6
new file mode 100644
index 00000000000..426b1ef5705
--- /dev/null
+++ b/tests/configfiles/exports-v6
@@ -0,0 +1 @@
+/test @test(rw,anonuid=0,sec=sys,) 2401:db00:11:1:face:0:3d:0(rw,anonuid=0,sec=sys,)
diff --git a/tests/env.rc.in b/tests/env.rc.in
index 82971c4a8de..87befc3711d 100644
--- a/tests/env.rc.in
+++ b/tests/env.rc.in
@@ -28,3 +28,6 @@ export PYTHON
PYTHONPATH=@BUILD_PYTHON_SITE_PACKAGES@:$PYTHON_PATH
export PYTHONPATH
+
+TESTER_CFLAGS="@TESTER_CFLAGS@"
+export TESTER_CFLAGS
diff --git a/tests/features/brick-min-free-space.t b/tests/features/brick-min-free-space.t
new file mode 100755
index 00000000000..4372998681f
--- /dev/null
+++ b/tests/features/brick-min-free-space.t
@@ -0,0 +1,113 @@
+#!/bin/bash
+#
+# Test storage.min-free-disk option works.
+#
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+
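+# Back the brick with a 16MB sparse file on a loop device so it sits
+# on a tiny XFS filesystem that is easy to fill.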
+TEST truncate -s 16M $B0/brick0
+TEST LOOPDEV=$(losetup --find --show $B0/brick0)
+TEST mkfs.xfs $LOOPDEV
+
+mkdir -p $B0/$V0
+
+TEST mount -t xfs $LOOPDEV $B0/$V0
+
+###########
+# AIO on #
+###########
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 readdir-ahead on
+TEST $CLI vol set $V0 storage.linux-aio on
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Filesystem has ~12MB capacity after XFS and glusterfs overhead.
+# A 16MB write should blow up.
+TEST ! dd if=/dev/zero of=$M0/test bs=1M count=16 oflag=direct
+TEST rm $M0/test
+
+# But we should be able to write 10MB
+TEST dd if=/dev/zero of=$M0/test bs=1M count=10 oflag=direct
+
+# Now enable the limit and require at least 8MB (8388608 bytes) of free space.
+TEST $CLI volume set $V0 storage.freespace-check-interval 1
+TEST $CLI volume set $V0 storage.min-free-disk 8388608
+
+# Now even a tiny write ought to fail.
+TEST ! dd if=/dev/zero of=$M0/test1 bs=1M count=1 oflag=direct
+TEST rm $M0/test1
+
+# Repeat using percent syntax.
+TEST $CLI volume set $V0 storage.min-free-disk 33%
+
+TEST ! dd if=/dev/zero of=$M0/test1 bs=4K count=1 oflag=direct
+TEST rm $M0/test1
+
+# Disable limit.
+TEST $CLI volume set $V0 storage.freespace-check-interval 0
+
+# Now we can write again.
+TEST dd if=/dev/zero of=$M0/test1 bs=4K count=1 oflag=direct
+
+TEST rm $M0/test1
+TEST rm $M0/test
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+############
+# AIO off #
+############
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 readdir-ahead on
+TEST $CLI vol set $V0 storage.linux-aio off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Filesystem has ~12MB capacity after XFS and glusterfs overhead.
+# A 16MB write should blow up.
+TEST ! dd if=/dev/zero of=$M0/test bs=1M count=16 oflag=direct
+TEST rm $M0/test
+
+# But we should be able to write 10MB
+TEST dd if=/dev/zero of=$M0/test bs=1M count=10 oflag=direct
+
+# Now enable the limit and require at least 8MB (8388608 bytes) of free space.
+TEST $CLI volume set $V0 storage.freespace-check-interval 1
+TEST $CLI volume set $V0 storage.min-free-disk 8388608
+
+# Now even a tiny write ought to fail.
+TEST ! dd if=/dev/zero of=$M0/test1 bs=1M count=1 oflag=direct
+TEST rm $M0/test1
+
+# Repeat using percent syntax.
+TEST $CLI volume set $V0 storage.min-free-disk 33%
+
+TEST ! dd if=/dev/zero of=$M0/test1 bs=4K count=1 oflag=direct
+TEST rm $M0/test1
+
+# Disable limit.
+TEST $CLI volume set $V0 storage.freespace-check-interval 0
+
+# Now we can write again.
+TEST dd if=/dev/zero of=$M0/test1 bs=4K count=1 oflag=direct
+
+TEST rm $M0/test1
+TEST rm $M0/test
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/features/lock_revocation.t b/tests/features/lock_revocation.t
new file mode 100644
index 00000000000..cbf21b71650
--- /dev/null
+++ b/tests/features/lock_revocation.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+logdir=$(gluster --print-logdir)
+BRICK_LOGFILES="$logdir/bricks/d-backends-brick?.log"
+rm -f $BRICK_LOGFILES &> /dev/null
+
+# Test that lock revocation works
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+cleanup;
+
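+# With features.locks-monkey-unlocking on, the brick randomly drops
+# unlock requests and logs "MONKEY LOCKING"; keep writing until a drop
+# is observed, leaving a lock stuck on the file.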
+function deadlock_fop() {
+ local MNT=$1
+ for i in {1..1000}; do
+ dd if=/dev/zero of=$MNT/testfile bs=1k count=10 &> /dev/null
+ if grep "MONKEY LOCKING" $BRICK_LOGFILES &> /dev/null; then
+ break
+ fi
+ done
+}
+
+function monkey_unlock() {
+ grep "MONKEY LOCKING" $BRICK_LOGFILES &> /dev/null && echo SUCCESS
+ return 0
+}
+
+function append_to_file() {
+ local FILE_PATH=$1
+ echo "hello" >> $FILE_PATH
+ return 0
+}
+
+#Init
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 features.locks-monkey-unlocking on
+TEST $CLI volume set $V0 features.locks-revocation-secs 2
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 -s $H0 $M0;
+TEST $GFS --volfile-id=$V0 -s $H0 $M1;
+
+# Deadlock writes to a file using monkey unlocking
+deadlock_fop $M0 &
+EXPECT_WITHIN 60 "SUCCESS" monkey_unlock
+
+# Sleep > unlock timeout and attempt to write to the file
+sleep 3
+TEST append_to_file $M1/testfile
+
+cleanup
diff --git a/tests/halo.rc b/tests/halo.rc
new file mode 100644
index 00000000000..4cb7c81da85
--- /dev/null
+++ b/tests/halo.rc
@@ -0,0 +1,52 @@
+# Return the current Halo state of a given child (by index, i.e. 0
+# is first child).
+function halo_child_state {
+ grep "Child $1 .*halo state: " /var/log/glusterfs/$M0LOG |
+ tail -n1 | sed 's/^.* halo state: //' | sed 's/ .*$//'
+}
+
+# Return number of Halo children which are in a given state.
+# First parameter is total # children.
+# Second parameter is state to match (e.g. "UP").
+function halo_children_in_state {
+ local CHILD_COUNT=$1
+ local SUM=0
+ for CHILD in $(seq 0 $((CHILD_COUNT-1))); do
+ if [ x"$(halo_child_state $CHILD)" == x"$2" ]; then
+ SUM=$((SUM+1))
+ fi
+ done
+ echo $SUM
+}
+
+# Return number of up halo children.
+# First parameter is total # children.
+function halo_children_up {
+ echo $(halo_children_in_state $1 "UP")
+}
+
+# Return number of down halo children.
+# First parameter is total # children.
+function halo_children_down {
+ echo $(halo_children_in_state $1 "DOWN")
+}
+
+# Return number of up & down halo children.
+# First parameter is total number of children.
+function halo_sum_child_states {
+ local CHILD_COUNT=$1
+
+ local UP=0
+ local DOWN=0
+
+ for CHILD in $(seq 0 $((CHILD_COUNT-1))); do
+ local STATE=$(halo_child_state $CHILD)
+ if [ x"$STATE" == x"UP" ]; then
+ UP=$((UP+1))
+ elif [ x"$STATE" == x"DOWN" ]; then
+ DOWN=$((DOWN+1))
+ fi
+ done
+
+ echo "$UP $DOWN"
+}
diff --git a/tests/include.rc b/tests/include.rc
index 492e35a7b6c..9f32e88f5f5 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -19,6 +19,8 @@ META_MNT=${META_MNT:=/var/run/gluster/shared_storage}; # Mount point of shared g
CC=cc
OSTYPE=$(uname -s)
+M0LOG=${M0LOG:="mnt-glusterfs-0.log"}; # Log file for 0th FUSE mount point
+
ENV_RC=$(dirname $0)/../env.rc
if [ ! -f $ENV_RC ]; then
ENV_RC=$(dirname $0)/../../env.rc
@@ -612,6 +614,7 @@ function build_tester ()
then
cflags="$cflags $(pkg-config glusterfs-api --cflags-only-I --libs-only-L)"
fi
+ cflags="$cflags ${TESTER_CFLAGS}"
$CC -g -o $(dirname $cfile)/$execname $cfile $cflags
}