Diffstat (limited to 'tests')
-rw-r--r--  tests/00-geo-rep/00-georep-verify-non-root-setup.t | 294
-rw-r--r--  tests/00-geo-rep/00-georep-verify-setup.t | 16
-rw-r--r--  tests/00-geo-rep/01-georep-glusterd-tests.t | 213
-rw-r--r--  tests/00-geo-rep/bug-1708603.t | 63
-rw-r--r--  tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t | 15
-rw-r--r--  tests/00-geo-rep/georep-basic-dr-rsync.t | 41
-rw-r--r--  tests/00-geo-rep/georep-basic-dr-tarssh-arbiter.t | 11
-rw-r--r--  tests/00-geo-rep/georep-basic-dr-tarssh.t | 11
-rw-r--r--  tests/00-geo-rep/georep-basic-rsync-ec.t | 224
-rw-r--r--  tests/00-geo-rep/georep-basic-tarssh-ec.t | 223
-rw-r--r--  tests/00-geo-rep/georep-config-upgrade.t | 132
-rw-r--r--  tests/00-geo-rep/georep-stderr-hang.t | 128
-rw-r--r--  tests/00-geo-rep/georep-upgrade.t | 79
-rw-r--r--  tests/00-geo-rep/gsyncd.conf.old | 47
-rw-r--r--  tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t (renamed from tests/basic/afr/split-brain-favorite-child-policy.t) | 11
-rw-r--r--  tests/000-flaky/basic_changelog_changelog-snapshot.t | 60
-rw-r--r--  tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t (renamed from tests/basic/distribute/rebal-all-nodes-migrate.t) | 8
-rw-r--r--  tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t | 50
-rw-r--r--[-rwxr-xr-x]  tests/000-flaky/basic_mount-nfs-auth.t (renamed from tests/basic/mount-nfs-auth.t) | 0
-rw-r--r--  tests/000-flaky/bugs_core_multiplex-limit-issue-151.t (renamed from tests/bugs/core/multiplex-limit-issue-151.t) | 6
-rw-r--r--[-rwxr-xr-x]  tests/000-flaky/bugs_distribute_bug-1117851.t (renamed from tests/bugs/distribute/bug-1117851.t) | 4
-rw-r--r--  tests/000-flaky/bugs_distribute_bug-1122443.t (renamed from tests/bugs/distribute/bug-1122443.t) | 17
-rw-r--r--  tests/000-flaky/bugs_glusterd_bug-857330/common.rc (renamed from tests/bugs/glusterd/bug-857330/common.rc) | 2
-rwxr-xr-x  tests/000-flaky/bugs_glusterd_bug-857330/normal.t (renamed from tests/bugs/glusterd/bug-857330/normal.t) | 4
-rwxr-xr-x  tests/000-flaky/bugs_glusterd_bug-857330/xml.t (renamed from tests/bugs/glusterd/bug-857330/xml.t) | 4
-rw-r--r--[-rwxr-xr-x]  tests/000-flaky/bugs_glusterd_quorum-value-check.t (renamed from tests/bugs/glusterd/quorum-value-check.t) | 6
-rw-r--r--  tests/000-flaky/bugs_nfs_bug-1116503.t (renamed from tests/bugs/nfs/bug-1116503.t) | 6
-rw-r--r--  tests/000-flaky/features_lock-migration_lkmigration-set-option.t (renamed from tests/features/lock-migration/lkmigration-set-option.t) | 4
-rw-r--r--  tests/afr.rc | 16
-rw-r--r--  tests/basic/afr/afr-anon-inode-no-quorum.t | 63
-rw-r--r--  tests/basic/afr/afr-anon-inode.t | 114
-rw-r--r--  tests/basic/afr/afr-seek.t | 55
-rw-r--r--  tests/basic/afr/durability-off.t | 2
-rw-r--r--  tests/basic/afr/entry-self-heal-anon-dir-off.t | 459
-rw-r--r--  tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t | 9
-rw-r--r--  tests/basic/afr/gfid-self-heal.t | 16
-rw-r--r--  tests/basic/afr/halo.t | 61
-rw-r--r--  tests/basic/afr/rename-data-loss.t | 72
-rw-r--r--  tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t | 124
-rw-r--r--  tests/basic/afr/split-brain-heal-info.t | 2
-rw-r--r--  tests/basic/afr/split-brain-healing-ctime.t | 252
-rw-r--r--  tests/basic/afr/split-brain-healing.t | 37
-rw-r--r--  tests/basic/afr/split-brain-resolution.t | 12
-rw-r--r--  tests/basic/afr/ta-read.t | 16
-rw-r--r--  tests/basic/afr/ta-shd.t | 6
-rw-r--r--  tests/basic/afr/ta-write-on-bad-brick.t | 14
-rw-r--r--  tests/basic/changelog/changelog-api.t | 37
-rw-r--r--  tests/basic/changelog/changelog-history.t | 13
-rw-r--r--  tests/basic/changelog/history-api.t | 42
-rw-r--r--  tests/basic/cloudsync-sanity.t | 7
-rw-r--r--  tests/basic/ctime/ctime-ec-heal.t | 70
-rw-r--r--  tests/basic/ctime/ctime-ec-rebalance.t | 43
-rw-r--r--  tests/basic/ctime/ctime-mdata-legacy-files.t | 83
-rw-r--r--  tests/basic/ctime/ctime-readdir.c | 29
-rw-r--r--  tests/basic/ctime/ctime-readdir.t | 50
-rw-r--r--  tests/basic/ctime/ctime-rep-heal.t | 70
-rw-r--r--  tests/basic/ctime/ctime-rep-rebalance.t | 41
-rw-r--r--  tests/basic/ctime/ctime-utimesat.t | 28
-rw-r--r--  tests/basic/distribute/brick-down.t | 83
-rw-r--r--  tests/basic/distribute/dir-heal.t | 145
-rw-r--r--  tests/basic/distribute/file-rename.t | 1021
-rw-r--r--  tests/basic/distribute/spare_file_rebalance.t | 51
-rw-r--r--  tests/basic/ec/ec-1468261.t | 18
-rw-r--r--  tests/basic/ec/ec-badfd.c | 124
-rwxr-xr-x  tests/basic/ec/ec-badfd.t | 26
-rw-r--r--  tests/basic/ec/ec-cpu-extensions.t | 3
-rw-r--r--  tests/basic/ec/ec-dirty-flags.t | 23
-rw-r--r--  tests/basic/ec/ec-fix-openfd.t | 2
-rw-r--r--  tests/basic/ec/ec-quorum-count.t | 167
-rw-r--r--  tests/basic/ec/ec-read-mask.t | 114
-rw-r--r--  tests/basic/ec/ec-reset-brick.t | 50
-rw-r--r--  tests/basic/ec/ec-root-heal.t | 3
-rw-r--r--  tests/basic/ec/ec-seek.t | 3
-rw-r--r--  tests/basic/ec/ec-stripe.t | 2
-rw-r--r--  tests/basic/ec/gfapi-ec-open-truncate.c | 171
-rw-r--r--  tests/basic/ec/gfapi-ec-open-truncate.t | 48
-rw-r--r--  tests/basic/ec/self-heal-read-write-fail.t | 69
-rw-r--r--  tests/basic/ec/self-heal.t | 2
-rw-r--r--  tests/basic/fencing/afr-lock-heal-advanced.c | 227
-rw-r--r--  tests/basic/fencing/afr-lock-heal-advanced.t | 115
-rw-r--r--  tests/basic/fencing/afr-lock-heal-basic.c | 182
-rw-r--r--  tests/basic/fencing/afr-lock-heal-basic.t | 102
-rwxr-xr-x  tests/basic/first-test.t | 10
-rw-r--r--  tests/basic/fops-sanity.c | 1
-rw-r--r--  tests/basic/fuse/active-io-graph-switch.t | 65
-rw-r--r--  tests/basic/gfapi/bug-1507896.c | 49
-rw-r--r--  tests/basic/gfapi/bug-1507896.t | 33
-rw-r--r--  tests/basic/gfapi/gfapi-async-calls-test.c | 358
-rw-r--r--  tests/basic/gfapi/gfapi-copy-file-range.t | 16
-rw-r--r--  tests/basic/gfapi/gfapi-graph-switch-open-fd.t | 44
-rw-r--r--  tests/basic/gfapi/gfapi-keep-writing.c | 129
-rw-r--r--  tests/basic/gfapi/gfapi-ssl-load-volfile-test.c | 127
-rwxr-xr-x  tests/basic/gfapi/gfapi-ssl-load-volfile-test.t | 76
-rw-r--r--  tests/basic/gfapi/glfs_h_creat_open.c | 118
-rwxr-xr-x  tests/basic/gfapi/glfs_h_creat_open.t | 27
-rw-r--r--  tests/basic/gfapi/glfsxmp-coverage.c | 1900
-rw-r--r--  tests/basic/gfapi/glfsxmp.t | 30
-rw-r--r--  tests/basic/gfapi/protocol-client-ssl.vol.in | 15
-rw-r--r--  tests/basic/global-threading.t | 104
-rw-r--r--  tests/basic/glusterd-restart-shd-mux.t | 96
-rw-r--r--  tests/basic/glusterd/arbiter-volume.t | 29
-rw-r--r--  tests/basic/glusterd/check-cloudsync-ancestry.t | 48
-rw-r--r--  tests/basic/glusterd/disperse-create.t | 4
-rw-r--r--  tests/basic/glusterd/heald.t | 49
-rw-r--r--  tests/basic/glusterd/thin-arbiter-volume-probe.t | 25
-rw-r--r--  tests/basic/glusterd/thin-arbiter-volume.t | 45
-rw-r--r--  tests/basic/glusterd/volfile_server_switch.t | 2
-rw-r--r--  tests/basic/glusterd/volume-brick-count.t | 61
-rw-r--r--  tests/basic/graph-cleanup-brick-down-shd-mux.t | 64
-rw-r--r--  tests/basic/metadisp/fsyncdir.c | 29
-rw-r--r--  tests/basic/metadisp/ftruncate.c | 34
-rw-r--r--  tests/basic/metadisp/fxattr.c | 107
-rw-r--r--  tests/basic/metadisp/gfs-fsetxattr.c | 141
-rw-r--r--  tests/basic/metadisp/metadisp.t | 316
-rw-r--r--  tests/basic/metadisp/metadisp.vol | 14
-rwxr-xr-x  tests/basic/mount.t | 3
-rw-r--r--  tests/basic/multiple-volume-shd-mux.t | 46
-rwxr-xr-x  tests/basic/nl-cache.t | 30
-rw-r--r--  tests/basic/open-behind/open-behind.t | 183
-rw-r--r--  tests/basic/open-behind/tester-fd.c | 99
-rw-r--r--  tests/basic/open-behind/tester.c | 444
-rw-r--r--  tests/basic/open-behind/tester.h | 145
-rwxr-xr-x  tests/basic/playground/template-xlator-sanity.t | 18
-rw-r--r--  tests/basic/posix/shared-statfs.t | 11
-rw-r--r--  tests/basic/posix/zero-fill-enospace.c | 7
-rw-r--r--  tests/basic/quick-read-with-upcall.t | 13
-rwxr-xr-x  tests/basic/rpc-coverage.sh | 14
-rwxr-xr-x  tests/basic/rpc-coverage.t | 4
-rw-r--r--  tests/basic/sdfs-sanity.t | 6
-rw-r--r--  tests/basic/seek.c (renamed from tests/basic/ec/seek.c) | 0
-rw-r--r--  tests/basic/shd-mux-afr.t | 70
-rw-r--r--  tests/basic/shd-mux-ec.t | 75
-rwxr-xr-x  tests/basic/trace.t | 55
-rw-r--r--  tests/basic/uss.t | 34
-rw-r--r--  tests/basic/volume-scale-shd-mux.t | 116
-rw-r--r--  tests/basic/volume-snap-scheduler.t | 49
-rwxr-xr-x  tests/basic/volume-snapshot-xml.t | 6
-rw-r--r--  tests/basic/volume-status.t | 20
-rw-r--r--[-rwxr-xr-x]  tests/basic/volume.t | 36
-rw-r--r--  tests/bitrot/br-signer-threads-config-1797869.t | 73
-rw-r--r--  tests/bitrot/bug-1373520.t | 3
-rw-r--r--  tests/bitrot/bug-1700078.t | 87
-rw-r--r--  tests/bugs/bitrot/bug-1227996.t | 1
-rw-r--r--  tests/bugs/bitrot/bug-1245981.t | 4
-rwxr-xr-x  tests/bugs/bug-1064147.t | 72
-rw-r--r--  tests/bugs/bug-1371806.t | 1
-rw-r--r--  tests/bugs/bug-1371806_acl.t | 6
-rw-r--r--  tests/bugs/bug-1620580.t | 67
-rw-r--r--  tests/bugs/bug-1694920.t | 63
-rw-r--r--  tests/bugs/bug-1702299.t | 67
-rwxr-xr-x  tests/bugs/cli/bug-1320388.t | 2
-rw-r--r--  tests/bugs/cli/bug-1378842-volume-get-all.t | 3
-rw-r--r--  tests/bugs/cli/bug-983317-volume-get.t | 13
-rwxr-xr-x  tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t | 9
-rw-r--r--  tests/bugs/core/bug-1650403.t | 3
-rw-r--r--  tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t | 33
-rwxr-xr-x  tests/bugs/ctime/issue-832.t | 32
-rwxr-xr-x  tests/bugs/distribute/bug-1161311.t | 27
-rw-r--r--  tests/bugs/distribute/bug-1193636.t | 4
-rw-r--r--  tests/bugs/distribute/bug-1600379.t | 54
-rwxr-xr-x  tests/bugs/distribute/bug-1786679.t | 69
-rwxr-xr-x  tests/bugs/distribute/issue-1327.t | 33
-rwxr-xr-x  tests/bugs/distribute/overlap.py | 2
-rw-r--r--  tests/bugs/ec/bug-1236065.t | 1
-rw-r--r--  tests/bugs/ec/bug-1699866-check-reopen-fd.t | 34
-rw-r--r--  tests/bugs/ec/bug-1708156-honor-inodelk-contention-notify-on-partial-locks.t | 54
-rwxr-xr-x  tests/bugs/fuse/bug-858215.t | 4
-rw-r--r--  tests/bugs/fuse/bug-985074.t | 5
-rwxr-xr-x  tests/bugs/fuse/many-groups-for-acl.t | 13
-rw-r--r--  tests/bugs/gfapi/bug-1319374.c | 1
-rw-r--r--  tests/bugs/gfapi/bug-1447266/bug-1447266.t | 2
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation-in-cluster.t | 59
-rw-r--r--  tests/bugs/glusterd/brick-mux-validation.t | 4
-rw-r--r--  tests/bugs/glusterd/brick-mux.t | 2
-rw-r--r--  tests/bugs/glusterd/brick-order-check-add-brick.t | 61
-rw-r--r--  tests/bugs/glusterd/bug-1595320.t | 2
-rw-r--r--  tests/bugs/glusterd/bug-1696046.t | 113
-rw-r--r--  tests/bugs/glusterd/bug-1699339.t | 73
-rw-r--r--  tests/bugs/glusterd/bug-1720566.t | 50
-rw-r--r--  tests/bugs/glusterd/check_elastic_server.t | 63
-rw-r--r--  tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t | 18
-rw-r--r--  tests/bugs/glusterd/optimized-basic-testcases.t | 27
-rw-r--r--  tests/bugs/glusterd/quorum-validation.t | 6
-rw-r--r--  tests/bugs/glusterd/rebalance-in-cluster.t | 9
-rw-r--r--  tests/bugs/glusterd/rebalance-operations-in-single-node.t | 4
-rw-r--r--  tests/bugs/glusterd/remove-brick-validation.t (renamed from tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t) | 14
-rw-r--r--  tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t | 1
-rw-r--r--  tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t | 8
-rw-r--r--  tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t | 54
-rw-r--r--  tests/bugs/glusterd/validating-options-for-replicated-volume.t | 13
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-852147.t | 2
-rw-r--r--  tests/bugs/glusterfs-server/bug-873549.t | 2
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-877992.t | 4
-rwxr-xr-x  tests/bugs/glusterfs-server/bug-887145.t | 14
-rwxr-xr-x  tests/bugs/glusterfs/bug-844688.t | 43
-rw-r--r--  tests/bugs/glusterfs/bug-873962-spb.t | 1
-rwxr-xr-x  tests/bugs/glusterfs/bug-902610.t | 2
-rwxr-xr-x  tests/bugs/logging/bug-823081.t | 8
-rw-r--r--  tests/bugs/md-cache/bug-1726205.t | 22
-rw-r--r--  tests/bugs/nfs/showmount-many-clients.t | 2
-rwxr-xr-x  tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t | 8
-rw-r--r--  tests/bugs/posix/bug-1651445.t | 54
-rw-r--r--  tests/bugs/protocol/bug-1321578.t | 53
-rw-r--r--  tests/bugs/protocol/bug-1390914.t | 36
-rw-r--r--  tests/bugs/protocol/bug-1433815-auth-allow.t | 1
-rwxr-xr-x  tests/bugs/readdir-ahead/bug-1436090.t | 12
-rwxr-xr-x  tests/bugs/replicate/bug-1046624.t | 3
-rw-r--r--  tests/bugs/replicate/bug-1101647.t | 2
-rw-r--r--  tests/bugs/replicate/bug-1130892.t | 13
-rw-r--r--  tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t | 2
-rw-r--r--  tests/bugs/replicate/bug-1180545.t | 35
-rw-r--r--  tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t | 10
-rw-r--r--  tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t | 18
-rw-r--r--  tests/bugs/replicate/bug-1468279-source-not-blaming-sinks.t | 64
-rw-r--r--  tests/bugs/replicate/bug-1493415-gfid-heal.t | 10
-rw-r--r--  tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t | 2
-rw-r--r--  tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t | 38
-rwxr-xr-x  tests/bugs/replicate/bug-1696599-io-hang.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t | 136
-rw-r--r--  tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t | 116
-rw-r--r--  tests/bugs/replicate/bug-1728770-pass-xattrs.t | 52
-rw-r--r--  tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t | 102
-rw-r--r--  tests/bugs/replicate/bug-1744548-heal-timeout.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t | 89
-rw-r--r--  tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t | 111
-rw-r--r--  tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t | 74
-rw-r--r--  tests/bugs/replicate/bug-1801624-entry-heal.t | 58
-rw-r--r--  tests/bugs/replicate/bug-880898.t | 7
-rwxr-xr-x  tests/bugs/replicate/bug-977797.t | 4
-rw-r--r--  tests/bugs/replicate/issue-1254-prioritize-enospc.t | 80
-rw-r--r--  tests/bugs/replicate/mdata-heal-no-xattrs.t | 59
-rw-r--r--  tests/bugs/replicate/ta-inode-refresh-read.t | 40
-rwxr-xr-x  tests/bugs/rpc/bug-954057.t | 10
-rw-r--r--  tests/bugs/shard/bug-1272986.t | 6
-rw-r--r--  tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t | 34
-rw-r--r--  tests/bugs/shard/bug-1696136.c | 122
-rw-r--r--  tests/bugs/shard/bug-1696136.t | 33
-rw-r--r--  tests/bugs/shard/bug-1705884.t | 32
-rw-r--r--  tests/bugs/shard/bug-1738419.t | 29
-rw-r--r--  tests/bugs/shard/bug-shard-discard.c | 5
-rw-r--r--  tests/bugs/shard/issue-1243.t | 43
-rw-r--r--  tests/bugs/shard/issue-1281.t | 34
-rw-r--r--  tests/bugs/shard/issue-1425.t | 45
-rw-r--r--  tests/bugs/shard/shard-fallocate.c | 7
-rw-r--r--  tests/bugs/snapshot/bug-1109889.t | 4
-rwxr-xr-x  tests/bugs/snapshot/bug-1111041.t | 10
-rw-r--r--  tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t | 43
-rw-r--r--  tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t | 1
-rw-r--r--  tests/bugs/snapshot/bug-1597662.t | 3
-rwxr-xr-x  tests/bugs/transport/bug-873367.t | 2
-rw-r--r--  tests/bugs/write-behind/issue-884.c | 267
-rwxr-xr-x  tests/bugs/write-behind/issue-884.t | 40
-rw-r--r--  tests/cluster.rc | 41
-rw-r--r--  tests/ec.rc | 9
-rwxr-xr-x  tests/encryption/crypt.t | 90
-rw-r--r--  tests/encryption/frag.c | 324
-rw-r--r--  tests/env.rc.in | 2
-rwxr-xr-x  tests/features/delay-gen.t | 12
-rw-r--r--  tests/features/flock_interrupt.t | 9
-rw-r--r--  tests/features/fuse-lru-limit.t | 1
-rw-r--r--  tests/features/interrupt.t | 14
-rwxr-xr-x  tests/features/ssl-authz.t | 26
-rw-r--r--  tests/features/ssl-ciphers.t | 85
-rw-r--r--  tests/features/subdir-mount.t | 11
-rwxr-xr-x  tests/features/trash.t | 74
-rwxr-xr-x  tests/features/worm.t | 39
-rw-r--r--  tests/geo-rep.rc | 104
-rw-r--r--  tests/glusterfind/glusterfind-basic.t | 84
-rw-r--r--  tests/include.rc | 71
-rw-r--r--  tests/line-coverage/afr-heal-info.t | 43
-rwxr-xr-x  tests/line-coverage/arbiter-coverage.t | 32
-rw-r--r--  tests/line-coverage/cli-peer-and-volume-operations.t | 135
-rw-r--r--  tests/line-coverage/cli-volume-top-profile-coverage.t | 62
-rwxr-xr-x  tests/line-coverage/errorgen-coverage.t | 42
-rw-r--r--  tests/line-coverage/log-and-brick-ops-negative-case.t | 82
-rwxr-xr-x  tests/line-coverage/meta-max-coverage.t | 33
-rw-r--r--  tests/line-coverage/namespace-linecoverage.t | 39
-rwxr-xr-x  tests/line-coverage/old-protocol.t | 37
-rwxr-xr-x  tests/line-coverage/quiesce-coverage.t | 44
-rw-r--r--  tests/line-coverage/shard-coverage.t | 33
-rw-r--r--  tests/line-coverage/some-features-in-libglusterfs.t | 67
-rw-r--r--  tests/line-coverage/volfile-with-all-graph-syntax.t | 73
-rw-r--r--  tests/ssl.rc | 2
-rw-r--r--  tests/thin-arbiter.rc | 45
-rw-r--r--  tests/tier.rc | 159
-rw-r--r--  tests/utils/changelog/changelog.h | 6
-rw-r--r--  tests/utils/changelog/test-changelog-api.c | 98
-rw-r--r--  tests/utils/changelog/test-history-api.c | 111
-rw-r--r--  tests/utils/changelogparser.py | 5
-rwxr-xr-x  tests/utils/create-files.py | 9
-rw-r--r--  tests/utils/get-mdata-xattr.c | 152
-rwxr-xr-x  tests/utils/gfid-access.py | 62
-rw-r--r--  tests/utils/libcxattr.py | 22
-rw-r--r--  tests/utils/py2py3.py | 186
-rw-r--r--  tests/volume.rc | 171
295 files changed, 17120 insertions, 1225 deletions
diff --git a/tests/00-geo-rep/00-georep-verify-non-root-setup.t b/tests/00-geo-rep/00-georep-verify-non-root-setup.t
new file mode 100644
index 00000000000..a55fd3e5e6a
--- /dev/null
+++ b/tests/00-geo-rep/00-georep-verify-non-root-setup.t
@@ -0,0 +1,294 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=600
+
+### Basic non-root geo-rep setup test with Distribute-Replicate volumes
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+
+##Variables
+GEOREP_CLI="$CLI volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+num_active=2
+num_passive=2
+master_mnt=$M0
+slave_mnt=$M1
+
+##User and group to be used for non-root geo-rep setup
+usr="nroot"
+grp="ggroup"
+
+slave_url=$usr@$slave
+slave_vol=$GSV0
+ssh_url=$usr@$SH0
+
+############################################################
+#SETUP VOLUMES AND VARIABLES
+
+##create_and_start_master_volume
+TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+TEST $CLI volume start $GMV0
+
+##create_and_start_slave_volume
+TEST $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
+TEST $CLI volume start $GSV0
+
+##Mount master
+#TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+#TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+
+##########################################################
+#TEST FUNCTIONS
+
+function distribute_key_non_root()
+{
+ ${GLUSTER_LIBEXECDIR}/set_geo_rep_pem_keys.sh $usr $master $slave_vol
+ echo $?
+}
+
+
+function check_status_non_root()
+{
+ local search_key=$1
+ $GEOREP_CLI $master $slave_url status | grep -F "$search_key" | wc -l
+}
+
+
+function check_and_clean_group()
+{
+ if [ $(getent group $grp) ]
+ then
+ groupdel $grp;
+ echo $?
+ else
+ echo 0
+ fi
+}
+
+function clean_lock_files()
+{
+    if [ -f /etc/passwd.lock ];
+    then
+         rm -f /etc/passwd.lock;
+    fi
+
+    if [ -f /etc/group.lock ];
+    then
+         rm -f /etc/group.lock;
+    fi
+
+    if [ -f /etc/shadow.lock ];
+    then
+         rm -f /etc/shadow.lock;
+    fi
+
+    if [ -f /etc/gshadow.lock ];
+    then
+         rm -f /etc/gshadow.lock;
+    fi
+}
+
+
+###########################################################
+#SETUP NON-ROOT GEO REPLICATION
+
+##Create the ggroup group
+##Remove the group first if it already exists, then create it fresh
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_and_clean_group
+
+##cleanup *.lock files
+
+clean_lock_files
+
+TEST /usr/sbin/groupadd $grp
+
+clean_lock_files
+##Delete the user if it already exists, then create the non-root user and add it to the newly created group
+userdel -r -f $usr
+TEST /usr/sbin/useradd -G $grp $usr
+
+##Set a known password for the non-root user so the ssh key can be distributed non-interactively
+echo "$usr:pass" | chpasswd
+
+##Set up mountbroker root
+TEST gluster-mountbroker setup /var/mountbroker-root $grp
+
+##Associate volume and non-root user to the mountbroker
+TEST gluster-mountbroker add $slave_vol $usr
+
+##Enable clear-text password authentication in sshd (needed for the password-based key distribution below)
+sed '/^PasswordAuthentication /{s/no/yes/}' -i /etc/ssh/sshd_config && grep '^PasswordAuthentication ' /etc/ssh/sshd_config && service sshd restart
+
+
+##Restart glusterd to pick up the mountbroker changes
+TEST killall_gluster;
+TEST glusterd;
+TEST pidof glusterd;
+
+##Create, start and mount meta_volume
+TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
+TEST $CLI volume start $META_VOL
+TEST mkdir -p $META_MNT
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "3" brick_count ${META_VOL}
+TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
+
+##Mount master
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "4" brick_count $GMV0
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "4" brick_count $GSV0
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+## Check status of mount-broker
+TEST gluster-mountbroker status
+
+
+##Setup password-less ssh for non-root user
+#sshpass -p "pass" ssh-copy-id -i ~/.ssh/id_rsa.pub $ssh_url
+##Run ssh agent
+eval "$(ssh-agent -s)"
+PASS="pass"
+
+
+##Create a temp script to echo the SSH password, used by SSH_ASKPASS
+
+SSH_ASKPASS_SCRIPT=/tmp/ssh-askpass-script
+cat > ${SSH_ASKPASS_SCRIPT} <<EOL
+#!/bin/bash
+echo "${PASS}"
+EOL
+chmod u+x ${SSH_ASKPASS_SCRIPT}
+
+##Export a dummy DISPLAY: ssh falls back to SSH_ASKPASS only when DISPLAY is set and the command has no controlling terminal (hence setsid below)
+export DISPLAY
+
+export SSH_ASKPASS=${SSH_ASKPASS_SCRIPT}
+
+DISPLAY=: setsid ssh-copy-id -o 'PreferredAuthentications=password' -o 'StrictHostKeyChecking=no' -i ~/.ssh/id_rsa.pub $ssh_url
+
+##Setting up PATH for gluster binaries in case of source installation
+##ssh -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $ssh_url "echo "export PATH=$PATH:/usr/local/sbin" >> ~/.bashrc"
+
+##Creating secret pem pub file
+TEST gluster-georep-sshkey generate
+
+##Create geo-rep non-root setup
+
+TEST $GEOREP_CLI $master $slave_url create push-pem
+
+#check for session creation
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_non_root "Created"
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave_url config gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave_url config slave-gluster-command-dir ${GLUSTER_CMD_DIR}
+
+## Test for key distribution
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 distribute_key_non_root
+
+##Wait for common secret pem file to be created
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file
+
+#Enable_metavolume
+TEST $GEOREP_CLI $master $slave config use_meta_volume true
+
+#Start_georep
+TEST $GEOREP_CLI $master $slave_url start
+
+## Meta volume is enabled, so expect 2 Active and 2 Passive workers
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_non_root "Active"
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_non_root "Passive"
+
+#Pause geo-replication session
+TEST $GEOREP_CLI $master $slave_url pause
+
+#Resume geo-replication session
+TEST $GEOREP_CLI $master $slave_url resume
+
+#Validate failure of volume stop when geo-rep is running
+TEST ! $CLI volume stop $GMV0
+
+#Negative tests for ssh-port
+#The port must be an integer in the 1-65535 range
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port -22
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port abc
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port 6875943
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port 4.5
+
+TEST ! $GEOREP_CLI $master $slave_url config ssh-port 22a
+
+#Set ssh-port to a valid integer to confirm the validation accepts it
+TEST $GEOREP_CLI $master $slave config ssh-port 22
+
+#Hybrid directory rename test BZ#1763439
+
+TEST $GEOREP_CLI $master $slave_url config change_detector xsync
+#verify master and slave mount
+
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT "^1$" check_mounted ${master_mnt}
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT "^1$" check_mounted ${slave_mnt}
+
+#Create test data for hybrid crawl
+TEST mkdir ${master_mnt}/dir1
+TEST mkdir ${master_mnt}/dir1/dir2
+TEST mkdir ${master_mnt}/dir1/dir3
+TEST mkdir ${master_mnt}/hybrid_d1
+
+mv ${master_mnt}/hybrid_d1 ${master_mnt}/hybrid_rn_d1
+mv ${master_mnt}/dir1/dir2 ${master_mnt}/rn_dir2
+mv ${master_mnt}/dir1/dir3 ${master_mnt}/dir1/rn_dir3
+
+#Verify hybrid crawl data on slave
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/hybrid_rn_d1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/rn_dir2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/dir1/rn_dir3
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave_url stop
+
+#Delete Geo-rep
+TEST $GEOREP_CLI $master $slave_url delete
+
+#Cleanup authorized_keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' /home/$usr/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' /home/$usr/.ssh/authorized_keys
+
+#clear mountbroker
+gluster-mountbroker remove --user $usr
+gluster-mountbroker remove --volume $slave_vol
+
+#delete group and user created for non-root setup
+TEST userdel -r -f $usr
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_and_clean_group
+
+##password script cleanup
+rm -rf /tmp/ssh-askpass-script
+
+
+cleanup;
+
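Note on the key-distribution step in the test above: it avoids sshpass by exporting SSH_ASKPASS and detaching ssh-copy-id from the terminal with setsid. As a standalone, hedged sketch of the same pattern (the "pass" password, the user@host target and the temp-file path are placeholders, not part of the patch):

    #!/bin/bash
    # OpenSSH runs the SSH_ASKPASS program to obtain a password only when
    # DISPLAY is set and there is no controlling terminal, hence setsid.
    PASS="pass"                              # placeholder password
    ASKPASS=$(mktemp /tmp/askpass.XXXXXX)
    printf '#!/bin/bash\necho "%s"\n' "$PASS" > "$ASKPASS"
    chmod u+x "$ASKPASS"

    export SSH_ASKPASS="$ASKPASS"
    DISPLAY=: setsid ssh-copy-id \
        -o PreferredAuthentications=password \
        -o StrictHostKeyChecking=no \
        -i ~/.ssh/id_rsa.pub user@host        # placeholder target

    rm -f "$ASKPASS"
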
diff --git a/tests/00-geo-rep/00-georep-verify-setup.t b/tests/00-geo-rep/00-georep-verify-setup.t
index 8cbf203a207..0d46c04102d 100644
--- a/tests/00-geo-rep/00-georep-verify-setup.t
+++ b/tests/00-geo-rep/00-georep-verify-setup.t
@@ -5,7 +5,8 @@
. $(dirname $0)/../geo-rep.rc
. $(dirname $0)/../env.rc
-SCRIPT_TIMEOUT=300
+SCRIPT_TIMEOUT=400
+GEO_REP_TIMEOUT=200
##Cleanup and start glusterd
cleanup;
@@ -51,6 +52,10 @@ TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
#BASIC GEO-REPLICATION TESTS
############################################################
+#Test invalid slave url
+TEST ! $GEOREP_CLI $master ${SH0}:${GSV0} create push-pem
+TEST ! $GEOREP_CLI $master ${SH0}:::${GSV0} create push-pem
+
#Create geo-rep session
TEST create_georep_session $master $slave
@@ -82,6 +87,15 @@ TEST pidof glusterd
TEST $CLI get-state detail
TEST pidof glusterd
+#Pause geo-replication session
+TEST $GEOREP_CLI $master $slave pause
+
+#Resume geo-replication session
+TEST $GEOREP_CLI $master $slave resume
+
+#Validate failure of volume stop when geo-rep is running
+TEST ! $CLI volume stop $GMV0
+
#Stop Geo-rep
TEST $GEOREP_CLI $master $slave stop
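For context on the two negative checks added above: geo-replication expects the slave URL in the form [user@]host::volume, so a single colon (which reads like a host:path) or a run of three colons is rejected. Illustrative invocations with placeholder names (not taken from the patch):

    # accepted forms
    gluster volume geo-replication master_vol slavehost::slave_vol create push-pem
    gluster volume geo-replication master_vol geoaccount@slavehost::slave_vol create push-pem
    # rejected, as exercised by the negative tests above
    gluster volume geo-replication master_vol slavehost:slave_vol create push-pem
    gluster volume geo-replication master_vol slavehost:::slave_vol create push-pem
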
diff --git a/tests/00-geo-rep/01-georep-glusterd-tests.t b/tests/00-geo-rep/01-georep-glusterd-tests.t
new file mode 100644
index 00000000000..47d5116af26
--- /dev/null
+++ b/tests/00-geo-rep/01-georep-glusterd-tests.t
@@ -0,0 +1,213 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=300
+
+#Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+
+#Variables
+GEOREP_CLI="$CLI volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+slave1=root@${SH0}::${GSV1}
+num_active=2
+num_passive=2
+master_mnt=$M0
+slave_mnt=$M1
+
+############################################################
+#SETUP VOLUMES AND GEO-REPLICATION
+############################################################
+
+#create_and_start_master_volume
+TEST $CLI volume create $GMV0 replica 3 $H0:$B0/${GMV0}{1,2,3};
+
+#Negative testcase: Create geo-rep session while the master volume is not started
+TEST ! $GEOREP_CLI $master $slave create push-pem
+
+TEST $CLI volume start $GMV0
+
+#create_and_start_slave_volume
+TEST $CLI volume create $GSV0 replica 3 $H0:$B0/${GSV0}{1,2,3};
+
+#Negative testcase: Create geo-rep session while the slave volume is not started
+TEST ! $GEOREP_CLI $master $slave create push-pem
+
+TEST $CLI volume start $GSV0
+
+#create_and_start_slave1_volume
+TEST $CLI volume create $GSV1 replica 3 $H0:$B0/${GSV1}{1,2,3};
+TEST $CLI volume start $GSV1
+
+#Create, start and mount meta_volume
+TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
+TEST $CLI volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
+
+#Mount master
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+#Mount slave
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+############################################################
+#BASIC GEO-REPLICATION GLUSTERD TESTS WITH FANOUT SETUP
+############################################################
+
+#Negative testcase: Test invalid master
+TEST ! $GEOREP_CLI master1 ${SH0}::${GSV0} create push-pem
+
+#Negative testcase: Test invalid slave
+TEST ! $GEOREP_CLI $master ${SH0}::slave3 create push-pem
+
+##------------------- Session 1 Creation Begin-----------------##
+#Create geo-rep session
+TEST create_georep_session $master $slave
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave config slave-gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Enable_metavolume
+TEST $GEOREP_CLI $master $slave config use_meta_volume true
+##------------------- Session 1 Creation End-----------------##
+
+##------------------- Session 2 Creation Begin-----------------##
+#Create geo-rep session2
+TEST $GEOREP_CLI $master $slave1 create ssh-port 22 no-verify
+
+#Config gluster-command-dir for session2
+TEST $GEOREP_CLI $master $slave1 config gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Config gluster-command-dir for session2
+TEST $GEOREP_CLI $master $slave1 config slave-gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Enable_metavolume for session2
+TEST $GEOREP_CLI $master $slave1 config use_meta_volume true
+##------------------- Session 2 Creation End-----------------##
+
+#Wait for common secret pem file to be created
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file
+
+#Verify the keys are distributed
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_keys_distributed
+
+#Start_georep
+TEST $GEOREP_CLI $master $slave start
+
+#check geo-rep status without specifying master and slave volumes
+TEST $GEOREP_CLI status
+
+#Start_georep force
+TEST $GEOREP_CLI $master $slave1 start force
+
+#Negative testcase: Create the same session after start, fails
+#With root@ prefix
+TEST ! $GEOREP_CLI $master $slave1 create push-pem
+#Without root@ prefix
+TEST ! $GEOREP_CLI $master ${SH0}::${GSV1} create push-pem
+TEST $GEOREP_CLI $master $slave1 create push-pem force
+
+##------------------- Fanout status testcases Begin --------------##
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Passive"
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_fanout_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_fanout_status_num_rows "Passive"
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_fanout_status_detail_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_fanout_status_detail_num_rows "Passive"
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_all_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_all_status_num_rows "Passive"
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_all_status_detail_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_all_status_detail_num_rows "Passive"
+
+##------------------- Fanout status testcases End --------------##
+
+##------Checkpoint Testcase Begin---------------##
+#Write I/O
+echo "test data" > $M0/file1
+TEST $GEOREP_CLI $master $slave config checkpoint now
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_checkpoint_met $master $slave
+touch $M0
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 verify_checkpoint_met $master $slave
+##------Checkpoint Testcase End---------------##
+
+##------------------ Geo-rep config testcases Begin--------------------##
+TEST $GEOREP_CLI $master $slave config
+TEST ! $GEOREP_CLI $master $slave config arsync-options '-W'
+TEST $GEOREP_CLI $master $slave config rsync-options '-W'
+TEST $GEOREP_CLI $master $slave config rsync-options
+TEST $GEOREP_CLI $master $slave config \!rsync-options
+TEST $GEOREP_CLI $master $slave config sync-xattrs false
+##------------------ Geo-rep config testcases End --------------------##
+
+##---------------- Pause/Resume testcase Begin-------------##
+#Negative testcase: Resume geo-replication session when not paused
+TEST ! $GEOREP_CLI $master $slave1 resume
+TEST $GEOREP_CLI $master $slave1 resume force
+
+#Pause geo-replication session with root@
+TEST $GEOREP_CLI $master $slave1 pause force
+
+#Resume geo-replication session with root@
+TEST $GEOREP_CLI $master $slave1 resume force
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave1 stop force
+
+#Negative testcase: Resume geo-replication session after geo-rep stop
+TEST ! $GEOREP_CLI $master $slave1 resume
+##---------------- Pause/Resume testcase End-------------##
+
+##-----------------glusterd slave key/value upgrade testcase Begin ---------##
+#Upgrade test of slave key stored in glusterd info file
+src=$(grep slave2 /var/lib/glusterd/vols/$master/info)
+#Remove the slave uuid (the last ':'-separated field)
+dst=${src%:*}
+
+#Update glusterd info file with old slave format
+sed -i "s|$src|$dst|g" /var/lib/glusterd/vols/$master/info
+TEST ! grep $src /var/lib/glusterd/vols/$master/info
+
+#Restart glusterd to update in-memory volinfo
+TEST pkill glusterd
+TEST glusterd;
+TEST pidof glusterd
+
+#Start geo-rep and validate slave format is updated
+TEST $GEOREP_CLI $master $slave1 start force
+TEST grep $src /var/lib/glusterd/vols/$master/info
+##-----------------glusterd slave key/value upgrade testcase End ---------##
+
+#Negative testcase: Delete Geo-rep 2 fails as geo-rep is running
+TEST ! $GEOREP_CLI $master $slave1 delete
+
+#Stop and Delete Geo-rep 2
+TEST $GEOREP_CLI $master $slave1 stop force
+TEST $GEOREP_CLI $master $slave1 delete reset-sync-time
+
+#Stop and Delete Geo-rep 1
+TEST $GEOREP_CLI $master $slave stop
+TEST $GEOREP_CLI $master $slave delete
+
+#Cleanup authorized keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
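The Active/Passive assertions in this file lean on row-counting helpers from tests/geo-rep.rc, which this diff does not touch. As a rough sketch of the idea only (the real helper names and bodies may differ), such a counter can be as small as:

    # Hypothetical status-row counter: print how many lines of the status
    # output for the $master -> $slave session contain the given keyword,
    # e.g. "Active", "Passive" or "Changelog Crawl".
    function count_status_rows()
    {
        local search_key=$1
        $GEOREP_CLI $master $slave status | grep -F "$search_key" | wc -l
    }

EXPECT_WITHIN then simply retries the function until the printed count matches the expected value (for the fanout setup above, 2 Active and 4 Passive across both sessions) or the timeout expires.
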
diff --git a/tests/00-geo-rep/bug-1708603.t b/tests/00-geo-rep/bug-1708603.t
new file mode 100644
index 00000000000..26913f1d318
--- /dev/null
+++ b/tests/00-geo-rep/bug-1708603.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=300
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+
+##Variables
+GEOREP_CLI="gluster volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+num_active=2
+num_passive=2
+master_mnt=$M0
+slave_mnt=$M1
+
+############################################################
+#SETUP VOLUMES AND GEO-REPLICATION
+############################################################
+
+##create_and_start_master_volume
+TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+TEST $CLI volume start $GMV0
+
+##create_and_start_slave_volume
+TEST $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
+TEST $CLI volume start $GSV0
+
+##Mount master
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+#Create geo-rep session
+TEST create_georep_session $master $slave
+
+echo n | $GEOREP_CLI $master $slave config ignore-deletes true >/dev/null 2>&1
+EXPECT "false" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+echo y | $GEOREP_CLI $master $slave config ignore-deletes true
+EXPECT "true" echo $($GEOREP_CLI $master $slave config ignore-deletes)
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Delete Geo-rep
+TEST $GEOREP_CLI $master $slave delete
+
+#Cleanup authorized keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t b/tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t
index 67ac1678e42..c45d2ff62ce 100644
--- a/tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t
+++ b/tests/00-geo-rep/georep-basic-dr-rsync-arbiter.t
@@ -159,7 +159,7 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/changelog_chown_f1
#logrotate
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/logrotate
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
#CREATE+RENAME
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 create_rename_ok ${slave_mnt}/create_rename_test_file
@@ -203,8 +203,19 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rsnapshot_data ${slave_mnt}
TEST gluster volume geo-rep $master $slave config rsync-options "--whole-file"
TEST "echo sampledata > $master_mnt/rsync_option_test_file"
+#rename with existing destination case BUG:1694820
+TEST create_rename_with_existing_destination ${master_mnt}
+#verify rename with existing destination case BUG:1694820
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rename_with_existing_destination ${slave_mnt}
+
#Verify arequal for whole volume
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 arequal_checksum ${master_mnt} ${slave_mnt}
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt}
+
+#Pause geo-replication session
+TEST $GEOREP_CLI $master $slave pause force
+
+#Resume geo-replication session
+TEST $GEOREP_CLI $master $slave resume force
#Stop Geo-rep
TEST $GEOREP_CLI $master $slave stop
diff --git a/tests/00-geo-rep/georep-basic-dr-rsync.t b/tests/00-geo-rep/georep-basic-dr-rsync.t
index 4a03930efe2..d785aa59fc9 100644
--- a/tests/00-geo-rep/georep-basic-dr-rsync.t
+++ b/tests/00-geo-rep/georep-basic-dr-rsync.t
@@ -71,6 +71,22 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Created"
#Config gluster-command-dir
TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR}
+#Negative tests for ssh-port
+#The port must be an integer in the 1-65535 range
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port -22
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port abc
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port 6875943
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port 4.5
+
+TEST ! $GEOREP_CLI $master $slave config ssh-port 22a
+
+#Set ssh-port to a valid integer to confirm the validation accepts it
+TEST $GEOREP_CLI $master $slave config ssh-port 22
+
#Config gluster-command-dir
TEST $GEOREP_CLI $master $slave config slave-gluster-command-dir ${GLUSTER_CMD_DIR}
@@ -110,6 +126,7 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/hybrid_chown_f1
#Check History Crawl.
TEST $GEOREP_CLI $master $slave stop
TEST create_data "history"
+TEST create_rename_symlink_case
TEST $GEOREP_CLI $master $slave start
EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Passive"
@@ -159,7 +176,7 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/changelog_chown_f1
#logrotate
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/logrotate
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
#CREATE+RENAME
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 create_rename_ok ${slave_mnt}/create_rename_test_file
@@ -203,9 +220,27 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rsnapshot_data ${slave_mnt}
TEST gluster volume geo-rep $master $slave config rsync-options "--whole-file"
TEST "echo sampledata > $master_mnt/rsync_option_test_file"
-#Verify arequal for whole volume
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 arequal_checksum ${master_mnt} ${slave_mnt}
+#rename with existing destination case BUG:1694820
+TEST create_rename_with_existing_destination ${master_mnt}
+#verify rename with existing destination case BUG:1694820
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rename_with_existing_destination ${slave_mnt}
+#Verify arequal for whole volume
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt}
+
+#Test config upgrade BUG: 1707731
+config_file=$GLUSTERD_WORKDIR/geo-replication/${GMV0}_${SH0}_${GSV0}/gsyncd.conf
+cat >> $config_file<<EOL
+[peers ${GMV0} ${GSV0}]
+use_tarssh = true
+timeout = 1
+EOL
+TEST $GEOREP_CLI $master $slave stop
+TEST $GEOREP_CLI $master $slave start
+#verify that the config file is updated
+EXPECT "1" echo $(grep -Fc "vars" $config_file)
+EXPECT "1" echo $(grep -Fc "sync-method = tarssh" $config_file)
+EXPECT "1" echo $(grep -Fc "slave-timeout = 1" $config_file)
#Stop Geo-rep
TEST $GEOREP_CLI $master $slave stop
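The config-upgrade block above appends an old-style section to gsyncd.conf and then asserts, after a stop/start cycle, that the file has been rewritten with the new key names. The before/after being checked is roughly the following (volume names are placeholders; the exact layout of the rewritten file beyond the grepped strings is an assumption):

    # Appended before the restart (old format):
    #   [peers master_vol slave_vol]
    #   use_tarssh = true
    #   timeout = 1
    #
    # Expected after the restart (new format, as checked by the greps):
    #   a "vars" section containing
    #   sync-method = tarssh
    #   slave-timeout = 1
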
diff --git a/tests/00-geo-rep/georep-basic-dr-tarssh-arbiter.t b/tests/00-geo-rep/georep-basic-dr-tarssh-arbiter.t
index 1726d0bb0d1..8fed929ffca 100644
--- a/tests/00-geo-rep/georep-basic-dr-tarssh-arbiter.t
+++ b/tests/00-geo-rep/georep-basic-dr-tarssh-arbiter.t
@@ -81,7 +81,7 @@ TEST $GEOREP_CLI $master $slave config use_meta_volume true
TEST $CLI volume set $GMV0 changelog.rollover-time 3
#Config tarssh as sync-engine
-TEST $GEOREP_CLI $master $slave config use_tarssh true
+TEST $GEOREP_CLI $master $slave config sync-method tarssh
#Wait for common secret pem file to be created
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file
@@ -162,7 +162,7 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/changelog_chown_f1
#logrotate
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/logrotate
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
#CREATE+RENAME
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 create_rename_ok ${slave_mnt}/create_rename_test_file
@@ -202,8 +202,13 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_symlink_rename_mkdir_data ${slave_mnt}/s
#rsnapshot usecase
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rsnapshot_data ${slave_mnt}
+#rename with existing destination case BUG:1694820
+TEST create_rename_with_existing_destination ${master_mnt}
+#verify rename with existing destination case BUG:1694820
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rename_with_existing_destination ${slave_mnt}
+
#Verify arequal for whole volume
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 arequal_checksum ${master_mnt} ${slave_mnt}
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt}
#Stop Geo-rep
TEST $GEOREP_CLI $master $slave stop
diff --git a/tests/00-geo-rep/georep-basic-dr-tarssh.t b/tests/00-geo-rep/georep-basic-dr-tarssh.t
index c5d16ac86b2..feb2de74c90 100644
--- a/tests/00-geo-rep/georep-basic-dr-tarssh.t
+++ b/tests/00-geo-rep/georep-basic-dr-tarssh.t
@@ -81,7 +81,7 @@ TEST $GEOREP_CLI $master $slave config use_meta_volume true
TEST $CLI volume set $GMV0 changelog.rollover-time 3
#Config tarssh as sync-engine
-TEST $GEOREP_CLI $master $slave config use_tarssh true
+TEST $GEOREP_CLI $master $slave config sync-method tarssh
#Wait for common secret pem file to be created
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file
@@ -162,7 +162,7 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/changelog_chown_f1
#logrotate
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/logrotate
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
#CREATE+RENAME
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 create_rename_ok ${slave_mnt}/create_rename_test_file
@@ -202,8 +202,13 @@ EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_hardlink_rename_data ${slave_mnt}
#rsnapshot usecase
EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rsnapshot_data ${slave_mnt}
+#rename with existing destination case BUG:1694820
+TEST create_rename_with_existing_destination ${master_mnt}
+#verify rename with existing destination case BUG:1694820
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rename_with_existing_destination ${slave_mnt}
+
#Verify arequal for whole volume
-EXPECT_WITHIN $GEO_REP_TIMEOUT 0 arequal_checksum ${master_mnt} ${slave_mnt}
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt}
#Stop Geo-rep
TEST $GEOREP_CLI $master $slave stop
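Both tarssh tests above switch the sync-engine knob from the old boolean 'use_tarssh' to the newer 'sync-method' option, the same old-to-new mapping the config-upgrade check in georep-basic-dr-rsync.t asserts. In CLI terms (volume names are placeholders):

    # old form, as previously used by these tests
    gluster volume geo-replication master_vol slavehost::slave_vol config use_tarssh true
    # new form
    gluster volume geo-replication master_vol slavehost::slave_vol config sync-method tarssh
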
diff --git a/tests/00-geo-rep/georep-basic-rsync-ec.t b/tests/00-geo-rep/georep-basic-rsync-ec.t
new file mode 100644
index 00000000000..dd1f94edbc9
--- /dev/null
+++ b/tests/00-geo-rep/georep-basic-rsync-ec.t
@@ -0,0 +1,224 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=500
+
+AREQUAL_PATH=$(dirname $0)/../utils
+test "`uname -s`" != "Linux" && {
+ CFLAGS="$CFLAGS -lintl";
+}
+build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS
+
+### Basic Tests with Distributed Disperse volumes
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+
+##Variables
+GEOREP_CLI="$CLI volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+num_active=2
+num_passive=10
+master_mnt=$M0
+slave_mnt=$M1
+
+############################################################
+#SETUP VOLUMES AND GEO-REPLICATION
+############################################################
+
+##create_and_start_master_volume
+TEST $CLI volume create $GMV0 disperse 3 redundancy 1 $H0:$B0/${GMV0}{0..5};
+TEST $CLI volume start $GMV0
+
+##create_and_start_slave_volume
+TEST $CLI volume create $GSV0 disperse 3 redundancy 1 $H0:$B0/${GSV0}{0..5};
+TEST $CLI volume start $GSV0
+
+##Create, start and mount meta_volume
+TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
+TEST $CLI volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
+
+##Mount master
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+############################################################
+#BASIC GEO-REPLICATION TESTS
+############################################################
+
+#Check Hybrid Crawl
+TEST create_data "hybrid"
+TEST create_georep_session $master $slave
+EXPECT_WITHIN $GEO_REP_TIMEOUT 6 check_status_num_rows "Created"
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave config slave-gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Enable_metavolume
+TEST $GEOREP_CLI $master $slave config use_meta_volume true
+
+#Set changelog roll-over time to 3 secs
+TEST $CLI volume set $GMV0 changelog.rollover-time 3
+
+#Wait for common secret pem file to be created
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file
+
+#Verify the keys are distributed
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_keys_distributed
+
+#Verify "features.read-only" Option
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_slave_read_only $GSV0
+
+#Start_georep
+TEST $GEOREP_CLI $master $slave start
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Passive"
+
+#data_tests "hybrid"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 regular_file_ok ${slave_mnt}/hybrid_f1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/hybrid_d1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_file_ok ${slave_mnt}/hybrid_f3 ${slave_mnt}/hybrid_f4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_dir_ok ${slave_mnt}/hybrid_d3 ${slave_mnt}/hybrid_d4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 symlink_ok hybrid_f1 ${slave_mnt}/hybrid_sl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_file_ok ${slave_mnt}/hybrid_f1 ${slave_mnt}/hybrid_hl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/hybrid_f2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/hybrid_d2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 data_ok ${slave_mnt}/hybrid_f1 "HelloWorld!"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/hybrid_chown_f1
+
+#Check History Crawl.
+TEST $GEOREP_CLI $master $slave stop
+TEST create_data "history"
+TEST $GEOREP_CLI $master $slave start
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Passive"
+
+#data_tests "history"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 regular_file_ok ${slave_mnt}/history_f1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/history_d1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_file_ok ${slave_mnt}/history_f3 ${slave_mnt}/history_f4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_dir_ok ${slave_mnt}/history_d3 ${slave_mnt}/history_d4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 symlink_ok history_f1 ${slave_mnt}/history_sl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_file_ok ${slave_mnt}/history_f1 ${slave_mnt}/history_hl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/history_f2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/history_d2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 data_ok ${slave_mnt}/history_f1 "HelloWorld!"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/history_chown_f1
+
+#Check Changelog Crawl.
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Changelog Crawl"
+TEST create_data "changelog"
+
+# logrotate test
+logrotate_file=${master_mnt}/logrotate/lg_test_file
+TEST mkdir -p ${master_mnt}/logrotate
+logrotate_simulate $logrotate_file 2
+logrotate_simulate $logrotate_file 2
+logrotate_simulate $logrotate_file 2
+logrotate_simulate $logrotate_file 2
+
+# CREATE + RENAME
+create_rename ${master_mnt}/rename_test_file
+
+# hard-link rename
+hardlink_rename ${master_mnt}/hardlink_rename_test_file
+
+#SYNC CHECK
+#data_tests "changelog"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 regular_file_ok ${slave_mnt}/changelog_f1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/changelog_d1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_file_ok ${slave_mnt}/changelog_f3 ${slave_mnt}/changelog_f4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_dir_ok ${slave_mnt}/changelog_d3 ${slave_mnt}/changelog_d4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 symlink_ok changelog_f1 ${slave_mnt}/changelog_sl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_file_ok ${slave_mnt}/changelog_f1 ${slave_mnt}/changelog_hl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/changelog_f2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/changelog_d2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 data_ok ${slave_mnt}/changelog_f1 "HelloWorld!"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/changelog_chown_f1
+
+#logrotate
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/logrotate
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
+
+#CREATE+RENAME
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 create_rename_ok ${slave_mnt}/create_rename_test_file
+
+#hardlink rename
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_rename_ok ${slave_mnt}/hardlink_rename_test_file
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Symlink testcase: Rename symlink and create dir with same name
+TEST create_symlink_rename_mkdir_data
+
+#hardlink-rename-unlink usecase. Sonatype Nexus3 Usecase. BUG:1512483
+TEST create_hardlink_rename_data
+
+#rsnapshot usecase
+#TEST create_rsnapshot_data
+
+#Start Geo-rep
+TEST $GEOREP_CLI $master $slave start
+
+#Wait for geo-rep to come up
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Passive"
+
+#Check for hardlink rename case. BUG: 1296174
+#It should not create src file again on changelog reprocessing
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_rename_ok ${slave_mnt}/hardlink_rename_test_file
+
+#Symlink testcase: Rename symlink and create dir with same name
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_symlink_rename_mkdir_data ${slave_mnt}/symlink_test1
+
+#hardlink-rename-unlink usecase. Sonatype Nexus3 Usecase. BUG:1512483
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_hardlink_rename_data ${slave_mnt}
+
+#rsnapshot usecase
+#EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rsnapshot_data ${slave_mnt}
+
+#Test rsync-options set BUG:1629561
+TEST gluster volume geo-rep $master $slave config rsync-options "--whole-file"
+TEST "echo sampledata > $master_mnt/rsync_option_test_file"
+
+#rename with existing destination case BUG:1694820
+#TEST create_rename_with_existing_destination ${master_mnt}
+#verify rename with existing destination case BUG:1694820
+#EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rename_with_existing_destination ${slave_mnt}
+
+#Verify arequal for whole volume
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt}
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Delete Geo-rep
+TEST $GEOREP_CLI $master $slave delete
+
+#Cleanup are-equal binary
+TEST rm $AREQUAL_PATH/arequal-checksum
+
+#Cleanup authorized keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/00-geo-rep/georep-basic-tarssh-ec.t b/tests/00-geo-rep/georep-basic-tarssh-ec.t
new file mode 100644
index 00000000000..987bd9391c8
--- /dev/null
+++ b/tests/00-geo-rep/georep-basic-tarssh-ec.t
@@ -0,0 +1,223 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=500
+
+AREQUAL_PATH=$(dirname $0)/../utils
+test "`uname -s`" != "Linux" && {
+ CFLAGS="$CFLAGS -lintl";
+}
+build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS
+
+### Basic Tests with Distributed Disperse volumes
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+
+##Variables
+GEOREP_CLI="$CLI volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+num_active=2
+num_passive=10
+master_mnt=$M0
+slave_mnt=$M1
+
+############################################################
+#SETUP VOLUMES AND GEO-REPLICATION
+############################################################
+
+##create_and_start_master_volume
+TEST $CLI volume create $GMV0 disperse 3 redundancy 1 $H0:$B0/${GMV0}{0..5};
+TEST $CLI volume start $GMV0
+
+##create_and_start_slave_volume
+TEST $CLI volume create $GSV0 disperse 3 redundancy 1 $H0:$B0/${GSV0}{0..5};
+TEST $CLI volume start $GSV0
+
+##Create, start and mount meta_volume
+TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
+TEST $CLI volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
+
+##Mount master
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+############################################################
+#BASIC GEO-REPLICATION TESTS
+############################################################
+
+#Check Hybrid Crawl
+TEST create_data "hybrid"
+TEST create_georep_session $master $slave
+EXPECT_WITHIN $GEO_REP_TIMEOUT 6 check_status_num_rows "Created"
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave config slave-gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Enable_metavolume
+TEST $GEOREP_CLI $master $slave config use_meta_volume true
+
+#Set changelog roll-over time to 3 secs
+TEST $CLI volume set $GMV0 changelog.rollover-time 3
+
+#Config tarssh as sync-engine
+TEST $GEOREP_CLI $master $slave config sync-method tarssh
+
+#Wait for common secret pem file to be created
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file
+
+#Verify the keys are distributed
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_keys_distributed
+
+#Verify "features.read-only" Option
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_slave_read_only $GSV0
+
+#Start_georep
+TEST $GEOREP_CLI $master $slave start
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Passive"
+
+#data_tests "hybrid"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 regular_file_ok ${slave_mnt}/hybrid_f1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/hybrid_d1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_file_ok ${slave_mnt}/hybrid_f3 ${slave_mnt}/hybrid_f4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_dir_ok ${slave_mnt}/hybrid_d3 ${slave_mnt}/hybrid_d4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 symlink_ok hybrid_f1 ${slave_mnt}/hybrid_sl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_file_ok ${slave_mnt}/hybrid_f1 ${slave_mnt}/hybrid_hl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/hybrid_f2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/hybrid_d2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 data_ok ${slave_mnt}/hybrid_f1 "HelloWorld!"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/hybrid_chown_f1
+
+#Check History Crawl.
+TEST $GEOREP_CLI $master $slave stop
+TEST create_data "history"
+TEST $GEOREP_CLI $master $slave start
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Passive"
+
+#data_tests "history"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 regular_file_ok ${slave_mnt}/history_f1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/history_d1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_file_ok ${slave_mnt}/history_f3 ${slave_mnt}/history_f4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_dir_ok ${slave_mnt}/history_d3 ${slave_mnt}/history_d4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 symlink_ok history_f1 ${slave_mnt}/history_sl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_file_ok ${slave_mnt}/history_f1 ${slave_mnt}/history_hl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/history_f2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/history_d2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 data_ok ${slave_mnt}/history_f1 "HelloWorld!"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/history_chown_f1
+
+#Check Changelog Crawl.
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Changelog Crawl"
+TEST create_data "changelog"
+
+# logrotate test
+logrotate_file=${master_mnt}/logrotate/lg_test_file
+TEST mkdir -p ${master_mnt}/logrotate
+logrotate_simulate $logrotate_file 2
+logrotate_simulate $logrotate_file 2
+logrotate_simulate $logrotate_file 2
+logrotate_simulate $logrotate_file 2
+
+# CREATE + RENAME
+create_rename ${master_mnt}/rename_test_file
+
+# hard-link rename
+hardlink_rename ${master_mnt}/hardlink_rename_test_file
+
+#SYNC CHECK
+#data_tests "changelog"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 regular_file_ok ${slave_mnt}/changelog_f1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/changelog_d1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_file_ok ${slave_mnt}/changelog_f3 ${slave_mnt}/changelog_f4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 rename_dir_ok ${slave_mnt}/changelog_d3 ${slave_mnt}/changelog_d4
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 symlink_ok changelog_f1 ${slave_mnt}/changelog_sl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_file_ok ${slave_mnt}/changelog_f1 ${slave_mnt}/changelog_hl1
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/changelog_f2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 unlink_ok ${slave_mnt}/changelog_d2
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 data_ok ${slave_mnt}/changelog_f1 "HelloWorld!"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 chown_file_ok ${slave_mnt}/changelog_chown_f1
+
+#logrotate
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 directory_ok ${slave_mnt}/logrotate
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt}/logrotate ${slave_mnt}/logrotate
+
+#CREATE+RENAME
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 create_rename_ok ${slave_mnt}/create_rename_test_file
+
+#hardlink rename
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_rename_ok ${slave_mnt}/hardlink_rename_test_file
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Symlink testcase: Rename symlink and create dir with same name
+TEST create_symlink_rename_mkdir_data
+
+#hardlink-rename-unlink use case (Sonatype Nexus3). BUG:1512483
+TEST create_hardlink_rename_data
+
+#rsnapshot usecase
+#TEST create_rsnapshot_data
+
+#Start Geo-rep
+TEST $GEOREP_CLI $master $slave start
+
+#Wait for geo-rep to come up
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 4 check_status_num_rows "Passive"
+
+#Check for hardlink rename case. BUG: 1296174
+#It should not create src file again on changelog reprocessing
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 hardlink_rename_ok ${slave_mnt}/hardlink_rename_test_file
+
+#Symlink testcase: Rename symlink and create dir with same name
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_symlink_rename_mkdir_data ${slave_mnt}/symlink_test1
+
+#hardlink-rename-unlink use case (Sonatype Nexus3). BUG:1512483
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_hardlink_rename_data ${slave_mnt}
+
+#rsnapshot usecase
+#EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rsnapshot_data ${slave_mnt}
+
+#rename with existing destination case BUG:1694820
+#TEST create_rename_with_existing_destination ${master_mnt}
+#verify rename with existing destination case BUG:1694820
+#EXPECT_WITHIN $GEO_REP_TIMEOUT 0 verify_rename_with_existing_destination ${slave_mnt}
+
+#Verify arequal for whole volume
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt}
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Delete Geo-rep
+TEST $GEOREP_CLI $master $slave delete
+
+#Cleanup arequal-checksum binary
+TEST rm $AREQUAL_PATH/arequal-checksum
+
+#Cleanup authorized keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/00-geo-rep/georep-config-upgrade.t b/tests/00-geo-rep/georep-config-upgrade.t
new file mode 100644
index 00000000000..557461cd9c4
--- /dev/null
+++ b/tests/00-geo-rep/georep-config-upgrade.t
@@ -0,0 +1,132 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=300
+OLD_CONFIG_PATH=$(dirname $0)/gsyncd.conf.old
+WORKING_DIR=/var/lib/glusterd/geo-replication/master_127.0.0.1_slave
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+##Variables
+GEOREP_CLI="$CLI volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+num_active=2
+num_passive=2
+master_mnt=$M0
+slave_mnt=$M1
+
+############################################################
+#SETUP VOLUMES AND GEO-REPLICATION
+############################################################
+
+##create_and_start_master_volume
+TEST $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+TEST $CLI volume start $GMV0
+
+##create_and_start_slave_volume
+TEST $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
+TEST $CLI volume start $GSV0
+
+##Create, start and mount meta_volume
+TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
+TEST $CLI volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
+
+##Mount master
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+############################################################
+#BASIC GEO-REPLICATION TESTS
+############################################################
+
+#Create geo-rep session
+TEST create_georep_session $master $slave
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Config slave-gluster-command-dir
+TEST $GEOREP_CLI $master $slave config slave-gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Enable_metavolume
+TEST $GEOREP_CLI $master $slave config use_meta_volume true
+
+#Wait for common secret pem file to be created
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file
+
+#Verify the keys are distributed
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_keys_distributed
+
+#Start_georep
+TEST $GEOREP_CLI $master $slave start
+
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 2 check_status_num_rows "Passive"
+
+TEST $GEOREP_CLI $master $slave config sync-method tarssh
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Back up the current config file and copy in the old-format config
+mv -f $WORKING_DIR/gsyncd.conf $WORKING_DIR/gsyncd.conf.org
+cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf
+
+#Check if 'config' (get all) updates the config file
+TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf
+TEST $GEOREP_CLI $master $slave config
+TEST grep "sync-method" $WORKING_DIR/gsyncd.conf
+
+#Check if 'config <name>' (get) updates the config file
+rm -f $WORKING_DIR/gsyncd.conf
+cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf
+TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf
+TEST $GEOREP_CLI $master $slave config sync-method
+TEST grep "sync-method" $WORKING_DIR/gsyncd.conf
+
+#Check if 'config <name> <value>' (set) updates the config file
+rm -f $WORKING_DIR/gsyncd.conf
+cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf
+TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf
+TEST $GEOREP_CLI $master $slave config sync-xattrs false
+TEST grep "sync-method" $WORKING_DIR/gsyncd.conf
+
+#Check if 'config !<name>' (reset) updates the config file
+rm -f $WORKING_DIR/gsyncd.conf
+cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf
+TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf
+TEST $GEOREP_CLI $master $slave config \!sync-xattrs
+TEST grep "sync-method" $WORKING_DIR/gsyncd.conf
+
+#Check if geo-rep start updates the config file
+rm -f $WORKING_DIR/gsyncd.conf
+cp -p $OLD_CONFIG_PATH $WORKING_DIR/gsyncd.conf
+TEST ! grep "sync-method" $WORKING_DIR/gsyncd.conf
+TEST $GEOREP_CLI $master $slave start
+TEST grep "sync-method" $WORKING_DIR/gsyncd.conf
+
+#Stop geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Delete Geo-rep
+TEST $GEOREP_CLI $master $slave delete
+
+#Cleanup authorized keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/00-geo-rep/georep-stderr-hang.t b/tests/00-geo-rep/georep-stderr-hang.t
new file mode 100644
index 00000000000..496f0e6577d
--- /dev/null
+++ b/tests/00-geo-rep/georep-stderr-hang.t
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../geo-rep.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=500
+
+AREQUAL_PATH=$(dirname $0)/../utils
+test "`uname -s`" != "Linux" && {
+ CFLAGS="$CFLAGS -lintl";
+}
+build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS
+
+### Basic geo-replication tests with single-brick volumes
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+
+##Variables
+GEOREP_CLI="$CLI volume geo-replication"
+master=$GMV0
+SH0="127.0.0.1"
+slave=${SH0}::${GSV0}
+num_active=2
+num_passive=2
+master_mnt=$M0
+slave_mnt=$M1
+
+############################################################
+#SETUP VOLUMES AND GEO-REPLICATION
+############################################################
+
+##create_and_start_master_volume
+TEST $CLI volume create $GMV0 $H0:$B0/${GMV0}1;
+TEST $CLI volume start $GMV0
+
+##create_and_start_slave_volume
+TEST $CLI volume create $GSV0 $H0:$B0/${GSV0}1;
+TEST $CLI volume start $GSV0
+TEST $CLI volume set $GSV0 performance.stat-prefetch off
+TEST $CLI volume set $GSV0 performance.quick-read off
+TEST $CLI volume set $GSV0 performance.readdir-ahead off
+TEST $CLI volume set $GSV0 performance.read-ahead off
+
+##Mount master
+TEST glusterfs -s $H0 --volfile-id $GMV0 $M0
+
+##Mount slave
+TEST glusterfs -s $H0 --volfile-id $GSV0 $M1
+
+############################################################
+#BASIC GEO-REPLICATION TESTS
+############################################################
+
+TEST create_georep_session $master $slave
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 check_status_num_rows "Created"
+
+#Config gluster-command-dir
+TEST $GEOREP_CLI $master $slave config gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Config slave-gluster-command-dir
+TEST $GEOREP_CLI $master $slave config slave-gluster-command-dir ${GLUSTER_CMD_DIR}
+
+#Set changelog roll-over time to 45 secs
+TEST $CLI volume set $GMV0 changelog.rollover-time 45
+
+#Wait for common secret pem file to be created
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_common_secret_file
+
+#Verify the keys are distributed
+EXPECT_WITHIN $GEO_REP_TIMEOUT 0 check_keys_distributed
+
+#Set sync-jobs to 1
+TEST $GEOREP_CLI $master $slave config sync-jobs 1
+
+#Start_georep
+TEST $GEOREP_CLI $master $slave start
+
+touch $M0
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 check_status_num_rows "Active"
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 check_status_num_rows "Changelog Crawl"
+
+#Check History Crawl.
+TEST $GEOREP_CLI $master $slave stop
+TEST create_data_hang "rsync_hang"
+TEST create_data "history_rsync"
+TEST $GEOREP_CLI $master $slave start
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 check_status_num_rows "Active"
+
+#Verify arequal for whole volume
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt}
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Config tarssh as sync-engine
+TEST $GEOREP_CLI $master $slave config sync-method tarssh
+
+#Create tarssh hang data
+TEST create_data_hang "tarssh_hang"
+TEST create_data "history_tar"
+
+TEST $GEOREP_CLI $master $slave start
+EXPECT_WITHIN $GEO_REP_TIMEOUT 1 check_status_num_rows "Active"
+
+#Verify arequal for whole volume
+EXPECT_WITHIN $GEO_REP_TIMEOUT "x0" arequal_checksum ${master_mnt} ${slave_mnt}
+
+#Stop Geo-rep
+TEST $GEOREP_CLI $master $slave stop
+
+#Delete Geo-rep
+TEST $GEOREP_CLI $master $slave delete
+
+#Cleanup arequal-checksum binary
+TEST rm $AREQUAL_PATH/arequal-checksum
+
+#Cleanup authorized keys
+sed -i '/^command=.*SSH_ORIGINAL_COMMAND#.*/d' ~/.ssh/authorized_keys
+sed -i '/^command=.*gsyncd.*/d' ~/.ssh/authorized_keys
+
+cleanup;
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/00-geo-rep/georep-upgrade.t b/tests/00-geo-rep/georep-upgrade.t
new file mode 100644
index 00000000000..7523068ed50
--- /dev/null
+++ b/tests/00-geo-rep/georep-upgrade.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+SCRIPT_TIMEOUT=500
+
+###############################################################################################
+#Before upgrade
+###############################################################################################
+brick=/bricks/brick1
+epoch1=$(date '+%s')
+sleep 1
+epoch2=$(date '+%s')
+mkdir -p /bricks/brick1/.glusterfs/changelogs/htime
+mkdir -p /bricks/brick1/.glusterfs/changelogs
+
+#multiple htime files (changelog enable/disable scenario)
+TEST touch /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1
+TEST touch /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2
+
+#changelog files
+TEST touch /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch1
+TEST touch /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch2
+
+htime_file1=/bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1
+htime_file2=/bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2
+
+#data inside htime files before upgrade
+data1=/bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch1
+data2=/bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch2
+
+#data inside htime files after upgrade
+updated_data1=/bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')`/CHANGELOG.$epoch1
+updated_data2=/bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')`/CHANGELOG.$epoch2
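+#(the upgrade script is expected to move each CHANGELOG.<epoch> into a
+# <YYYY>/<MM>/<DD>/ directory and rewrite the htime entries to the new paths;
+# the "After upgrade" checks below verify this)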
+
+echo -n $data1>$htime_file1
+echo -n $data2>$htime_file2
+
+echo "Before upgrade:"
+EXPECT '1' echo $(grep $data1 $htime_file1 | wc -l)
+EXPECT '1' echo $(grep $data2 $htime_file2 | wc -l)
+
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch1 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch2 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2 | wc -l)
+###############################################################################################
+#Upgrade
+###############################################################################################
+### TODO: this lookup is fragile -- searching all of '/' for a file by name
+### can return multiple files with the same name;
+### as a temporary fix, only the first result is used
+TEST upgrade_script=$(find / -type f -name glusterfs-georep-upgrade.py -print | head -n 1)
+TEST python3 $upgrade_script $brick
+
+###############################################################################################
+#After upgrade
+###############################################################################################
+echo "After upgrade:"
+EXPECT '1' echo $(grep $updated_data1 $htime_file1 | wc -l)
+EXPECT '1' echo $(grep $updated_data2 $htime_file2 | wc -l)
+
+#Check directory structure inside changelogs
+TEST ! ls /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch1
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch1.bak | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y')` | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m')` | wc -l)
+EXPECT '2' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')` | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')`/CHANGELOG.$epoch1 | wc -l)
+
+TEST ! ls /bricks/brick1/.glusterfs/changelogs/CHANGELOG.$epoch2
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2 | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/htime/HTIME.$epoch2.bak | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y')` | wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m')`| wc -l)
+EXPECT '1' echo $(ls /bricks/brick1/.glusterfs/changelogs/`echo $(date '+%Y/%m/%d')`/CHANGELOG.$epoch2 | wc -l)
+
+TEST rm -rf /bricks
diff --git a/tests/00-geo-rep/gsyncd.conf.old b/tests/00-geo-rep/gsyncd.conf.old
new file mode 100644
index 00000000000..519acaf8f3e
--- /dev/null
+++ b/tests/00-geo-rep/gsyncd.conf.old
@@ -0,0 +1,47 @@
+[__meta__]
+version = 2.0
+
+[peersrx . .]
+remote_gsyncd = /usr/local/libexec/glusterfs/gsyncd
+georep_session_working_dir = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/
+ssh_command_tar = ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i /var/lib/glusterd/geo-replication/tar_ssh.pem
+changelog_log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}${local_id}-changes.log
+working_dir = /var/lib/misc/glusterfsd/${mastervol}/${eSlave}
+ignore_deletes = false
+pid_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.pid
+state_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.status
+gluster_command_dir = /usr/local/sbin/
+gluster_params = aux-gfid-mount acl
+ssh_command = ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i /var/lib/glusterd/geo-replication/secret.pem
+state_detail_file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/${eSlave}-detail.status
+state_socket_unencoded = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/${eSlave}.socket
+socketdir = /var/run/gluster
+log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}.log
+gluster_log_file = /var/log/glusterfs/geo-replication/${mastervol}/${eSlave}${local_id}.gluster.log
+special_sync_mode = partial
+change_detector = changelog
+pid-file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.pid
+state-file = /var/lib/glusterd/geo-replication/${mastervol}_${remotehost}_${slavevol}/monitor.status
+
+[__section_order__]
+peersrx . . = 0
+peersrx . %5essh%3a = 2
+peersrx . = 3
+peers master slave = 4
+
+[peersrx . %5Essh%3A]
+remote_gsyncd = /nonexistent/gsyncd
+
+[peersrx .]
+gluster_command_dir = /usr/local/sbin/
+gluster_params = aux-gfid-mount acl
+log_file = /var/log/glusterfs/geo-replication-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.log
+log_file_mbr = /var/log/glusterfs/geo-replication-slaves/mbr/${session_owner}:${local_node}${local_id}.${slavevol}.log
+gluster_log_file = /var/log/glusterfs/geo-replication-slaves/${session_owner}:${local_node}${local_id}.${slavevol}.gluster.log
+
+[peers master slave]
+session_owner = 0732cbd1-3ec5-4920-ab0d-aa5a896d5214
+master.stime_xattr_name = trusted.glusterfs.0732cbd1-3ec5-4920-ab0d-aa5a896d5214.07a9005c-ace4-4f67-b3c0-73938fb236c4.stime
+volume_id = 0732cbd1-3ec5-4920-ab0d-aa5a896d5214
+use_tarssh = true
+
diff --git a/tests/basic/afr/split-brain-favorite-child-policy.t b/tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t
index 0e321c6f095..77d82a4996f 100644
--- a/tests/basic/afr/split-brain-favorite-child-policy.t
+++ b/tests/000-flaky/basic_afr_split-brain-favorite-child-policy.t
@@ -1,8 +1,8 @@
#!/bin/bash
#Test the split-brain resolution CLI commands.
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
cleanup;
@@ -16,6 +16,7 @@ TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume set $V0 cluster.entry-self-heal off
TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.heal-timeout 5
TEST $CLI volume start $V0
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
TEST touch $M0/file
@@ -38,7 +39,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-#file fill in split-brain
+#file still in split-brain
cat $M0/file > /dev/null
EXPECT "1" echo $?
@@ -124,7 +125,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
-#file fill in split-brain
+#file still in split-brain
cat $M0/file > /dev/null
EXPECT "1" echo $?
@@ -179,7 +180,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
TEST $CLI volume heal $V0
-#file fill in split-brain
+#file still in split-brain
cat $M0/file > /dev/null
EXPECT "1" echo $?
diff --git a/tests/000-flaky/basic_changelog_changelog-snapshot.t b/tests/000-flaky/basic_changelog_changelog-snapshot.t
new file mode 100644
index 00000000000..f6cd0b04d47
--- /dev/null
+++ b/tests/000-flaky/basic_changelog_changelog-snapshot.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../snapshot.rc
+
+cleanup;
+ROLLOVER_TIME=3
+
+TEST verify_lvm_version;
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+BRICK_LOG=$(echo "$L1" | tr / - | sed 's/^-//g')
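+#Brick log file name is the brick path with '/' replaced by '-' and the
+#leading '-' stripped; used below to grep the changelog barrier messages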
+TEST $CLI volume start $V0
+
+#Enable changelog
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Create snapshot
+S1="${V0}-snap1"
+
+mkdir $M0/RENAME
+mkdir $M0/LINK
+mkdir $M0/UNLINK
+mkdir $M0/RMDIR
+mkdir $M0/SYMLINK
+
+for i in {1..400} ; do touch $M0/RENAME/file$i; done
+for i in {1..400} ; do touch $M0/LINK/file$i; done
+for i in {1..400} ; do touch $M0/UNLINK/file$i; done
+for i in {1..400} ; do mkdir $M0/RMDIR/dir$i; done
+for i in {1..400} ; do touch $M0/SYMLINK/file$i; done
+
+#Write I/O in background
+for i in {1..400} ; do touch $M0/file$i 2>/dev/null; done &
+for i in {1..400} ; do mknod $M0/mknod-file$i p 2>/dev/null; done &
+for i in {1..400} ; do mkdir $M0/dir$i 2>/dev/null; done &
+for i in {1..400} ; do mv $M0/RENAME/file$i $M0/RENAME/rn-file$i 2>/dev/null; done &
+for i in {1..400} ; do ln $M0/LINK/file$i $M0/LINK/ln-file$i 2>/dev/null; done &
+for i in {1..400} ; do rm -f $M0/UNLINK/file$i 2>/dev/null; done &
+for i in {1..400} ; do rmdir $M0/RMDIR/dir$i 2>/dev/null; done &
+for i in {1..400} ; do ln -s $M0/SYMLINK/file$i $M0/SYMLINK/sym-file$i 2>/dev/null; done &
+
+sleep 1
+TEST $CLI snapshot create $S1 $V0 no-timestamp
+TEST snapshot_exists 0 $S1
+
+TEST grep '"Enabled changelog barrier"' /var/log/glusterfs/bricks/$BRICK_LOG.log
+TEST grep '"Disabled changelog barrier"' /var/log/glusterfs/bricks/$BRICK_LOG.log
+
+TEST glusterfs -s $H0 --volfile-id=/snaps/$S1/$V0 $M1
+
+#Clean up
+TEST $CLI volume stop $V0 force
+cleanup;
diff --git a/tests/basic/distribute/rebal-all-nodes-migrate.t b/tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t
index acc4ffefecc..eb5d3305ac1 100644
--- a/tests/basic/distribute/rebal-all-nodes-migrate.t
+++ b/tests/000-flaky/basic_distribute_rebal-all-nodes-migrate.t
@@ -1,8 +1,8 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../cluster.rc
-. $(dirname $0)/../../dht.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../dht.rc
# Check if every single rebalance process migrated some files
@@ -10,11 +10,9 @@
function cluster_rebal_all_nodes_migrated_files {
val=0
a=$($CLI_1 volume rebalance $V0 status | grep "completed" | awk '{print $2}');
-# echo $a
b=($a)
for i in "${b[@]}"
do
-# echo "$i";
if [ "$i" -eq "0" ]; then
echo "false";
val=1;
diff --git a/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t b/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t
new file mode 100644
index 00000000000..42808ce0c0e
--- /dev/null
+++ b/tests/000-flaky/basic_ec_ec-quorum-count-partial-failure.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#This test checks that a partial failure of a fop results in failure of the main fop only
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5}
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=/$V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST dd if=/dev/urandom of=$M0/a bs=12347 count=1
+TEST dd if=/dev/urandom of=$M0/b bs=12347 count=1
+TEST cp $M0/b $M0/c
+TEST fallocate -p -l 101 $M0/c
+TEST $CLI volume stop $V0
+TEST $CLI volume set $V0 debug.delay-gen posix;
+TEST $CLI volume set $V0 delay-gen.delay-duration 10000000;
+TEST $CLI volume set $V0 delay-gen.enable WRITE;
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 disperse.quorum-count 6
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+cksum=$(dd if=$M0/a bs=12345 count=1 | md5sum | awk '{print $1}')
+truncate -s 12345 $M0/a & #While the write is delayed by delay-gen, introduce a failure
+fallocate -p -l 101 $M0/b &
+sleep 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST wait
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+EXPECT "12345" stat --format=%s $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
+cksum_after_heal=$(dd if=$M0/a | md5sum | awk '{print $1}')
+TEST [[ $cksum == $cksum_after_heal ]]
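+#c got the same punch-hole before the failure was introduced, so after heal
+#b is expected to match c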
+cksum=$(dd if=$M0/c | md5sum | awk '{print $1}')
+cksum_after_heal=$(dd if=$M0/b | md5sum | awk '{print $1}')
+TEST [[ $cksum == $cksum_after_heal ]]
+
+cleanup;
diff --git a/tests/basic/mount-nfs-auth.t b/tests/000-flaky/basic_mount-nfs-auth.t
index 3d4a9cff00b..3d4a9cff00b 100755..100644
--- a/tests/basic/mount-nfs-auth.t
+++ b/tests/000-flaky/basic_mount-nfs-auth.t
diff --git a/tests/bugs/core/multiplex-limit-issue-151.t b/tests/000-flaky/bugs_core_multiplex-limit-issue-151.t
index dc9013061b0..5a88f97d726 100644
--- a/tests/bugs/core/multiplex-limit-issue-151.t
+++ b/tests/000-flaky/bugs_core_multiplex-limit-issue-151.t
@@ -1,8 +1,8 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../traps.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../traps.rc
+. $(dirname $0)/../volume.rc
function count_up_bricks {
$CLI --xml volume status all | grep '<status>1' | wc -l
diff --git a/tests/bugs/distribute/bug-1117851.t b/tests/000-flaky/bugs_distribute_bug-1117851.t
index 62cb6b66ab4..5980bf2fd4b 100755..100644
--- a/tests/bugs/distribute/bug-1117851.t
+++ b/tests/000-flaky/bugs_distribute_bug-1117851.t
@@ -2,8 +2,8 @@
SCRIPT_TIMEOUT=250
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
create_files () {
for i in {1..1000}; do
diff --git a/tests/bugs/distribute/bug-1122443.t b/tests/000-flaky/bugs_distribute_bug-1122443.t
index 906be7072bd..abd37082b33 100644
--- a/tests/bugs/distribute/bug-1122443.t
+++ b/tests/000-flaky/bugs_distribute_bug-1122443.t
@@ -1,8 +1,8 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../dht.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
make_files() {
mkdir $1 && \
@@ -42,8 +42,8 @@ TEST glusterfs -s $H0 --volfile-id $V0 $M0
TEST make_files $M0/subdir
# Get mtime before migration
-BEFORE="$(stat -c %n:%Y $M0/subdir/* | tr '\n' ',')"
-
+BEFORE="$(stat -c %n:%Y $M0/subdir/* | sort | tr '\n' ',')"
+echo $BEFORE
# Migrate brick
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 start
@@ -51,9 +51,10 @@ EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}0 commit
# Get mtime after migration
-EXPECT_WITHIN 5 RECONNECTED bug_1113050_workaround $M0/subdir/*
-AFTER="$(stat -c %n:%Y $M0/subdir/* | tr '\n' ',')"
-
+EXPECT_WITHIN 30 RECONNECTED bug_1113050_workaround $M0/subdir/symlink
+sleep 3
+AFTER="$(stat -c %n:%Y $M0/subdir/* | sort | tr '\n' ',')"
+echo $AFTER
# Check if mtime is unchanged
TEST [ "$AFTER" == "$BEFORE" ]
diff --git a/tests/bugs/glusterd/bug-857330/common.rc b/tests/000-flaky/bugs_glusterd_bug-857330/common.rc
index d0aa4b1a640..bd122eff18c 100644
--- a/tests/bugs/glusterd/bug-857330/common.rc
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/common.rc
@@ -1,4 +1,4 @@
-. $(dirname $0)/../../../include.rc
+. $(dirname $0)/../../include.rc
UUID_REGEX='[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}'
diff --git a/tests/bugs/glusterd/bug-857330/normal.t b/tests/000-flaky/bugs_glusterd_bug-857330/normal.t
index ad0c8844fae..6c1cf54ec3c 100755
--- a/tests/bugs/glusterd/bug-857330/normal.t
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/normal.t
@@ -1,7 +1,7 @@
#!/bin/bash
. $(dirname $0)/common.rc
-. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
@@ -14,7 +14,7 @@ TEST $CLI volume start $V0;
TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
+TEST $PYTHON $(dirname $0)/../../utils/create-files.py \
--multi -b 10 -d 10 -n 10 $M0;
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
diff --git a/tests/bugs/glusterd/bug-857330/xml.t b/tests/000-flaky/bugs_glusterd_bug-857330/xml.t
index 8383d2a0711..11785adacdb 100755
--- a/tests/bugs/glusterd/bug-857330/xml.t
+++ b/tests/000-flaky/bugs_glusterd_bug-857330/xml.t
@@ -1,7 +1,7 @@
#!/bin/bash
. $(dirname $0)/common.rc
-. $(dirname $0)/../../../volume.rc
+. $(dirname $0)/../../volume.rc
cleanup;
@@ -15,7 +15,7 @@ TEST $CLI volume start $V0;
TEST glusterfs -s $H0 --volfile-id=$V0 $M0;
-TEST $PYTHON $(dirname $0)/../../../utils/create-files.py \
+TEST $PYTHON $(dirname $0)/../../utils/create-files.py \
--multi -b 10 -d 10 -n 10 $M0;
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
diff --git a/tests/bugs/glusterd/quorum-value-check.t b/tests/000-flaky/bugs_glusterd_quorum-value-check.t
index aaf636274b6..a431b8c4fd4 100755..100644
--- a/tests/bugs/glusterd/quorum-value-check.t
+++ b/tests/000-flaky/bugs_glusterd_quorum-value-check.t
@@ -1,7 +1,9 @@
#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
function check_quorum_nfs() {
local qnfs="$(less /var/lib/glusterd/nfs/nfs-server.vol | grep "quorum-count"| awk '{print $3}')"
diff --git a/tests/bugs/nfs/bug-1116503.t b/tests/000-flaky/bugs_nfs_bug-1116503.t
index dd3998df150..fc50021acc7 100644
--- a/tests/bugs/nfs/bug-1116503.t
+++ b/tests/000-flaky/bugs_nfs_bug-1116503.t
@@ -3,9 +3,9 @@
# Verify that mounting NFS over UDP (MOUNT service only) works.
#
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../nfs.rc
#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
diff --git a/tests/features/lock-migration/lkmigration-set-option.t b/tests/000-flaky/features_lock-migration_lkmigration-set-option.t
index 4340438591f..1327ef3579f 100644
--- a/tests/features/lock-migration/lkmigration-set-option.t
+++ b/tests/000-flaky/features_lock-migration_lkmigration-set-option.t
@@ -1,7 +1,7 @@
#!/bin/bash
# Test to check
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
#Check lock-migration set option sanity
cleanup;
diff --git a/tests/afr.rc b/tests/afr.rc
index 35f352df78f..241789903ba 100644
--- a/tests/afr.rc
+++ b/tests/afr.rc
@@ -105,3 +105,19 @@ function get_quorum_type()
local repl_id="$3"
cat $m/.meta/graphs/active/$v-replicate-$repl_id/private|grep quorum-type|awk '{print $3}'
}
+
+function afr_private_key_value()
+{
+ local v=$1
+ local m=$2
+ local replica_id=$3
+ local key=$4
+#xargs at the end will strip leading spaces
+ grep -E "^${key} = " $m/.meta/graphs/active/${v}-replicate-${replica_id}/private | cut -f2 -d'=' | xargs
+}
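+#Example (as used in tests/basic/afr/afr-anon-inode.t):
+#  afr_private_key_value $V0 $M0 0 "use-anonymous-inode"  -> prints 1 or 0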
+
+function afr_anon_entry_count()
+{
+ local b=$1
+ ls $b/.glusterfs-anonymous-inode* | wc -l
+}
diff --git a/tests/basic/afr/afr-anon-inode-no-quorum.t b/tests/basic/afr/afr-anon-inode-no-quorum.t
new file mode 100644
index 00000000000..896ba0c9b2c
--- /dev/null
+++ b/tests/basic/afr/afr-anon-inode-no-quorum.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+#Test that anon-inode entry is not cleaned up as long as there exists at least
+#one valid entry
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.readdir-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST touch $M0/a $M0/b
+
+gfid_a=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/a))
+gfid_b=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/b))
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST mv $M0/a $M0/a-new
+TEST mv $M0/b $M0/b-new
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST ! ls $M0/a
+TEST ! ls $M0/b
+anon_inode_name=$(ls -a $B0/${V0}0 | grep glusterfs-anonymous-inode)
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_a
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_b
+#Make sure index heal doesn't happen after enabling heal
+TEST setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1
+TEST rm -f $B0/${V0}1/.glusterfs/indices/xattrop/*
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+#Allow time for a scan
+sleep 5
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_a
+TEST stat $B0/${V0}0/$anon_inode_name/$gfid_b
+inum_b=$(STAT_INO $B0/${V0}0/$anon_inode_name/$gfid_b)
+TEST rm -f $M0/a-new
+TEST stat $M0/b-new
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}1
+EXPECT "$inum_b" STAT_INO $B0/${V0}0/b-new
+
+cleanup
diff --git a/tests/basic/afr/afr-anon-inode.t b/tests/basic/afr/afr-anon-inode.t
new file mode 100644
index 00000000000..f4cf37a2fa0
--- /dev/null
+++ b/tests/basic/afr/afr-anon-inode.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+#Tests that the afr anon-inode cases work as expected
+#These are cases where, during entry-heal/name-heal, we don't know the entry for an inode,
+#so these inodes are kept in a special directory
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST $CLI volume set $V0 cluster.use-anonymous-inode no
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST $CLI volume set $V0 cluster.use-anonymous-inode yes
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "use-anonymous-inode"
+TEST mkdir -p $M0/d1/b $M0/d2/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST mv $M0/d2/a $M0/d1
+TEST mv $M0/d1/b $M0/d2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+anon_inode_name=$(ls -a $B0/${V0}0 | grep glusterfs-anonymous-inode)
+TEST [[ -d $B0/${V0}1/$anon_inode_name ]]
+TEST [[ -d $B0/${V0}2/$anon_inode_name ]]
+anon_gfid=$(gf_get_gfid_xattr $B0/${V0}0/$anon_inode_name)
+EXPECT "$anon_gfid" gf_get_gfid_xattr $B0/${V0}1/$anon_inode_name
+EXPECT "$anon_gfid" gf_get_gfid_xattr $B0/${V0}2/$anon_inode_name
+
+TEST ! ls $M0/$anon_inode_name
+EXPECT "^4$" echo $(ls -a $M0 | wc -l)
+
+#Test the anon-inode purging code path via shd
+TEST $CLI volume heal $V0 disable
+TEST mkdir $M0/l0 $M0/l1 $M0/l2
+TEST touch $M0/del-file $M0/del-file-nolink $M0/l0/file
+TEST ln $M0/del-file $M0/del-file-link
+TEST ln $M0/l0/file $M0/l1/file-link1
+TEST ln $M0/l0/file $M0/l2/file-link2
+TEST mkdir -p $M0/del-recursive-dir/d1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST rm -f $M0/del-file $M0/del-file-nolink
+TEST rm -rf $M0/del-recursive-dir
+TEST mv $M0/d1/a $M0/d2
+TEST mv $M0/l0/file $M0/l0/renamed-file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 0
+
+nolink_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-file-nolink))
+link_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-file))
+dir_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/del-recursive-dir))
+rename_dir_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/d1/a))
+rename_file_gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/l0/file))
+TEST ! stat $M0/del-file
+TEST stat $B0/${V0}0/$anon_inode_name/$link_gfid
+TEST ! stat $M0/del-file-nolink
+TEST ! stat $B0/${V0}0/$anon_inode_name/$nolink_gfid
+TEST ! stat $M0/del-recursive-dir
+TEST stat $B0/${V0}0/$anon_inode_name/$dir_gfid
+TEST ! stat $M0/d1/a
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_dir_gfid
+TEST ! stat $M0/l0/file
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_file_gfid
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/l1/file-link1 $M0/l1/renamed-file-link1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 1
+TEST ! stat $M0/l1/file-link1
+TEST stat $B0/${V0}1/$anon_inode_name/$rename_file_gfid
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST mv $M0/l2/file-link2 $M0/l2/renamed-file-link2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 2
+TEST ! stat $M0/l2/file-link2
+TEST stat $B0/${V0}2/$anon_inode_name/$rename_file_gfid
+
+#Simulate the case where only anon-inodes are present on all bricks
+TEST rm -f $M0/l0/renamed-file $M0/l1/renamed-file-link1 $M0/l2/renamed-file-link2
+
+#Test that shd doesn't cleanup anon-inodes when some bricks are down
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+$CLI volume heal $V0
+sleep 5 #Allow time for completion of one scan
+TEST stat $B0/${V0}0/$anon_inode_name/$link_gfid
+TEST stat $B0/${V0}0/$anon_inode_name/$rename_dir_gfid
+TEST stat $B0/${V0}0/$anon_inode_name/$dir_gfid
+rename_dir_inum=$(STAT_INO $B0/${V0}0/$anon_inode_name/$rename_dir_gfid)
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status $V0 1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/${V0}2
+
+#Test that rename indeed happened instead of rmdir/mkdir
+renamed_dir_inum=$(STAT_INO $B0/${V0}0/d2/a)
+EXPECT "$rename_dir_inum" echo $renamed_dir_inum
+cleanup;
diff --git a/tests/basic/afr/afr-seek.t b/tests/basic/afr/afr-seek.t
new file mode 100644
index 00000000000..c12ee011660
--- /dev/null
+++ b/tests/basic/afr/afr-seek.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+SEEK=$(dirname $0)/seek
+build_tester $(dirname $0)/../seek.c -o ${SEEK}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST mkdir -p $B0/${V0}{0..2}
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+
+TEST $CLI volume start $V0
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST ${SEEK} create ${M0}/test 0 1 1048576 1
+# Determine underlying filesystem allocation block size
+BSIZE="$(($(${SEEK} scan ${M0}/test hole 0) * 2))"
+
+TEST ${SEEK} create ${M0}/test 0 ${BSIZE} $((${BSIZE} * 4 + 512)) ${BSIZE}
+
+EXPECT "^0$" ${SEEK} scan ${M0}/test data 0
+EXPECT "^$((${BSIZE} / 2))$" ${SEEK} scan ${M0}/test data $((${BSIZE} / 2))
+EXPECT "^$((${BSIZE} - 1))$" ${SEEK} scan ${M0}/test data $((${BSIZE} - 1))
+EXPECT "^$((${BSIZE} * 4))$" ${SEEK} scan ${M0}/test data ${BSIZE}
+EXPECT "^$((${BSIZE} * 4))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 4))
+EXPECT "^$((${BSIZE} * 5))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5))
+EXPECT "^$((${BSIZE} * 5 + 511))$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5 + 511))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 5 + 512))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test data $((${BSIZE} * 6))
+
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole 0
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole $((${BSIZE} / 2))
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole $((${BSIZE} - 1))
+EXPECT "^${BSIZE}$" ${SEEK} scan ${M0}/test hole ${BSIZE}
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 4))
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5))
+EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 511))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 512))
+EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 6))
+
+rm -f ${SEEK}
+cleanup
+
+# Centos6 regression slaves seem to not support SEEK_DATA/SEEK_HOLE
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/basic/afr/durability-off.t b/tests/basic/afr/durability-off.t
index 155ffa09ef0..6e0f18b88f8 100644
--- a/tests/basic/afr/durability-off.t
+++ b/tests/basic/afr/durability-off.t
@@ -26,6 +26,8 @@ TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
EXPECT "^0$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
#Test that fsyncs happen when durability is on
TEST $CLI volume set $V0 cluster.ensure-durability on
TEST $CLI volume set $V0 performance.strict-write-ordering on
diff --git a/tests/basic/afr/entry-self-heal-anon-dir-off.t b/tests/basic/afr/entry-self-heal-anon-dir-off.t
new file mode 100644
index 00000000000..7bb6ee14193
--- /dev/null
+++ b/tests/basic/afr/entry-self-heal-anon-dir-off.t
@@ -0,0 +1,459 @@
+#!/bin/bash
+
+#This file checks if missing entry self-heal and entry self-heal are working
+#as expected.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function get_file_type {
+ stat -c "%a:%F:%g:%t:%T:%u" $1
+}
+
+function diff_dirs {
+ diff <(ls $1 | sort) <(ls $2 | sort)
+}
+
+function heal_status {
+ local f1_path="${1}/${3}"
+ local f2_path="${2}/${3}"
+ local insync=""
+ diff_dirs $f1_path $f2_path
+ if [ $? -eq 0 ];
+ then
+ insync="Y"
+ else
+ insync="N"
+ fi
+ local xattr11=$(get_hex_xattr trusted.afr.$V0-client-0 $f1_path)
+ local xattr12=$(get_hex_xattr trusted.afr.$V0-client-1 $f1_path)
+ local xattr21=$(get_hex_xattr trusted.afr.$V0-client-0 $f2_path)
+ local xattr22=$(get_hex_xattr trusted.afr.$V0-client-1 $f2_path)
+ local dirty1=$(get_hex_xattr trusted.afr.dirty $f1_path)
+ local dirty2=$(get_hex_xattr trusted.afr.dirty $f2_path)
+ if [ -z $xattr11 ]; then xattr11="000000000000000000000000"; fi
+ if [ -z $xattr12 ]; then xattr12="000000000000000000000000"; fi
+ if [ -z $xattr21 ]; then xattr21="000000000000000000000000"; fi
+ if [ -z $xattr22 ]; then xattr22="000000000000000000000000"; fi
+ if [ -z $dirty1 ]; then dirty1="000000000000000000000000"; fi
+ if [ -z $dirty2 ]; then dirty2="000000000000000000000000"; fi
+ echo ${insync}${xattr11}${xattr12}${xattr21}${xattr22}${dirty1}${dirty2}
+}
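+#When the two copies are in sync and every afr/dirty xattr is zero (or absent),
+#this prints "Y" followed by six runs of 24 zeros (see the EXPECT checks below)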
+
+function is_heal_done {
+ local zero_xattr="000000000000000000000000"
+ if [ "$(heal_status $@)" == "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function print_pending_heals {
+ local result=":"
+ for i in "$@";
+ do
+ if [ "N" == $(is_heal_done $B0/${V0}0 $B0/${V0}1 $i) ];
+ then
+ result="$result:$i"
+ fi
+ done
+#To avoid a spurious match in EXPECT_WITHIN, print a character that cannot appear in file names
+ if [ $result == ":" ]; then result="~"; fi
+ echo $result
+}
+
+zero_xattr="000000000000000000000000"
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 cluster.use-anonymous-inode off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.readdir-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --use-readdirp=no $M0
+cd $M0
+#_me_ dirs are where missing-entry self-heal happens, _heal dirs are where directory self-heal happens
+#spb means split-brain, fool means all bricks are fools
+
+#source_self_accusing means there exists a source and a sink, and the source also accuses itself.
+#This simulates failures where fops failed on a brick without the brick going down,
+#something like EACCES/EDQUOT etc.
+
+TEST mkdir spb_heal spb spb_me_heal spb_me fool_heal fool_me v1_fool_heal v1_fool_me source_creations_heal source_deletions_heal source_creations_me source_deletions_me v1_dirty_me v1_dirty_heal source_self_accusing
+TEST mkfifo source_deletions_heal/fifo
+TEST mknod source_deletions_heal/block b 4 5
+TEST mknod source_deletions_heal/char c 1 5
+TEST touch source_deletions_heal/file
+TEST ln -s source_deletions_heal/file source_deletions_heal/slink
+TEST mkdir source_deletions_heal/dir1
+TEST mkdir source_deletions_heal/dir1/dir2
+
+TEST mkfifo source_deletions_me/fifo
+TEST mknod source_deletions_me/block b 4 5
+TEST mknod source_deletions_me/char c 1 5
+TEST touch source_deletions_me/file
+TEST ln -s source_deletions_me/file source_deletions_me/slink
+TEST mkdir source_deletions_me/dir1
+TEST mkdir source_deletions_me/dir1/dir2
+
+TEST mkfifo source_self_accusing/fifo
+TEST mknod source_self_accusing/block b 4 5
+TEST mknod source_self_accusing/char c 1 5
+TEST touch source_self_accusing/file
+TEST ln -s source_self_accusing/file source_self_accusing/slink
+TEST mkdir source_self_accusing/dir1
+TEST mkdir source_self_accusing/dir1/dir2
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST touch spb_heal/0 spb/0 spb_me_heal/0 spb_me/0 fool_heal/0 fool_me/0 v1_fool_heal/0 v1_fool_me/0 v1_dirty_heal/0 v1_dirty_me/0
+TEST rm -rf source_deletions_heal/fifo source_deletions_heal/block source_deletions_heal/char source_deletions_heal/file source_deletions_heal/slink source_deletions_heal/dir1
+TEST rm -rf source_deletions_me/fifo source_deletions_me/block source_deletions_me/char source_deletions_me/file source_deletions_me/slink source_deletions_me/dir1
+TEST rm -rf source_self_accusing/fifo source_self_accusing/block source_self_accusing/char source_self_accusing/file source_self_accusing/slink source_self_accusing/dir1
+
+#Test that the files are deleted
+TEST ! stat $B0/${V0}1/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}1/source_deletions_heal/block
+TEST ! stat $B0/${V0}1/source_deletions_heal/char
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/slink
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1
+TEST ! stat $B0/${V0}1/source_deletions_me/fifo
+TEST ! stat $B0/${V0}1/source_deletions_me/block
+TEST ! stat $B0/${V0}1/source_deletions_me/char
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/slink
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1
+TEST ! stat $B0/${V0}1/source_self_accusing/fifo
+TEST ! stat $B0/${V0}1/source_self_accusing/block
+TEST ! stat $B0/${V0}1/source_self_accusing/char
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/slink
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1
+
+
+TEST mkfifo source_creations_heal/fifo
+TEST mknod source_creations_heal/block b 4 5
+TEST mknod source_creations_heal/char c 1 5
+TEST touch source_creations_heal/file
+TEST ln -s source_creations_heal/file source_creations_heal/slink
+TEST mkdir source_creations_heal/dir1
+TEST mkdir source_creations_heal/dir1/dir2
+
+TEST mkfifo source_creations_me/fifo
+TEST mknod source_creations_me/block b 4 5
+TEST mknod source_creations_me/char c 1 5
+TEST touch source_creations_me/file
+TEST ln -s source_creations_me/file source_creations_me/slink
+TEST mkdir source_creations_me/dir1
+TEST mkdir source_creations_me/dir1/dir2
+
+$CLI volume stop $V0
+
+#simulate the fool-fool scenario for the fool_* dirs
+setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}1/{v1_fool_heal,v1_fool_me}
+
+#Simulate the v1-dirty (self-accusing but no pending ops on others) scenario for the v1_dirty_* dirs
+setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1/v1_dirty_{heal,me}
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}1/v1_dirty_{heal,me}
+
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+TEST touch spb_heal/1 spb/0 spb_me_heal/1 spb_me/0 fool_heal/1 fool_me/1 v1_fool_heal/1 v1_fool_me/1
+
+$CLI volume stop $V0
+
+#simulate the fool-fool scenario for the fool_* dirs
+setfattr -x trusted.afr.$V0-client-1 $B0/${V0}0/{fool_heal,fool_me}
+setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/{fool_heal,fool_me}
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/{v1_fool_heal,v1_fool_me}
+
+#simulate self-accusing for source_self_accusing
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000006 $B0/${V0}0/source_self_accusing
+
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+# Check if conservative merges happened correctly on _me_ dirs
+TEST stat spb_me_heal/1
+TEST stat $B0/${V0}0/spb_me_heal/1
+TEST stat $B0/${V0}1/spb_me_heal/1
+
+TEST stat spb_me_heal/0
+TEST stat $B0/${V0}0/spb_me_heal/0
+TEST stat $B0/${V0}1/spb_me_heal/0
+
+TEST stat fool_me/1
+TEST stat $B0/${V0}0/fool_me/1
+TEST stat $B0/${V0}1/fool_me/1
+
+TEST stat fool_me/0
+TEST stat $B0/${V0}0/fool_me/0
+TEST stat $B0/${V0}1/fool_me/0
+
+TEST stat v1_fool_me/0
+TEST stat $B0/${V0}0/v1_fool_me/0
+TEST stat $B0/${V0}1/v1_fool_me/0
+
+TEST stat v1_fool_me/1
+TEST stat $B0/${V0}0/v1_fool_me/1
+TEST stat $B0/${V0}1/v1_fool_me/1
+
+TEST stat v1_dirty_me/0
+TEST stat $B0/${V0}0/v1_dirty_me/0
+TEST stat $B0/${V0}1/v1_dirty_me/0
+
+#Check if files that have gfid-mismatches in _me_ are giving EIO
+TEST ! stat spb_me/0
+
+#Check if stale files are deleted on access
+TEST ! stat source_deletions_me/fifo
+TEST ! stat $B0/${V0}0/source_deletions_me/fifo
+TEST ! stat $B0/${V0}1/source_deletions_me/fifo
+TEST ! stat source_deletions_me/block
+TEST ! stat $B0/${V0}0/source_deletions_me/block
+TEST ! stat $B0/${V0}1/source_deletions_me/block
+TEST ! stat source_deletions_me/char
+TEST ! stat $B0/${V0}0/source_deletions_me/char
+TEST ! stat $B0/${V0}1/source_deletions_me/char
+TEST ! stat source_deletions_me/file
+TEST ! stat $B0/${V0}0/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat source_deletions_me/file
+TEST ! stat $B0/${V0}0/source_deletions_me/file
+TEST ! stat $B0/${V0}1/source_deletions_me/file
+TEST ! stat source_deletions_me/dir1/dir2
+TEST ! stat $B0/${V0}0/source_deletions_me/dir1/dir2
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1/dir2
+TEST ! stat source_deletions_me/dir1
+TEST ! stat $B0/${V0}0/source_deletions_me/dir1
+TEST ! stat $B0/${V0}1/source_deletions_me/dir1
+
+#Test if the files created as part of access are healed correctly
+r=$(get_file_type source_creations_me/fifo)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/fifo
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/fifo
+TEST [ -p source_creations_me/fifo ]
+
+r=$(get_file_type source_creations_me/block)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/block
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}1/source_creations_me/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}0/source_creations_me/block
+TEST [ -b source_creations_me/block ]
+
+r=$(get_file_type source_creations_me/char)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/char
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}1/source_creations_me/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}0/source_creations_me/char
+TEST [ -c source_creations_me/char ]
+
+r=$(get_file_type source_creations_me/file)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/file
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/file
+TEST [ -f source_creations_me/file ]
+
+r=$(get_file_type source_creations_me/slink)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/slink
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/slink
+TEST [ -h source_creations_me/slink ]
+
+r=$(get_file_type source_creations_me/dir1/dir2)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/dir1/dir2
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1/dir2
+TEST [ -d source_creations_me/dir1/dir2 ]
+
+r=$(get_file_type source_creations_me/dir1)
+EXPECT "$r" get_file_type $B0/${V0}0/source_creations_me/dir1
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_me/dir1
+TEST [ -d source_creations_me/dir1 ]
+
+#Trigger heal and check that the _heal dirs are healed properly
+#Trigger a change in the event generation number so that inodes get refreshed during lookup
+TEST kill_brick $V0 $H0 $B0/${V0}1
+$CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST stat spb_heal
+TEST stat spb_me_heal
+TEST stat fool_heal
+TEST stat fool_me
+TEST stat v1_fool_heal
+TEST stat v1_fool_me
+TEST stat source_deletions_heal
+TEST stat source_deletions_me
+TEST stat source_self_accusing
+TEST stat source_creations_heal
+TEST stat source_creations_me
+TEST stat v1_dirty_heal
+TEST stat v1_dirty_me
+TEST $CLI volume stop $V0
+TEST rm -rf $B0/${V0}{0,1}/.glusterfs/indices/xattrop/*
+
+$CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+rm -f $M0/FILE
+EXPECT "1" count_index_entries $B0/${V0}0
+EXPECT "1" count_index_entries $B0/${V0}1
+
+TEST $CLI volume stop $V0;
+
+#Create entries for fool_heal and fool_me to ensure they are fully healed and dirty xattrs erased, before triggering index heal
+create_brick_xattrop_entry $B0/${V0}0 fool_heal fool_me source_creations_heal/dir1
+
+$CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+$CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0;
+EXPECT_WITHIN $HEAL_TIMEOUT "~" print_pending_heals spb_heal spb_me_heal fool_heal fool_me v1_fool_heal v1_fool_me source_deletions_heal source_deletions_me source_creations_heal source_creations_me v1_dirty_heal v1_dirty_me source_self_accusing
+
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 spb_me_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 fool_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 fool_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_fool_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_fool_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_deletions_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_deletions_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_self_accusing
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_creations_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 source_creations_me
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_dirty_heal
+EXPECT "Y${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}${zero_xattr}" heal_status $B0/${V0}0 $B0/${V0}1 v1_dirty_me
+
+#Don't access the files/dirs from mount point as that may cause self-heals
+# Check if conservative merges happened correctly on heal dirs
+TEST stat $B0/${V0}0/spb_heal/1
+TEST stat $B0/${V0}1/spb_heal/1
+
+TEST stat $B0/${V0}0/spb_heal/0
+TEST stat $B0/${V0}1/spb_heal/0
+
+TEST stat $B0/${V0}0/fool_heal/1
+TEST stat $B0/${V0}1/fool_heal/1
+
+TEST stat $B0/${V0}0/fool_heal/0
+TEST stat $B0/${V0}1/fool_heal/0
+
+TEST stat $B0/${V0}0/v1_fool_heal/0
+TEST stat $B0/${V0}1/v1_fool_heal/0
+
+TEST stat $B0/${V0}0/v1_fool_heal/1
+TEST stat $B0/${V0}1/v1_fool_heal/1
+
+TEST stat $B0/${V0}0/v1_dirty_heal/0
+TEST stat $B0/${V0}1/v1_dirty_heal/0
+
+#Check if files that have gfid-mismatches in spb are giving EIO
+TEST ! stat spb/0
+
+#Check if stale files are deleted on access
+TEST ! stat $B0/${V0}0/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}1/source_deletions_heal/fifo
+TEST ! stat $B0/${V0}0/source_deletions_heal/block
+TEST ! stat $B0/${V0}1/source_deletions_heal/block
+TEST ! stat $B0/${V0}0/source_deletions_heal/char
+TEST ! stat $B0/${V0}1/source_deletions_heal/char
+TEST ! stat $B0/${V0}0/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}0/source_deletions_heal/file
+TEST ! stat $B0/${V0}1/source_deletions_heal/file
+TEST ! stat $B0/${V0}0/source_deletions_heal/dir1/dir2
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1/dir2
+TEST ! stat $B0/${V0}0/source_deletions_heal/dir1
+TEST ! stat $B0/${V0}1/source_deletions_heal/dir1
+
+#Check if stale files are deleted on access
+TEST ! stat $B0/${V0}0/source_self_accusing/fifo
+TEST ! stat $B0/${V0}1/source_self_accusing/fifo
+TEST ! stat $B0/${V0}0/source_self_accusing/block
+TEST ! stat $B0/${V0}1/source_self_accusing/block
+TEST ! stat $B0/${V0}0/source_self_accusing/char
+TEST ! stat $B0/${V0}1/source_self_accusing/char
+TEST ! stat $B0/${V0}0/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}0/source_self_accusing/file
+TEST ! stat $B0/${V0}1/source_self_accusing/file
+TEST ! stat $B0/${V0}0/source_self_accusing/dir1/dir2
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1/dir2
+TEST ! stat $B0/${V0}0/source_self_accusing/dir1
+TEST ! stat $B0/${V0}1/source_self_accusing/dir1
+
+#Test if the files created as part of full self-heal have the correct types
+r=$(get_file_type $B0/${V0}0/source_creations_heal/fifo)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/fifo
+TEST [ -p $B0/${V0}0/source_creations_heal/fifo ]
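+# stat's %t/%T format specifiers print the major/minor device numbers in hex, so
+# the checks below verify that the healed block (4,5) and char (1,5) devices
+# kept their device numbers.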
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}1/source_creations_heal/block
+EXPECT "^4 5$" stat -c "%t %T" $B0/${V0}0/source_creations_heal/block
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/block)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/block
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/char)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}1/source_creations_heal/char
+EXPECT "^1 5$" stat -c "%t %T" $B0/${V0}0/source_creations_heal/char
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/file)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/file
+TEST [ -f $B0/${V0}0/source_creations_heal/file ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/slink)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/slink
+TEST [ -h $B0/${V0}0/source_creations_heal/slink ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/dir1/dir2)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/dir1/dir2
+TEST [ -d $B0/${V0}0/source_creations_heal/dir1/dir2 ]
+
+r=$(get_file_type $B0/${V0}0/source_creations_heal/dir1)
+EXPECT "$r" get_file_type $B0/${V0}1/source_creations_heal/dir1
+TEST [ -d $B0/${V0}0/source_creations_heal/dir1 ]
+
+cd -
+
+#Anonymous directory shouldn't be created
+TEST mkdir $M0/rename-dir
+before_rename=$(STAT_INO $B0/${V0}1/rename-dir)
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/rename-dir $M0/new-name
+TEST $CLI volume start $V0 force
+#'spb' is in split-brain so pending-heal-count will be 2
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+after_rename=$(STAT_INO $B0/${V0}1/new-name)
+EXPECT "0" echo $(ls -a $B0/${V0}0/ | grep anonymous-inode | wc -l)
+EXPECT "0" echo $(ls -a $B0/${V0}1/ | grep anonymous-inode | wc -l)
+EXPECT_NOT "$before_rename" echo $after_rename
+cleanup
diff --git a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
index f4aa351e461..35e295dc170 100644
--- a/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
+++ b/tests/basic/afr/gfid-mismatch-resolution-with-fav-child-policy.t
@@ -168,8 +168,8 @@ TEST [ "$gfid_1" != "$gfid_2" ]
#We know that second brick has the bigger size file
BIGGER_FILE_MD5=$(md5sum $B0/${V0}1/f3 | cut -d\ -f1)
-TEST ls $M0/f3
-TEST cat $M0/f3
+TEST ls $M0 #Trigger entry heal via readdir inode refresh
+TEST cat $M0/f3 #Trigger data heal via readv inode refresh
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#gfid split-brain should be resolved
@@ -215,8 +215,8 @@ TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
-TEST ls $M0/f4
-TEST cat $M0/f4
+TEST ls $M0 #Trigger entry heal via readdir inode refresh
+TEST cat $M0/f4 #Trigger data heal via readv inode refresh
EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#gfid split-brain should be resolved
@@ -227,4 +227,3 @@ HEALED_MD5=$(md5sum $B0/${V0}2/f4 | cut -d\ -f1)
TEST [ "$MAJORITY_MD5" == "$HEALED_MD5" ]
cleanup;
-#G_TESTDEF_TEST_STATUS_NETBSD7=1501390
diff --git a/tests/basic/afr/gfid-self-heal.t b/tests/basic/afr/gfid-self-heal.t
index b54edbcae85..5a530681186 100644
--- a/tests/basic/afr/gfid-self-heal.t
+++ b/tests/basic/afr/gfid-self-heal.t
@@ -50,6 +50,10 @@ TEST kill_brick $V0 $H0 $B0/${V0}0
TEST touch $M0/a
gfid_1=$(gf_get_gfid_xattr $B0/${V0}1/a)
TEST touch $B0/${V0}0/a
+# storage/posix treats a file without a gfid that changed less than a second
+# ago as non-existent, so wait a second to force posix to treat this as a
+# valid file that merely lacks a gfid.
+sleep 1
$CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST stat $M0/a
@@ -62,6 +66,10 @@ TEST kill_brick $V0 $H0 $B0/${V0}0
TEST touch $M0/b
TEST mkdir $B0/${V0}0/b
TEST setfattr -x trusted.afr.$V0-client-0 $B0/${V0}1
+# storage/posix treats a file without a gfid that changed less than a second
+# ago as non-existent, so wait a second to force posix to treat this as a
+# valid file that merely lacks a gfid.
+sleep 1
$CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
TEST ! stat $M0/b
@@ -71,6 +79,10 @@ TEST "[[ -z \"$gfid_0\" ]]"
#Check gfid assigning doesn't happen when there is type mismatch
TEST touch $B0/${V0}1/c
TEST mkdir $B0/${V0}0/c
+# storage/posix treats a file without a gfid that changed less than a second
+# ago as non-existent, so wait a second to force posix to treat this as a
+# valid file that merely lacks a gfid.
+sleep 1
TEST ! stat $M0/c
gfid_1=$(gf_get_gfid_xattr $B0/${V0}1/c)
gfid_0=$(gf_get_gfid_xattr $B0/${V0}0/c)
@@ -81,6 +93,10 @@ TEST "[[ -z \"$gfid_0\" ]]"
# gfid split-brain
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST touch $B0/${V0}1/d
+# storage/posix treats a file without a gfid that changed less than a second
+# ago as non-existent, so wait a second to force posix to treat this as a
+# valid file that merely lacks a gfid.
+sleep 1
TEST ! stat $M0/d
gfid_1=$(gf_get_gfid_xattr $B0/${V0}1/d)
TEST "[[ -z \"$gfid_1\" ]]"
diff --git a/tests/basic/afr/halo.t b/tests/basic/afr/halo.t
new file mode 100644
index 00000000000..3f61f5a0402
--- /dev/null
+++ b/tests/basic/afr/halo.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+#Tests that basic halo functionality works as expected
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function get_up_child()
+{
+ if [ "1" == $(afr_private_key_value $V0 $M0 0 "child_up\[0\]") ];
+ then
+ echo 0
+ elif [ "1" == $(afr_private_key_value $V0 $M0 0 "child_up\[1\]") ]
+ then
+ echo 1
+ fi
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 cluster.halo-enabled yes
+TEST $CLI volume set $V0 cluster.halo-max-replicas 1
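+#With halo enabled and halo-max-replicas set to 1, AFR is expected to keep only
+#the lowest-latency child marked up and leave the other as a standby halo child;
+#the child_up/halo_child_up checks below rely on this behaviour.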
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[0\]"
+EXPECT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[1\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[0\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[1\]"
+
+up_id=$(get_up_child)
+TEST [[ ! -z "$up_id" ]]
+
+down_id=$((1-up_id))
+
+TEST kill_brick $V0 $H0 $B0/${V0}${up_id}
+#As max-replicas is configured to be 1, down_child should be up now
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "child_up\[${up_id}\]"
+EXPECT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[${up_id}\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[${down_id}\]"
+
+#Bring the brick back up and the state should be restored
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+
+up_id=$(get_up_child)
+TEST [[ ! -z "$up_id" ]]
+down_id=$((1-up_id))
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" afr_private_key_value $V0 $M0 0 "child_up\[${down_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "halo_child_up\[${up_id}\]"
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" afr_private_key_value $V0 $M0 0 "child_up\[${up_id}\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[0\]"
+EXPECT_NOT "^-1$" afr_private_key_value $V0 $M0 0 "child_latency\[1\]"
+
+cleanup;
diff --git a/tests/basic/afr/rename-data-loss.t b/tests/basic/afr/rename-data-loss.t
new file mode 100644
index 00000000000..256ee2aafce
--- /dev/null
+++ b/tests/basic/afr/rename-data-loss.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+#Self-heal tests
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+cd $M0
+TEST `echo "line1" >> file1`
+TEST mkdir dir1
+TEST mkdir dir2
+TEST mkdir -p dir1/dira/dirb
+TEST `echo "line1">>dir1/dira/dirb/file1`
+TEST mkdir delete_me
+TEST `echo "line1" >> delete_me/file1`
+
+#brick0 has witnessed the second write while brick1 is down.
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST `echo "line2" >> file1`
+TEST `echo "line2" >> dir1/dira/dirb/file1`
+TEST `echo "line2" >> delete_me/file1`
+
+#Toggle the bricks that are up/down.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/brick0
+
+#Rename when the 'source' brick0 for data-selfheals is down.
+mv file1 file2
+mv dir1/dira dir2
+
+#Delete a dir when brick0 is down.
+rm -rf delete_me
+cd -
+
+#Bring everything up and trigger heal
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/brick0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_anon_entry_count $B0/brick1
+
+#Remount to avoid reading from caches
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+EXPECT "line2" tail -1 $M0/file2
+EXPECT "line2" tail -1 $M0/dir2/dira/dirb/file1
+TEST ! stat $M0/delete_me/file1
+TEST ! stat $M0/delete_me
+
+anon_inode_name=$(ls -a $B0/brick0 | grep glusterfs-anonymous-inode)
+TEST [[ -d $B0/brick0/$anon_inode_name ]]
+TEST [[ -d $B0/brick1/$anon_inode_name ]]
+cleanup
diff --git a/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t b/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t
new file mode 100644
index 00000000000..7c249c4bcbd
--- /dev/null
+++ b/tests/basic/afr/split-brain-favorite-child-policy-client-side-healing.t
@@ -0,0 +1,124 @@
+#!/bin/bash
+
+#Test the client side split-brain resolution
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+
+count_files () {
+ ls $1 | wc -l
+}
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+TEST $CLI volume set $V0 cluster.quorum-count 1
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST mkdir $M0/data
+TEST touch $M0/data/file
+
+
+############ Client side healing using favorite-child-policy = mtime #################
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/data/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/data/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+mtime1=$(get_mtime $B0/${V0}0/data/file)
+mtime2=$(get_mtime $B0/${V0}1/data/file)
+if (( $(echo "$mtime1 > $mtime2" | bc -l) )); then
+ LATEST_MTIME_MD5=$(md5sum $B0/${V0}0/data/file | cut -d\ -f1)
+else
+ LATEST_MTIME_MD5=$(md5sum $B0/${V0}1/data/file | cut -d\ -f1)
+fi
+
+#file will be in split-brain
+cat $M0/data/file > /dev/null
+EXPECT "1" echo $?
+
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" afr_get_split_brain_count $V0
+cat $M0/data/file > /dev/null
+EXPECT "0" echo $?
+M0_MD5=$(md5sum $M0/data/file | cut -d\ -f1)
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_get_split_brain_count $V0
+TEST [ "$LATEST_MTIME_MD5" == "$M0_MD5" ]
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_MD5=$(md5sum $B0/${V0}0/data/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/data/file | cut -d\ -f1)
+TEST [ "$LATEST_MTIME_MD5" == "$B0_MD5" ]
+TEST [ "$LATEST_MTIME_MD5" == "$B1_MD5" ]
+
+############ Client side directory conservative merge #################
+TEST $CLI volume reset $V0 cluster.favorite-child-policy
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/data/test
+files=$(count_files $M0/data)
+EXPECT "2" echo $files
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/data/test1
+files=$(count_files $M0/data)
+EXPECT "2" echo $files
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#data dir will be in entry split-brain
+ls $M0/data > /dev/null
+EXPECT "2" echo $?
+
+TEST $CLI volume set $V0 cluster.favorite-child-policy mtime
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" afr_get_split_brain_count $V0
+
+
+ls $M0/data > /dev/null
+EXPECT "0" echo $?
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" afr_get_split_brain_count $V0
+#Entry Split-brain is gone, but data self-heal is pending on the files
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+cat $M0/data/test > /dev/null
+cat $M0/data/test1 > /dev/null
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+files=$(count_files $M0/data)
+EXPECT "3" echo $files
+
+TEST force_umount $M0
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+cleanup
diff --git a/tests/basic/afr/split-brain-heal-info.t b/tests/basic/afr/split-brain-heal-info.t
index 66275c57207..2e4742fff08 100644
--- a/tests/basic/afr/split-brain-heal-info.t
+++ b/tests/basic/afr/split-brain-heal-info.t
@@ -47,9 +47,11 @@ SPB_FILES=$(($SPB_FILES + 1))
#### Simulate entry-split-brain
TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
TEST touch $M0/espb/a
volume_start_force $V0
TEST kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
TEST mkdir $M0/espb/a
volume_start_force $V0
SPB_FILES=$(($SPB_FILES + 1))
diff --git a/tests/basic/afr/split-brain-healing-ctime.t b/tests/basic/afr/split-brain-healing-ctime.t
new file mode 100644
index 00000000000..676788fce3f
--- /dev/null
+++ b/tests/basic/afr/split-brain-healing-ctime.t
@@ -0,0 +1,252 @@
+#!/bin/bash
+
+#Test the split-brain resolution CLI commands.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function get_replicate_subvol_number {
+ local filename=$1
+ #get_backend_paths
+ if [ -f $B0/${V0}1/$filename ]
+ then
+ echo 0
+ elif [ -f $B0/${V0}3/$filename ]
+ then echo 1
+ else
+ echo -1
+ fi
+}
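+#(With 'replica 2' across four bricks, each file lands on one of two replica
+# pairs: bricks {1,2} for subvolume 0 or bricks {3,4} for subvolume 1, which is
+# what this helper detects from the backend paths.)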
+
+cleanup;
+
+AREQUAL_PATH=$(dirname $0)/../../utils
+GET_MDATA_PATH=$(dirname $0)/../../utils
+CFLAGS=""
+test "`uname -s`" != "Linux" && {
+ CFLAGS="$CFLAGS -lintl";
+}
+build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+cd $M0
+for i in {1..10}
+do
+ echo "Initial content">>file$i
+done
+
+replica_0_files_list=(`ls $B0/${V0}1|grep -v '^\.'`)
+replica_1_files_list=(`ls $B0/${V0}3|grep -v '^\.'`)
+
+############ Create data split-brain in the files. ###########################
+TEST kill_brick $V0 $H0 $B0/${V0}1
+for file in ${!replica_0_files_list[*]}
+do
+ echo "B1 is down">>${replica_0_files_list[$file]}
+done
+TEST kill_brick $V0 $H0 $B0/${V0}3
+for file in ${!replica_1_files_list[*]}
+do
+ echo "B3 is down">>${replica_1_files_list[$file]}
+done
+
+SMALLER_FILE_SIZE=$(stat -c %s file1)
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+for file in ${!replica_0_files_list[*]}
+do
+ echo "B2 is down">>${replica_0_files_list[$file]}
+ echo "appending more content to make it the bigger file">>${replica_0_files_list[$file]}
+done
+TEST kill_brick $V0 $H0 $B0/${V0}4
+for file in ${!replica_1_files_list[*]}
+do
+ echo "B4 is down">>${replica_1_files_list[$file]}
+ echo "appending more content to make it the bigger file">>${replica_1_files_list[$file]}
+done
+
+BIGGER_FILE_SIZE=$(stat -c %s file1)
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+
+############### Accessing the files should now give EIO. ###############################
+TEST ! cat file1
+TEST ! cat file2
+TEST ! cat file3
+TEST ! cat file4
+TEST ! cat file5
+TEST ! cat file6
+TEST ! cat file7
+TEST ! cat file8
+TEST ! cat file9
+TEST ! cat file10
+###################
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+
+################ Heal file1 using the bigger-file option ##############
+$CLI volume heal $V0 split-brain bigger-file /file1
+EXPECT "0" echo $?
+EXPECT $BIGGER_FILE_SIZE stat -c %s file1
+
+################ Heal file2 using the bigger-file option and its gfid ##############
+subvolume=$(get_replicate_subvol_number file2)
+if [ $subvolume == 0 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file2)
+elif [ $subvolume == 1 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file2)
+fi
+GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+$CLI volume heal $V0 split-brain bigger-file $GFIDSTR
+EXPECT "0" echo $?
+
+################ Heal file3 using the source-brick option ##############
+################ Use the brick having smaller file size as source #######
+subvolume=$(get_replicate_subvol_number file3)
+if [ $subvolume == 0 ]
+then
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 /file3
+elif [ $subvolume == 1 ]
+then
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 /file3
+fi
+EXPECT "0" echo $?
+EXPECT $SMALLER_FILE_SIZE stat -c %s file3
+
+################ Heal file4 using the source-brick option and it's gfid ##############
+################ Use the brick having smaller file size as source #######
+subvolume=$(get_replicate_subvol_number file4)
+if [ $subvolume == 0 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file4)
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 $GFIDSTR
+elif [ $subvolume == 1 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file4)
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 $GFIDSTR
+fi
+EXPECT "0" echo $?
+EXPECT $SMALLER_FILE_SIZE stat -c %s file4
+
+# With ctime enabled, the ctime xattr ("trusted.glusterfs.mdata") gets healed
+# as part of metadata heal, so the mtimes end up the same and the file can't be
+# healed using the 'latest-mtime' policy; use the 'source-brick' option instead.
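+# (For reference, the stored mdata xattr can be inspected directly on a brick,
+#  e.g. `getfattr -e hex -n trusted.glusterfs.mdata <brick-path>/file5`; this is
+#  just an illustrative aside, not part of the test flow.)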
+################ Heal file5 using the source-brick option ##############
+subvolume=$(get_replicate_subvol_number file5)
+if [ $subvolume == 0 ]
+then
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /file5
+elif [ $subvolume == 1 ]
+then
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3 /file5
+fi
+EXPECT "0" echo $?
+
+if [ $subvolume == 0 ]
+then
+ mtime1_after_heal=$(get_mtime $B0/${V0}1/file5)
+ mtime2_after_heal=$(get_mtime $B0/${V0}2/file5)
+elif [ $subvolume == 1 ]
+then
+ mtime1_after_heal=$(get_mtime $B0/${V0}3/file5)
+ mtime2_after_heal=$(get_mtime $B0/${V0}4/file5)
+fi
+
+#TODO: Do the below comparisons with full sub-second resolution
+
+TEST [ $mtime1_after_heal -eq $mtime2_after_heal ]
+
+mtime_mount_after_heal=$(stat -c %Y file5)
+
+TEST [ $mtime1_after_heal -eq $mtime_mount_after_heal ]
+
+################ Heal file6 using the source-brick option and its gfid ##############
+subvolume=$(get_replicate_subvol_number file6)
+if [ $subvolume == 0 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}1/file6)
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 $GFIDSTR
+elif [ $subvolume == 1 ]
+then
+ GFID=$(gf_get_gfid_xattr $B0/${V0}3/file6)
+ GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
+ $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3 $GFIDSTR
+fi
+EXPECT "0" echo $?
+
+if [ $subvolume == 0 ]
+then
+ mtime1_after_heal=$(get_mtime $B0/${V0}1/file6)
+ mtime2_after_heal=$(get_mtime $B0/${V0}2/file6)
+elif [ $subvolume == 1 ]
+then
+ mtime1_after_heal=$(get_mtime $B0/${V0}3/file6)
+ mtime2_after_heal=$(get_mtime $B0/${V0}4/file6)
+fi
+
+#TODO: Do the below comparisons with full sub-second resolution
+
+TEST [ $mtime1_after_heal -eq $mtime2_after_heal ]
+
+mtime_mount_after_heal=$(stat -c %Y file6)
+
+TEST [ $mtime1_after_heal -eq $mtime_mount_after_heal ]
+
+################ Heal remaining SB'ed files of replica_0 using B1 as source ##############
+$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1
+EXPECT "0" echo $?
+
+################ Heal remaining SB'ed files of replica_1 using B3 as source ##############
+$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}3
+EXPECT "0" echo $?
+
+############### Reading the files should now succeed. ###############################
+TEST cat file1
+TEST cat file2
+TEST cat file3
+TEST cat file4
+TEST cat file5
+TEST cat file6
+TEST cat file7
+TEST cat file8
+TEST cat file9
+TEST cat file10
+
+################ File contents on the bricks must be same. ################################
+TEST diff <(arequal-checksum -p $B0/${V0}1 -i .glusterfs) <(arequal-checksum -p $B0/${V0}2 -i .glusterfs)
+TEST diff <(arequal-checksum -p $B0/${V0}3 -i .glusterfs) <(arequal-checksum -p $B0/${V0}4 -i .glusterfs)
+
+############### Trying to heal files not in SB should fail. ###############################
+$CLI volume heal $V0 split-brain bigger-file /file1
+EXPECT "1" echo $?
+$CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}4 /file3
+EXPECT "1" echo $?
+
+cd -
+TEST rm $AREQUAL_PATH/arequal-checksum
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+cleanup
diff --git a/tests/basic/afr/split-brain-healing.t b/tests/basic/afr/split-brain-healing.t
index c80f900b909..315e815eb7e 100644
--- a/tests/basic/afr/split-brain-healing.t
+++ b/tests/basic/afr/split-brain-healing.t
@@ -20,11 +20,14 @@ function get_replicate_subvol_number {
cleanup;
AREQUAL_PATH=$(dirname $0)/../../utils
+GET_MDATA_PATH=$(dirname $0)/../../utils
CFLAGS=""
test "`uname -s`" != "Linux" && {
CFLAGS="$CFLAGS -lintl";
}
build_tester $AREQUAL_PATH/arequal-checksum.c $CFLAGS
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4}
@@ -32,6 +35,7 @@ TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume set $V0 cluster.data-self-heal off
TEST $CLI volume set $V0 cluster.metadata-self-heal off
TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 ctime off
TEST $CLI volume start $V0
TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
@@ -152,13 +156,13 @@ EXPECT $SMALLER_FILE_SIZE stat -c %s file4
subvolume=$(get_replicate_subvol_number file5)
if [ $subvolume == 0 ]
then
- mtime1=$(stat -c %Y $B0/${V0}1/file5)
- mtime2=$(stat -c %Y $B0/${V0}2/file5)
+ mtime1=$(get_mtime $B0/${V0}1/file5)
+ mtime2=$(get_mtime $B0/${V0}2/file5)
LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
elif [ $subvolume == 1 ]
then
- mtime1=$(stat -c %Y $B0/${V0}3/file5)
- mtime2=$(stat -c %Y $B0/${V0}4/file5)
+ mtime1=$(get_mtime $B0/${V0}3/file5)
+ mtime2=$(get_mtime $B0/${V0}4/file5)
LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
fi
$CLI volume heal $V0 split-brain latest-mtime /file5
@@ -166,12 +170,12 @@ EXPECT "0" echo $?
if [ $subvolume == 0 ]
then
- mtime1_after_heal=$(stat -c %Y $B0/${V0}1/file5)
- mtime2_after_heal=$(stat -c %Y $B0/${V0}2/file5)
+ mtime1_after_heal=$(get_mtime $B0/${V0}1/file5)
+ mtime2_after_heal=$(get_mtime $B0/${V0}2/file5)
elif [ $subvolume == 1 ]
then
- mtime1_after_heal=$(stat -c %Y $B0/${V0}3/file5)
- mtime2_after_heal=$(stat -c %Y $B0/${V0}4/file5)
+ mtime1_after_heal=$(get_mtime $B0/${V0}3/file5)
+ mtime2_after_heal=$(get_mtime $B0/${V0}4/file5)
fi
#TODO: To below comparisons on full sub-second resolution
@@ -188,14 +192,14 @@ subvolume=$(get_replicate_subvol_number file6)
if [ $subvolume == 0 ]
then
GFID=$(gf_get_gfid_xattr $B0/${V0}1/file6)
- mtime1=$(stat -c %Y $B0/${V0}1/file6)
- mtime2=$(stat -c %Y $B0/${V0}2/file6)
+ mtime1=$(get_mtime $B0/${V0}1/file6)
+ mtime2=$(get_mtime $B0/${V0}2/file6)
LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
elif [ $subvolume == 1 ]
then
GFID=$(gf_get_gfid_xattr $B0/${V0}3/file6)
- mtime1=$(stat -c %Y $B0/${V0}3/file6)
- mtime2=$(stat -c %Y $B0/${V0}4/file6)
+ mtime1=$(get_mtime $B0/${V0}3/file6)
+ mtime2=$(get_mtime $B0/${V0}4/file6)
LATEST_MTIME=$(($mtime1 > $mtime2 ? $mtime1:$mtime2))
fi
GFIDSTR="gfid:$(gf_gfid_xattr_to_str $GFID)"
@@ -204,12 +208,12 @@ EXPECT "0" echo $?
if [ $subvolume == 0 ]
then
- mtime1_after_heal=$(stat -c %Y $B0/${V0}1/file6)
- mtime2_after_heal=$(stat -c %Y $B0/${V0}2/file6)
+ mtime1_after_heal=$(get_mtime $B0/${V0}1/file6)
+ mtime2_after_heal=$(get_mtime $B0/${V0}2/file6)
elif [ $subvolume == 1 ]
then
- mtime1_after_heal=$(stat -c %Y $B0/${V0}3/file6)
- mtime2_after_heal=$(stat -c %Y $B0/${V0}4/file6)
+ mtime1_after_heal=$(get_mtime $B0/${V0}3/file6)
+ mtime2_after_heal=$(get_mtime $B0/${V0}4/file6)
fi
#TODO: To below comparisons on full sub-second resolution
@@ -253,4 +257,5 @@ EXPECT "1" echo $?
cd -
TEST rm $AREQUAL_PATH/arequal-checksum
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
cleanup
diff --git a/tests/basic/afr/split-brain-resolution.t b/tests/basic/afr/split-brain-resolution.t
index a88c47de7a7..834237c96ec 100644
--- a/tests/basic/afr/split-brain-resolution.t
+++ b/tests/basic/afr/split-brain-resolution.t
@@ -74,6 +74,18 @@ TEST setfattr -n replica.split-brain-choice -v none $M0/data-split-brain.txt
TEST ! getfattr -n user.test $M0/metadata-split-brain.txt
TEST ! cat $M0/data-split-brain.txt
+#Check that after timeout fops result in EIO again.
+#Set one minute timeout
+TEST setfattr -n replica.split-brain-choice-timeout -v 1 $M0/
+TEST setfattr -n replica.split-brain-choice -v $V0-client-1 $M0/data-split-brain.txt
+EXPECT "brick1_alive" cat $M0/data-split-brain.txt
+TEST setfattr -n replica.split-brain-choice -v $V0-client-0 $M0/metadata-split-brain.txt
+EXPECT "brick0" get_text_xattr user.test $M0/metadata-split-brain.txt
+#Wait until timeout completes and test that the fops fail again
+sleep 62
+TEST ! getfattr -n user.test $M0/metadata-split-brain.txt
+TEST ! cat $M0/data-split-brain.txt
+
#Negative test cases should fail
TEST ! setfattr -n replica.split-brain-choice -v $V0-client-4 $M0/data-split-brain.txt
TEST ! setfattr -n replica.split-brain-heal-finalize -v $V0-client-4 $M0/metadata-split-brain.txt
diff --git a/tests/basic/afr/ta-read.t b/tests/basic/afr/ta-read.t
index f2b3c38e06c..3cfc16b9b8a 100644
--- a/tests/basic/afr/ta-read.t
+++ b/tests/basic/afr/ta-read.t
@@ -25,31 +25,35 @@ TEST ! ls $B0/ta/FILE
# Kill one brick and write to FILE.
TEST ta_kill_brick brick0
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 0
echo "brick0 down">> $M0/FILE
TEST [ $? -eq 0 ]
EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/brick1/FILE
EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.patchy-ta-2
#Umount and mount to remove cached data.
-TEST umount $M0
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST ta_start_mount_process $M0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 1
# Read must be allowed since good brick is up.
TEST cat $M0/FILE
+#Umount and mount to remove cached data.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ta_start_mount_process $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
# Toggle good and bad data brick processes.
TEST ta_start_brick_process brick0
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 0
TEST ta_kill_brick brick1
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 1
# Read must now fail.
TEST ! cat $M0/FILE
# Bring all data bricks up, and kill TA.
TEST ta_start_brick_process brick1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 1
TA_PID=$(ta_get_pid_by_brick_name ta)
TEST [ -n $TA_PID ]
TEST ta_kill_brick ta
diff --git a/tests/basic/afr/ta-shd.t b/tests/basic/afr/ta-shd.t
index bb2e58b3f77..96ecfc678e0 100644
--- a/tests/basic/afr/ta-shd.t
+++ b/tests/basic/afr/ta-shd.t
@@ -22,7 +22,7 @@ TEST ta_start_shd_process glustershd
TEST touch $M0/a.txt
TEST ta_kill_brick brick0
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 0
echo "Hello" >> $M0/a.txt
EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/brick1/a.txt
EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.$V0-ta-2
@@ -33,14 +33,14 @@ EXPECT "000000010000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/
#the SHD process.
TEST ta_start_brick_process brick0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 0
EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/brick1/a.txt
EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/ta/trusted.afr.$V0-ta-2
#Kill the previously up brick and try reading from other brick. Since the heal
#has happened file content should be same.
TEST ta_kill_brick brick1
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 1
#Umount and mount to remove cached data.
TEST umount $M0
TEST ta_start_mount_process $M0
diff --git a/tests/basic/afr/ta-write-on-bad-brick.t b/tests/basic/afr/ta-write-on-bad-brick.t
index 18cb65b3a76..096ca9f47cf 100644
--- a/tests/basic/afr/ta-write-on-bad-brick.t
+++ b/tests/basic/afr/ta-write-on-bad-brick.t
@@ -26,26 +26,26 @@ TEST dd if=/dev/zero of=$M0/a.txt bs=1M count=5
#Good Data brick is down. TA and bad brick are UP
TEST ta_kill_brick brick1
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 1
TEST dd if=/dev/zero of=$M0/a.txt bs=1M count=5
TEST ta_kill_brick brick0
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 0
TEST ta_start_brick_process brick1
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 1
TEST ! dd if=/dev/zero of=$M0/a.txt bs=1M count=5
# Good Data brick is UP. Bad and TA are down
TEST ta_kill_brick brick1
-EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" ta_mount_child_up_status $M0 $V0 1
TEST ta_start_brick_process brick0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 0
TEST ta_kill_brick ta
TEST ! dd if=/dev/zero of=$M0/a.txt bs=1M count=5
# Good and Bad data bricks are UP. TA is down
TEST ta_start_brick_process brick1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_mount_child_up_status $M0 $V0 0
TEST dd if=/dev/zero of=$M0/a.txt bs=1M count=5
cleanup;
diff --git a/tests/basic/changelog/changelog-api.t b/tests/basic/changelog/changelog-api.t
new file mode 100644
index 00000000000..516c2f2f60d
--- /dev/null
+++ b/tests/basic/changelog/changelog-api.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../env.rc
+
+cleanup;
+
+CHANGELOG_BIN_PATH=$(dirname $0)/../../utils/changelog
+build_tester $CHANGELOG_BIN_PATH/test-changelog-api.c -lgfchangelog
+
+CHANGELOG_PATH_0="$B0/${V0}0/.glusterfs/changelogs"
+ROLLOVER_TIME=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+sleep 3;
+
+#Listen to changelog journal notifications
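+#(test-changelog-api is assumed to register with libgfchangelog and move the
+# changelogs it consumes into /tmp/scratch_v1/.processed, which is what the
+# EXPECT at the end of this test checks.)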
+$CHANGELOG_BIN_PATH/test-changelog-api &
+for i in {1..12};do echo "data" > $M0/file$i 2>/dev/null; sleep 1;done &
+
+#Wait for changelogs to be in .processed directory
+sleep 12
+
+EXPECT "Y" processed_changelogs "/tmp/scratch_v1/.processed"
+TEST rm $CHANGELOG_BIN_PATH/test-changelog-api
+rm -rf /tmp/scratch_v1
+
+cleanup;
diff --git a/tests/basic/changelog/changelog-history.t b/tests/basic/changelog/changelog-history.t
index 3ce40981c90..ea952619652 100644
--- a/tests/basic/changelog/changelog-history.t
+++ b/tests/basic/changelog/changelog-history.t
@@ -5,6 +5,7 @@
cleanup;
+SCRIPT_TIMEOUT=300
HISTORY_BIN_PATH=$(dirname $0)/../../utils/changelog
build_tester $HISTORY_BIN_PATH/get-history.c -lgfchangelog
@@ -68,19 +69,23 @@ TEST $CLI volume set $V0 changelog.changelog off
sleep 3
time_after_disable=$(date '+%s')
+TEST $CLI volume set $V0 changelog.changelog on
+sleep 5
+
#Passes, gives the changelogs till continuous changelogs are available
# but returns 1
-EXPECT "1" $HISTORY_BIN_PATH/get-history $time_after_enable1 $time_in_sec_htime2
+EXPECT_WITHIN 10 "1" $HISTORY_BIN_PATH/get-history $time_after_enable1 $time_in_sec_htime2
#Fails as start falls between htime files
-EXPECT "-3" $HISTORY_BIN_PATH/get-history $time_between_htime $time_in_sec_htime1
+EXPECT_WITHIN 10 "-3" $HISTORY_BIN_PATH/get-history $time_between_htime $time_in_sec_htime1
#Passes as start and end falls in same htime file
-EXPECT "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime1 $time_in_sec_htime2
+EXPECT_WITHIN 10 "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime1 $time_in_sec_htime2
#Passes, gives the changelogs till continuous changelogs are available
-EXPECT "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime2 $time_after_disable
+EXPECT_WITHIN 10 "0" $HISTORY_BIN_PATH/get-history $time_in_sec_htime2 $time_after_disable
TEST rm $HISTORY_BIN_PATH/get-history
+rm -rf /tmp/scratch_v1/*
cleanup;
diff --git a/tests/basic/changelog/history-api.t b/tests/basic/changelog/history-api.t
new file mode 100644
index 00000000000..9e63118cef9
--- /dev/null
+++ b/tests/basic/changelog/history-api.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../env.rc
+
+cleanup;
+
+HISTORY_BIN_PATH=$(dirname $0)/../../utils/changelog
+build_tester $HISTORY_BIN_PATH/test-history-api.c -lgfchangelog
+
+CHANGELOG_PATH_0="$B0/${V0}0/.glusterfs/changelogs"
+ROLLOVER_TIME=2
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+
+sleep 3
+start=$(date '+%s')
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+touch $M0/file{1..10}
+
+for i in {1..12};do echo "data" > $M0/file$i; sleep 1;done
+end=$(date '+%s')
+sleep 2
+
+#Passes as start and end fall in the same htime file
+EXPECT "0" $HISTORY_BIN_PATH/test-history-api $start $end
+
+#Wait for changelogs to be in .processed directory
+sleep 2
+
+EXPECT "Y" processed_changelogs "/tmp/scratch_v1/.history/.processed"
+TEST rm $HISTORY_BIN_PATH/test-history-api
+rm -rf /tmp/scratch_v1
+
+cleanup;
diff --git a/tests/basic/cloudsync-sanity.t b/tests/basic/cloudsync-sanity.t
index 3cf719da011..834ba96430c 100644
--- a/tests/basic/cloudsync-sanity.t
+++ b/tests/basic/cloudsync-sanity.t
@@ -19,4 +19,11 @@ TEST $GFS -s $H0 --volfile-id $V0 $M1;
# create operations
TEST $(dirname $0)/rpc-coverage.sh $M1
+
+TEST cp $(dirname ${0})/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
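+#glfsxmp (built from the gfapi coverage example) exercises gfapi operations
+#against the volume; note that its exit status is not asserted here.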
+./glfsxmp $V0 $H0
+cleanup_tester ./glfsxmp
+rm ./glfsxmp.c
+
cleanup;
diff --git a/tests/basic/ctime/ctime-ec-heal.t b/tests/basic/ctime/ctime-ec-heal.t
new file mode 100644
index 00000000000..142237c5014
--- /dev/null
+++ b/tests/basic/ctime/ctime-ec-heal.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# This will test self healing of ctime xattr 'trusted.glusterfs.mdata'
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+#create and start volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{1..3}
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+
+# Create files
+mkdir $M0/dir1
+echo "Initial content" > $M0/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+# Kill brick
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+echo "B3 is down" >> $M0/file1
+echo "Change dir1 time attributes" > $M0/dir1/dir1_file1
+echo "Entry heal file" > $M0/entry_heal_file1
+mkdir $M0/entry_heal_dir1
+
+# Check xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
+
+TEST $CLI volume start $V0 force
+$CLI volume heal $V0
+
+# Check xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-ec-rebalance.t b/tests/basic/ctime/ctime-ec-rebalance.t
new file mode 100644
index 00000000000..2b73bcdd103
--- /dev/null
+++ b/tests/basic/ctime/ctime-ec-rebalance.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# This will test healing of ctime xattr 'trusted.glusterfs.mdata' after add-brick and rebalance
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+#create and start volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+
+# Create files
+mkdir $M0/dir1
+echo "test data" > $M0/dir1/file1
+
+# Add brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{6..8}
+
+#Trigger rebalance
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Verify ctime xattr heal on directory
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}6/dir1"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}7/dir1"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}8/dir1"
+
+b6_mdata=$(get_mdata "$B0/${V0}6/dir1")
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}7/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}8/dir1
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-mdata-legacy-files.t b/tests/basic/ctime/ctime-mdata-legacy-files.t
new file mode 100644
index 00000000000..2e782d5c99d
--- /dev/null
+++ b/tests/basic/ctime/ctime-mdata-legacy-files.t
@@ -0,0 +1,83 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+###############################################################################
+#Replica volume
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#Disable ctime and create a file; it should not have the "trusted.glusterfs.mdata" xattr
+TEST $CLI volume set $V0 ctime off
+
+TEST "mkdir $M0/DIR"
+TEST "echo hello_world > $M0/DIR/FILE"
+
+#Verify absence of xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR/FILE"
+
+#Enable ctime
+TEST $CLI volume set $V0 ctime on
+sleep 3
+TEST stat $M0/DIR/FILE
+
+#Verify presence of "trusted.glusterfs.mdata" xattr on backend
+#The lookup above should have created xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}0/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}1/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}2/DIR/FILE"
+
+###############################################################################
+#Disperse Volume
+
+TEST $CLI volume create $V1 disperse 3 redundancy 1 $H0:$B0/${V1}{0,1,2}
+TEST $CLI volume set $V1 performance.stat-prefetch off
+TEST $CLI volume start $V1
+
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 --entry-timeout=0 $M1;
+
+#Disable ctime and create a file; it should not have the "trusted.glusterfs.mdata" xattr
+TEST $CLI volume set $V1 ctime off
+TEST "mkdir $M1/DIR"
+TEST "echo hello_world > $M1/DIR/FILE"
+
+#Verify absence of xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "" check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR/FILE"
+
+#Enable ctime
+TEST $CLI volume set $V1 ctime on
+sleep 3
+TEST stat $M1/DIR/FILE
+
+#Verify presence of "trusted.glusterfs.mdata" xattr on backend
+#The lookup above should have created xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}0/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}1/DIR/FILE"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V1}2/DIR/FILE"
+
+cleanup;
+###############################################################################
diff --git a/tests/basic/ctime/ctime-readdir.c b/tests/basic/ctime/ctime-readdir.c
new file mode 100644
index 00000000000..8760db29ae8
--- /dev/null
+++ b/tests/basic/ctime/ctime-readdir.c
@@ -0,0 +1,29 @@
+#include <stdio.h>
+#include <dirent.h>
+#include <string.h>
+#include <assert.h>
+
+int
+main(int argc, char **argv)
+{
+ DIR *dir = NULL;
+ struct dirent *entry = NULL;
+ int ret = 0;
+ char *path = NULL;
+
+ assert(argc == 2);
+ path = argv[1];
+
+ dir = opendir(path);
+ if (!dir) {
+ printf("opendir(%s) failed.\n", path);
+ return -1;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
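+        /* Intentionally left empty: the test only needs the directory to be
+         * read end to end; individual entries are not inspected. */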
+ }
+ if (dir)
+ closedir(dir);
+
+ return ret;
+}
diff --git a/tests/basic/ctime/ctime-readdir.t b/tests/basic/ctime/ctime-readdir.t
new file mode 100644
index 00000000000..4564fc1b667
--- /dev/null
+++ b/tests/basic/ctime/ctime-readdir.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{1,2,3};
+TEST $CLI volume set $V0 performance.stat-prefetch on
+TEST $CLI volume set $V0 performance.readdir-ahead off
+TEST $CLI volume start $V0;
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+TEST mkdir $M0/dir0
+TEST "echo hello_world > $M0/dir0/FILE"
+
+ctime1=$(stat -c %Z $M0/dir0/FILE)
+echo "Mount change time: $ctime1"
+
+sleep 2
+
+#Write to back end directly to modify ctime of backend file
+TEST "echo write_from_backend >> $B0/brick1/dir0/FILE"
+TEST "echo write_from_backend >> $B0/brick2/dir0/FILE"
+TEST "echo write_from_backend >> $B0/brick3/dir0/FILE"
+echo "Backend change time"
+echo "brick1: $(stat -c %Z $B0/brick1/dir0/FILE)"
+echo "brick2: $(stat -c %Z $B0/brick2/dir0/FILE)"
+echo "brick3: $(stat -c %Z $B0/brick3/dir0/FILE)"
+
+#Stop and start to hit the case of no inode for readdir
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+TEST build_tester $(dirname $0)/ctime-readdir.c
+
+#Do readdir
+TEST ./$(dirname $0)/ctime-readdir $M0/dir0
+
+EXPECT "$ctime1" stat -c %Z $M0/dir0/FILE
+echo "Mount change time after readdir $(stat -c %Z $M0/dir0/FILE)"
+
+cleanup_tester $(dirname $0)/ctime-readdir
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-rep-heal.t b/tests/basic/ctime/ctime-rep-heal.t
new file mode 100644
index 00000000000..20517c74971
--- /dev/null
+++ b/tests/basic/ctime/ctime-rep-heal.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# This will test self healing of ctime xattr 'trusted.glusterfs.mdata'
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+#create and start volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3}
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+
+# Create files
+mkdir $M0/dir1
+echo "Initial content" > $M0/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+# Kill brick
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+echo "B3 is down" >> $M0/file1
+echo "Change dir1 time attributes" > $M0/dir1/dir1_file1
+echo "Entry heal file" > $M0/entry_heal_file1
+mkdir $M0/entry_heal_dir1
+
+# Check xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '2' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
+
+TEST $CLI volume start $V0 force
+$CLI volume heal $V0
+
+# Check xattr
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/dir1/dir1_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/dir1/dir1_file1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_file1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '3' get_mdata_count $B0/${V0}{1..3}/entry_heal_dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '1' get_mdata_uniq_count $B0/${V0}{1..3}/entry_heal_dir1
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-rep-rebalance.t b/tests/basic/ctime/ctime-rep-rebalance.t
new file mode 100644
index 00000000000..866cf87e6cb
--- /dev/null
+++ b/tests/basic/ctime/ctime-rep-rebalance.t
@@ -0,0 +1,41 @@
+#!/bin/bash
+#
+# This will test healing of ctime xattr 'trusted.glusterfs.mdata' after add-brick and rebalance
+#
+###
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+#Create and start volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+
+#Mount the volume
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+
+# Create files
+mkdir $M0/dir1
+
+# Add brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{6..8}
+
+#Trigger rebalance
+TEST $CLI volume rebalance $V0 start force
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
+
+#Verify ctime xattr heal on directory
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}6/dir1"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}7/dir1"
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.glusterfs.mdata' check_for_xattr 'trusted.glusterfs.mdata' "$B0/${V0}8/dir1"
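+
+# get_mdata (test-library helper) reads the raw trusted.glusterfs.mdata value,
+# roughly equivalent to:
+#   getfattr -e hex --only-values -n trusted.glusterfs.mdata <brick>/dir1
+# The checks below assert that the newly added bricks all carry the same value.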
+
+b6_mdata=$(get_mdata "$B0/${V0}6/dir1")
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}7/dir1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "${b6_mdata}" get_mdata $B0/${V0}8/dir1
+
+cleanup;
diff --git a/tests/basic/ctime/ctime-utimesat.t b/tests/basic/ctime/ctime-utimesat.t
new file mode 100644
index 00000000000..540e57aec83
--- /dev/null
+++ b/tests/basic/ctime/ctime-utimesat.t
@@ -0,0 +1,28 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.read-after-open off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+touch $M0/FILE
+
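+# stat's %.X/%.Y/%.Z formats print atime/mtime/ctime with sub-second precision
+# (recent GNU coreutils); on a freshly created file all three timestamps are
+# expected to match exactly.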
+atime=$(stat -c "%.X" $M0/FILE)
+EXPECT $atime stat -c "%.Y" $M0/FILE
+EXPECT $atime stat -c "%.Z" $M0/FILE
+
+cleanup
diff --git a/tests/basic/distribute/brick-down.t b/tests/basic/distribute/brick-down.t
new file mode 100644
index 00000000000..522ccc07210
--- /dev/null
+++ b/tests/basic/distribute/brick-down.t
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../common-utils.rc
+. $(dirname $0)/../../dht.rc
+
+# Test 1 overview:
+# ----------------
+# Test whether lookups are sent after a brick comes up again
+#
+# 1. Create a 3 brick pure distribute volume
+# 2. Fuse mount the volume so the layout is set on the root
+# 3. Kill one brick, remount the volume so the client has an incomplete
+#    layout, and try to create a directory which hashes to the killed brick.
+#    It should fail with EIO.
+# 4. Restart the brick that was killed.
+# 5. Without remounting again, create the same directories as in step 3. They
+#    should now succeed.
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0-{1..3}
+TEST $CLI volume start $V0
+
+# We want the lookup to reach DHT
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+# Mount using FUSE and lookup the mount so a layout is set on the brick root
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+ls $M0/
+
+TEST mkdir $M0/level1
+
+# Find a dirname that will hash to the brick we are going to kill
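+# (dht_first_filename_with_hashsubvol is a dht.rc helper: it generates names
+# with the given prefix and returns, via $fn_return_val, the first one whose
+# hash maps to the requested subvolume.)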
+hashed=$V0-client-1
+TEST dht_first_filename_with_hashsubvol "$hashed" $M0 "dir-"
+roottestdir=$fn_return_val
+
+hashed=$V0-client-1
+TEST dht_first_filename_with_hashsubvol "$hashed" $M0/level1 "dir-"
+level1testdir=$fn_return_val
+
+
+TEST kill_brick $V0 $H0 $B0/$V0-2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status $V0 $H0 $B0/$V0-2
+
+TEST $CLI volume status $V0
+
+
+# Unmount and mount the volume again so dht has an incomplete in memory layout
+
+umount -f $M0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+
+mkdir $M0/$roottestdir
+TEST [ $? -ne 0 ]
+
+mkdir $M0/level1/$level1testdir
+TEST [ $? -ne 0 ]
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/$V0-2
+
+#$CLI volume status
+
+# It takes a while for the client to reconnect to the brick
+sleep 5
+
+
+mkdir $M0/$roottestdir
+TEST [ $? -eq 0 ]
+
+mkdir $M0/level1/$level1testdir
+TEST [ $? -eq 0 ]
+
+# Cleanup
+cleanup
+
+
diff --git a/tests/basic/distribute/dir-heal.t b/tests/basic/distribute/dir-heal.t
new file mode 100644
index 00000000000..851f765b245
--- /dev/null
+++ b/tests/basic/distribute/dir-heal.t
@@ -0,0 +1,145 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../common-utils.rc
+
+# Test 1 overview:
+# ----------------
+#
+# 1. Kill one brick of the volume.
+# 2. Create directories and change directory properties.
+# 3. Bring up the brick and access the directory
+# 4. Check the permissions and xattrs on the backend
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0-{1..3}
+TEST $CLI volume start $V0
+
+# We want the lookup to reach DHT
+TEST $CLI volume set $V0 performance.stat-prefetch off
+
+# Mount using FUSE, kill a brick and create directories
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+ls $M0/
+cd $M0
+
+TEST kill_brick $V0 $H0 $B0/$V0-1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status $V0 $H0 $B0/$V0-1
+
+TEST mkdir dir{1..4}
+
+# No change for dir1
+# Change permissions for dir2
+# Set xattr on dir3
+# Change permissions and set xattr on dir4
+
+TEST chmod 777 $M0/dir2
+
+TEST setfattr -n "user.test" -v "test" $M0/dir3
+
+TEST chmod 777 $M0/dir4
+TEST setfattr -n "user.test" -v "test" $M0/dir4
+
+
+# Start all bricks
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/$V0-1
+
+#$CLI volume status
+
+# It takes a while for the client to reconnect to the brick
+sleep 5
+
+stat $M0/dir* > /dev/null
+
+# Check that directories have been created on the brick that was killed
+
+TEST ls $B0/$V0-1/dir1
+
+TEST ls $B0/$V0-1/dir2
+EXPECT "777" stat -c "%a" $B0/$V0-1/dir2
+
+TEST ls $B0/$V0-1/dir3
+EXPECT "test" getfattr -n "user.test" --absolute-names --only-values $B0/$V0-1/dir3
+
+
+TEST ls $B0/$V0-1/dir4
+EXPECT "777" stat -c "%a" $B0/$V0-1/dir4
+EXPECT "test" getfattr -n "user.test" --absolute-names --only-values $B0/$V0-1/dir4
+
+
+TEST rm -rf $M0/*
+
+cd
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+# Test 2 overview:
+# ----------------
+# 1. Create directories with all bricks up.
+# 2. Kill a brick and change directory properties and set user xattr.
+# 3. Bring up the brick and access the directory
+# 4. Check the permissions and xattrs on the backend
+
+
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+
+ls $M0/
+cd $M0
+TEST mkdir dir{1..4}
+
+TEST kill_brick $V0 $H0 $B0/$V0-1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" brick_up_status $V0 $H0 $B0/$V0-1
+
+# No change for dir1
+# Change permissions for dir2
+# Set xattr on dir3
+# Change permissions and set xattr on dir4
+
+TEST chmod 777 $M0/dir2
+
+TEST setfattr -n "user.test" -v "test" $M0/dir3
+
+TEST chmod 777 $M0/dir4
+TEST setfattr -n "user.test" -v "test" $M0/dir4
+
+
+# Start all bricks
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/$V0-1
+
+#$CLI volume status
+
+# It takes a while for the client to reconnect to the brick
+sleep 5
+
+stat $M0/dir* > /dev/null
+
+# Check directories on the brick that was killed
+
+TEST ls $B0/$V0-1/dir2
+EXPECT "777" stat -c "%a" $B0/$V0-1/dir2
+
+TEST ls $B0/$V0-1/dir3
+EXPECT "test" getfattr -n "user.test" --absolute-names --only-values $B0/$V0-1/dir3
+
+
+TEST ls $B0/$V0-1/dir4
+EXPECT "777" stat -c "%a" $B0/$V0-1/dir4
+EXPECT "test" getfattr -n "user.test" --absolute-names --only-values $B0/$V0-1/dir4
+cd
+
+
+# Cleanup
+cleanup
+
diff --git a/tests/basic/distribute/file-rename.t b/tests/basic/distribute/file-rename.t
new file mode 100644
index 00000000000..63111b8ad8f
--- /dev/null
+++ b/tests/basic/distribute/file-rename.t
@@ -0,0 +1,1021 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+. $(dirname $0)/../../common-utils.rc
+
+# Test overview:
+# Test all combinations of src-hashed/src-cached/dst-hashed/dst-cached
+
+hashdebugxattr="dht.file.hashed-subvol."
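+# dht.file.hashed-subvol.<name> is a virtual xattr: reading it on a directory
+# reports which DHT subvolume the given file name would hash to, e.g.
+#   getfattr --only-values -n "dht.file.hashed-subvol.foo" $M0/somedir
+# ("foo" and "somedir" above are placeholders for illustration only.)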
+
+function get_brick_index {
+ local inpath=$1
+ brickroot=$(getfattr -m . -n trusted.glusterfs.pathinfo $inpath | tr ' ' '\n' | sed -n 's/<POSIX(\(.*\)):.*:.*>.*/\1/p')
+ echo ${brickroot:(-1)}
+}
+
+function get_brick_path_for_subvol {
+ local in_subvol=$1
+ local in_brickpath
+
+ in_brickpath=$(cat "$M0/.meta/graphs/active/$in_subvol/options/remote-subvolume")
+ echo $in_brickpath
+
+}
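+# (The .meta virtual directory exposes the active client-side graph; each
+# protocol/client xlator's remote-subvolume option holds the brick path it is
+# connected to, which is what the helper above reads.)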
+
+#Checks that file exists only on hashed and/or cached
+function file_existence_check
+{
+ local in_file_path=$1
+ local in_hashed=$2
+ local in_cached=$3
+ local in_client_subvol
+ local in_brickpath
+ local ret
+
+ for i in {0..3}
+ do
+ in_client_subvol="$V0-client-$i"
+ in_brickpath=$(cat "$M0/.meta/graphs/active/$in_client_subvol/options/remote-subvolume")
+ stat "$in_brickpath/$in_file_path" 2>/dev/null
+ ret=$?
+ # Either the linkto or the data file must exist on the hashed
+ if [ "$in_client_subvol" == "$in_hashed" ]; then
+ if [ $ret -ne 0 ]; then
+ return 1
+ fi
+ continue
+ fi
+
+ # If the cached is non-null, we expect the file to exist on it
+ if [ "$in_client_subvol" == "$in_cached" ]; then
+ if [ $ret -ne 0 ]; then
+ return 1
+ fi
+ continue
+ fi
+
+ if [ $ret -eq 0 ]; then
+ return 2
+ fi
+ done
+ return 0
+}
+
+
+# Succeeds only if the file does not exist on any of the bricks of the volume
+function file_does_not_exist
+{
+ local inpath=$1
+ for i in `seq 0 3`
+ do
+ file_path=$B0/$V0-$i/$inpath
+ if [ -f "$file_path" ]; then
+ echo "1"
+ return 1
+ fi
+ done
+ return 0
+}
+
+
+# Input: filename dirpath
+function get_hash_subvol
+{
+ hash_subvol=$(getfattr --only-values -n "$hashdebugxattr$1" $2 2>/dev/null)
+}
+
+
+
+# Find the first filename that hashes to a subvol
+# other than $1
+
+function first_filename_with_diff_hashsubvol
+{
+ local in_subvol=$1
+ local in_path=$2
+ local file_pattern=$3
+ local in_hash_subvol
+
+ for i in {1..100}
+ do
+ dstfilename="$file_pattern$i"
+ # get_hash_subvol sets the global $hash_subvol
+ get_hash_subvol "$dstfilename" "$in_path"
+ in_hash_subvol=$hash_subvol
+# echo $in_hash_subvol
+ if [ "$in_subvol" != "$in_hash_subvol" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+# Find the first filename that hashes to the same subvol
+# as $1
+function first_filename_with_same_hashsubvol
+{
+ local in_subvol=$1
+ local in_path=$2
+ local in_hash_subvol
+ local file_pattern=$3
+
+ for i in {1..100}
+ do
+ dstfilename="$file_pattern$i"
+ get_hash_subvol "$dstfilename" "$in_path"
+ in_hash_subvol=$hash_subvol
+# echo $in_hash_subvol
+ if [ "$in_subvol" == "$in_hash_subvol" ]; then
+ return 0
+ fi
+ done
+ return 1
+}
+
+function file_is_linkto
+{
+ local brick_filepath=$1
+
+ test=$(stat $brick_filepath 2>&1)
+ if [ $? -ne 0 ]; then
+ echo "2"
+ return
+ fi
+
+ test=$(getfattr -n trusted.glusterfs.dht.linkto -e text $brick_filepath 2>&1)
+
+ if [ $? -eq 0 ]; then
+ echo "1"
+ else
+ echo "0"
+ fi
+}
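+
+# (A DHT linkto file is a zero-byte placeholder kept on the hashed subvolume;
+# its trusted.glusterfs.dht.linkto xattr names the subvolume holding the
+# actual data, which is what file_is_linkto checks for.)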
+
+
+
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+
+# We need at least 4 bricks to test all combinations of hashed and
+# cached files
+
+TEST $CLI volume create $V0 $H0:$B0/$V0-{0..3}
+TEST $CLI volume start $V0
+
+# Mount using FUSE
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+
+################################################################
+# The first set of tests are those where the Dst file does not exist
+# dst-cached = NULL
+#
+###############################################################
+
+################### Test 1 ####################################
+#
+# src-hashed = src-cached = dst-hashed
+# dst-cached = null
+# src-file = src-1
+
+echo " **** Test 1 **** "
+
+src_file="src-1"
+
+TEST mkdir $M0/test-1
+TEST touch $M0/test-1/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-1
+src_hashed=$hash_subvol
+#echo "Hashed subvol for $src_file: " $src_hashed
+
+# Find a file name that hashes to the same subvol as $src_file
+TEST first_filename_with_same_hashsubvol "$src_hashed" "$M0/test-1" "dst-"
+#echo "dst-file name: " $dstfilename
+dst_hashed=$src_hashed
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-1/$src_file $M0/test-1/$dstfilename
+
+# Expected:
+# dst file is accessible from the mount point
+# dst file exists only on the hashed brick.
+# no linkto files on any bricks
+# src files do not exist
+
+
+TEST stat $M0/test-1/$dstfilename 2>/dev/null
+TEST file_existence_check test-1/$dstfilename $src_hashed
+TEST file_does_not_exist test-1/$src_file
+EXPECT "0" file_is_linkto $src_hash_brick/test-1/$dstfilename
+
+
+################### Test 2 ####################################
+
+# src-hashed = src-cached != dst-hashed
+# dst-cached = null
+
+echo " **** Test 2 **** "
+
+src_file="src-1"
+
+TEST mkdir $M0/test-2
+TEST touch $M0/test-2/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-2
+src_hashed=$hash_subvol
+#echo "Hashed subvol for $src_file: " $src_hashed
+
+# Find a file name that hashes to a diff hashed subvol than $src_file
+TEST first_filename_with_diff_hashsubvol "$src_hashed" "$M0/test-2" "dst-"
+echo "dst-file name: " $dstfilename
+TEST get_hash_subvol $dstfilename $M0/test-2
+dst_hashed=$hash_subvol
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+dst_hash_brick=$(get_brick_path_for_subvol $dst_hashed)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-2/$src_file $M0/test-2/$dstfilename
+
+
+# Expected:
+# dst file is accessible from the mount point
+# dst data file on src_hashed and dst linkto file on dst_hashed
+# src files do not exist
+
+
+TEST stat $M0/test-2/$dstfilename 2>/dev/null
+TEST file_existence_check test-2/$dstfilename $dst_hashed $src_hashed
+TEST file_does_not_exist test-2/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-2/$dstfilename
+EXPECT "0" file_is_linkto $src_hash_brick/test-2/$dstfilename
+
+################### Test 3 ####################################
+
+# src-hashed = dst-hashed != src-cached
+
+echo " **** Test 3 **** "
+
+src_file0="abc-1"
+
+# 1. Create src file with src_cached != src_hashed
+TEST mkdir $M0/test-3
+TEST touch $M0/test-3/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-3
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a file name that hashes to a diff hashed subvol than $src_file0
+TEST first_filename_with_diff_hashsubvol "$src_cached" "$M0/test-3" "src-"
+echo "dst-file name: " $dstfilename
+src_file=$dstfilename
+
+TEST mv $M0/test-3/$src_file0 $M0/test-3/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-3
+src_hashed=$hash_subvol
+
+
+# 2. Rename src to dst
+TEST first_filename_with_same_hashsubvol "$src_hashed" "$M0/test-3" "dst-"
+#echo "dst-file name: " $dstfilename
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-3/$src_file $M0/test-3/$dstfilename
+
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-3/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-3/$src_file
+
+# dst linkto file on src_hashed and dst data file on src_cached
+TEST file_existence_check test-3/$dstfilename $src_hashed $src_cached
+
+EXPECT "1" file_is_linkto $src_hash_brick/test-3/$dstfilename
+EXPECT "0" file_is_linkto $src_cached_brick/test-3/$dstfilename
+
+
+
+################### Test 4 ####################################
+
+# src-cached = dst-hashed != src-hashed
+
+echo " **** Test 4 **** "
+
+src_file0="abc-1"
+
+# 1. Create src file with src_cached != src_hashed
+TEST mkdir $M0/test-4
+TEST touch $M0/test-4/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-4
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a file name that hashes to a diff hashed subvol than $src_file0
+TEST first_filename_with_diff_hashsubvol "$src_cached" "$M0/test-4" "src-"
+src_file=$dstfilename
+
+TEST mv $M0/test-4/$src_file0 $M0/test-4/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-4
+src_hashed=$hash_subvol
+
+
+# 2. Rename src to dst
+TEST first_filename_with_same_hashsubvol "$src_cached" "$M0/test-4" "dst-"
+#echo "dst-file name: " $dstfilename
+
+src_hash_brick=$(get_brick_path_for_subvol $src_hashed)
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-4/$src_file $M0/test-4/$dstfilename
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-4/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-4/$src_file
+
+# dst linkto file on src_hashed and dst data file on src_cached
+TEST file_existence_check test-4/$dstfilename $src_cached
+
+EXPECT "0" file_is_linkto $src_cached_brick/test-4/$dstfilename
+
+
+################### Test 5 ####################################
+
+# src-cached != src-hashed
+# src-hashed != dst-hashed
+# src-cached != dst-hashed
+
+
+echo " **** Test 5 **** "
+
+# 1. Create src and dst files
+
+TEST mkdir $M0/test-5
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-5" "abc-"
+src_file0=$dstfilename
+
+TEST touch $M0/test-5/$src_file0
+
+TEST get_hash_subvol $src_file0 $M0/test-5
+src_cached=$hash_subvol
+#echo "Hashed subvol for $src_file0: " $src_cached
+
+# Find a file name that hashes to $V0-client-1 so that src_hashed differs from
+# src_cached ($src_file0's hashed subvol)
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-5" "src-"
+src_file=$dstfilename
+
+TEST mv $M0/test-5/$src_file0 $M0/test-5/$src_file
+
+TEST get_hash_subvol $src_file $M0/test-5
+src_hashed=$hash_subvol
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-5" "dst-"
+#echo "dst-file name: " $dstfilename
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+src_cached_brick=$(get_brick_path_for_subvol $src_cached)
+
+
+# 2. Rename src to dst
+echo "Renaming $src_file to $dstfilename"
+
+TEST mv $M0/test-5/$src_file $M0/test-5/$dstfilename
+
+
+# 3. Validate
+
+# Expected:
+# dst file is accessible from the mount point
+TEST stat $M0/test-5/$dstfilename 2>/dev/null
+
+# src file does not exist
+TEST file_does_not_exist test-5/$src_file
+
+# dst linkto file on dst_hashed and dst data file on src_cached
+
+EXPECT "0" file_is_linkto $src_cached_brick/test-5/$dstfilename
+EXPECT "1" file_is_linkto $dst_hash_brick/test-5/$dstfilename
+
+
+########################################################################
+#
+# The Dst file exists
+#
+########################################################################
+
+################### Test 6 ####################################
+
+# src_hash = src_cached
+# dst_hash = dst_cached
+# dst_hash = src_hash
+
+
+TEST mkdir $M0/test-6
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-6" "src-"
+src_file=$dstfilename
+
+TEST touch $M0/test-6/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-6" "dst-"
+dst_file=$dstfilename
+
+TEST touch $M0/test-6/$dst_file
+
+
+# 2. Rename src to dst
+
+TEST mv $M0/test-6/$src_file $M0/test-6/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-6/$dst_file 2>/dev/null
+TEST file_existence_check test-6/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-6/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-6/$dst_file
+
+
+################### Test 7 ####################################
+
+# src_hash = src_cached
+# dst_hash = dst_cached
+# dst_hash != src_hash
+
+
+echo " **** Test 7 **** "
+
+TEST mkdir $M0/test-7
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-7" "src-"
+src_file=$dstfilename
+
+TEST touch $M0/test-7/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-7" "dst-"
+dst_file=$dstfilename
+
+TEST touch $M0/test-7/$dst_file
+
+
+# 2. Rename src to dst
+
+TEST mv $M0/test-7/$src_file $M0/test-7/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+src_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-7/$dst_file 2>/dev/null
+TEST file_existence_check test-7/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-7/$src_file
+
+EXPECT "0" file_is_linkto $src_hash_brick/test-7/$dst_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-7/$dst_file
+
+
+################### Test 8 ####################################
+
+# src_hash = src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash
+# dst_cached != src_hash
+
+echo " **** Test 8 **** "
+
+TEST mkdir $M0/test-8
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-8" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-8/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-8" "dst0-"
+dst_file0=$dstfilename
+TEST touch $M0/test-8/$dst_file0
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-8" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-8/$dst_file0 $M0/test-8/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-8/$src_file $M0/test-8/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+src_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-8/$dst_file 2>/dev/null
+TEST file_existence_check test-8/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-8/$src_file
+
+EXPECT "0" file_is_linkto $src_hash_brick/test-8/$dst_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-8/$dst_file
+
+################### Test 9 ####################################
+
+# src_hash = src_cached = dst_hash
+# dst_hash != dst_cached
+
+echo " **** Test 9 **** "
+
+TEST mkdir $M0/test-9
+
+
+# 1. Create src and dst files
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-9" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-9/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-9" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-9/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-9" "dst-"
+dst_file=$dstfilename
+
+TEST mv $M0/test-9/$dst0_file $M0/test-9/$dst_file
+
+# 2. Rename the file
+
+mv $M0/test-9/$src_file $M0/test-9/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-9/$dst_file 2>/dev/null
+TEST file_existence_check test-9/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-9/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-9/$dst_file
+
+
+################### Test 10 ####################################
+
+# src_hash = src_cached = dst_cached
+# dst_hash != dst_cached
+
+echo " **** Test 10 **** "
+
+TEST mkdir $M0/test-10
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-10" "src-"
+src_file=$dstfilename
+TEST touch $M0/test-10/$src_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-10" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-10/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-10" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-10/$dst0_file $M0/test-10/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-10/$src_file $M0/test-10/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-10/$dst_file 2>/dev/null
+TEST file_existence_check test-10/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-10/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-10/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-10/$dst_file
+
+
+################### Test 11 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached = src_cached
+
+echo " **** Test 11 **** "
+
+TEST mkdir $M0/test-11
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-11" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-11/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-11" "src-"
+src_file=$dstfilename
+
+mv $M0/test-11/$src0_file $M0/test-11/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-11" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-11/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-11/$src_file $M0/test-11/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-11/$dst_file 2>/dev/null
+TEST file_existence_check test-11/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-11/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-11/$dst_file
+
+
+################### Test 12 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached = src_hash
+
+echo " **** Test 12 **** "
+
+TEST mkdir $M0/test-12
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-12" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-12/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-12" "src-"
+src_file=$dstfilename
+
+mv $M0/test-12/$src0_file $M0/test-12/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-12" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-12/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-12/$src_file $M0/test-12/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-12/$dst_file 2>/dev/null
+TEST file_existence_check test-12/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-12/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-12/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-12/$dst_file
+
+################### Test 13 ####################################
+
+# src_hash != src_cached
+# dst_hash = dst_cached
+# dst_hash != src_cached
+# dst_hash != src_hash
+
+echo " **** Test 13 **** "
+
+TEST mkdir $M0/test-13
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-13" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-13/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-13" "src-"
+src_file=$dstfilename
+
+mv $M0/test-13/$src0_file $M0/test-13/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-13" "dst-"
+dst_file=$dstfilename
+TEST touch $M0/test-13/$dst_file
+
+# 2. Rename the file
+
+mv $M0/test-13/$src_file $M0/test-13/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-13/$dst_file 2>/dev/null
+TEST file_existence_check test-13/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-13/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-13/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-13/$dst_file
+
+
+################### Test 14 ####################################
+
+# src_hash != src_cached
+# dst_hash = src_hash
+# dst_cached = src_cached
+
+echo " **** Test 14 **** "
+
+TEST mkdir $M0/test-14
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-14" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-14/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-14" "src-"
+src_file=$dstfilename
+
+mv $M0/test-14/$src0_file $M0/test-14/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-14" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-14/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-14" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-14/$dst0_file $M0/test-14/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-14/$src_file $M0/test-14/$dst_file
+
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-1")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-14/$dst_file 2>/dev/null
+TEST file_existence_check test-14/$dst_file "$V0-client-1" "$V0-client-0"
+TEST file_does_not_exist test-14/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-14/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-14/$dst_file
+
+################### Test 15 ####################################
+
+# src_hash != src_cached
+# dst_hash != src_hash
+# dst_hash != src_cached
+# dst_cached = src_cached
+
+echo " **** Test 15 **** "
+
+TEST mkdir $M0/test-15
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-15" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-15/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-15" "src-"
+src_file=$dstfilename
+
+mv $M0/test-15/$src0_file $M0/test-15/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-15" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-15/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-15" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-15/$dst0_file $M0/test-15/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-15/$src_file $M0/test-15/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-15/$dst_file 2>/dev/null
+TEST file_existence_check test-15/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-15/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-15/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-15/$dst_file
+
+
+
+################### Test 16 ####################################
+
+# src_hash != src_cached
+# dst_hash = src_cached
+# dst_cached = src_hash
+
+echo " **** Test 16 **** "
+
+TEST mkdir $M0/test-16
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-16" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-16/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-16" "src-"
+src_file=$dstfilename
+
+mv $M0/test-16/$src0_file $M0/test-16/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-16" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-16/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-16" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-16/$dst0_file $M0/test-16/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-16/$src_file $M0/test-16/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-16/$dst_file 2>/dev/null
+TEST file_existence_check test-16/$dst_file "$V0-client-0"
+TEST file_does_not_exist test-16/$src_file
+EXPECT "0" file_is_linkto $dst_hash_brick/test-16/$dst_file
+
+
+################### Test 17 ####################################
+
+# src_hash != src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash != src_cached
+# dst_cached = src_hash
+
+
+echo " **** Test 17 **** "
+
+TEST mkdir $M0/test-17
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-17" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-17/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-17" "src-"
+src_file=$dstfilename
+
+mv $M0/test-17/$src0_file $M0/test-17/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-17" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-17/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-17" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-17/$dst0_file $M0/test-17/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-17/$src_file $M0/test-17/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-2")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-17/$dst_file 2>/dev/null
+TEST file_existence_check test-17/$dst_file "$V0-client-2" "$V0-client-0"
+TEST file_does_not_exist test-17/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-17/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-17/$dst_file
+
+
+################### Test 18 ####################################
+
+# src_hash != src_cached
+# dst_hash != dst_cached
+# dst_hash != src_hash != src_cached != dst_cached
+
+
+echo " **** Test 18 **** "
+
+TEST mkdir $M0/test-18
+
+
+# 1. Create src and dst files
+
+TEST first_filename_with_same_hashsubvol "$V0-client-0" "$M0/test-18" "src0-"
+src0_file=$dstfilename
+TEST touch $M0/test-18/$src0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-1" "$M0/test-18" "src-"
+src_file=$dstfilename
+
+mv $M0/test-18/$src0_file $M0/test-18/$src_file
+
+
+TEST first_filename_with_same_hashsubvol "$V0-client-2" "$M0/test-18" "dst0-"
+dst0_file=$dstfilename
+TEST touch $M0/test-18/$dst0_file
+
+TEST first_filename_with_same_hashsubvol "$V0-client-3" "$M0/test-18" "dst-"
+dst_file=$dstfilename
+
+mv $M0/test-18/$dst0_file $M0/test-18/$dst_file
+
+
+# 2. Rename the file
+
+mv $M0/test-18/$src_file $M0/test-18/$dst_file
+
+# 3. Validate
+
+dst_hash_brick=$(get_brick_path_for_subvol "$V0-client-3")
+dst_cached_brick=$(get_brick_path_for_subvol "$V0-client-0")
+
+TEST stat $M0/test-18/$dst_file 2>/dev/null
+TEST file_existence_check test-18/$dst_file "$V0-client-3" "$V0-client-0"
+TEST file_does_not_exist test-18/$src_file
+EXPECT "1" file_is_linkto $dst_hash_brick/test-18/$dst_file
+EXPECT "0" file_is_linkto $dst_cached_brick/test-18/$dst_file
+
+
+# Cleanup
+cleanup
+
diff --git a/tests/basic/distribute/spare_file_rebalance.t b/tests/basic/distribute/spare_file_rebalance.t
new file mode 100644
index 00000000000..061c02f7392
--- /dev/null
+++ b/tests/basic/distribute/spare_file_rebalance.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#------------------------------------------------------------
+
+# Test case - Create sparse files on MP and verify
+# file info after rebalance
+#------------------------------------------------------------
+
+# Create a sparse file (a small write at a large seek offset leaves a hole)
+# and a sparse copy of it
+TEST cd $M0;
+dd if=/dev/urandom of=sparse_file bs=10k count=1 seek=2M
+cp --sparse=always sparse_file sparse_file_3;
+
+# Add a 3rd brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3;
+
+# Trigger rebalance
+TEST $CLI volume rebalance $V0 start force;
+EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed;
+
+# Compare original and rebalanced files
+TEST cd $B0/${V0}2
+TEST cmp sparse_file $B0/${V0}3/sparse_file_3
+
+cleanup;
diff --git a/tests/basic/ec/ec-1468261.t b/tests/basic/ec/ec-1468261.t
index 902fbb7d2f1..77d704cf880 100644
--- a/tests/basic/ec/ec-1468261.t
+++ b/tests/basic/ec/ec-1468261.t
@@ -34,11 +34,12 @@ EXPECT_WITHIN $IO_WAIT_TIMEOUT "^$" get_hex_xattr trusted.ec.dirty $B0/${V0}3/te
EXPECT_WITHIN $IO_WAIT_TIMEOUT "^$" get_hex_xattr trusted.ec.dirty $B0/${V0}4/test_dir
EXPECT_WITHIN $IO_WAIT_TIMEOUT "^$" get_hex_xattr trusted.ec.dirty $B0/${V0}5/test_dir
-#Touch a file and kill two bricks
-TEST touch $M0/test_dir/new_file
+#Kill two bricks and touch a file
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST kill_brick $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
+TEST touch $M0/test_dir/new_file
+sleep 2
#Dirty should be set on up bricks
EXPECT_WITHIN $IO_WAIT_TIMEOUT "^00000000000000010000000000000001$" get_hex_xattr trusted.ec.dirty $B0/${V0}2/test_dir
@@ -58,15 +59,13 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST glusterfs -s $H0 --volfile-id $V0 $M0;
#Create a tar file
-TEST mkdir $M0/test_dir
-for i in {1..3000};do
-dd if=/dev/urandom of=$M0/test_dir/file-$i bs=1k count=10;
-done
-tar -cf $M0/test_dir.tar $M0/test_dir/ 2>/dev/null
-rm -rf $M0/test_dir/
+TEST mkdir /tmp/test_dir
+seq 1 3000 | xargs -n 1 -P 20 -I {} dd if=/dev/urandom of=/tmp/test_dir/file-{} bs=10K count=1
+tar -cf /tmp/test_dir.tar /tmp/test_dir/ 2>/dev/null
+rm -rf /tmp/test_dir/
#Untar the tar file
-tar -C $M0 -xf $M0/test_dir.tar 2>/dev/null&
+tar -C $M0 -xf /tmp/test_dir.tar 2>/dev/null&
#Kill 1st and 2nd brick
TEST kill_brick $V0 $H0 $B0/${V0}0
@@ -75,6 +74,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "4" ec_child_up_count $V0 0
#Stop untaring
TEST kill %1
+rm -f /tmp/test_dir.tar
#Bring up the down bricks
TEST $CLI volume start $V0 force
diff --git a/tests/basic/ec/ec-badfd.c b/tests/basic/ec/ec-badfd.c
new file mode 100644
index 00000000000..8be23c10eaf
--- /dev/null
+++ b/tests/basic/ec/ec-badfd.c
@@ -0,0 +1,124 @@
+#include <stdio.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
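+/* Overview: write to a file over gfapi, stop the volume underneath the open
+ * fd so that the post-op update of size/version fails, then verify that
+ * glfs_fsync() and glfs_close() on the now-bad fd return an error instead of
+ * silently succeeding. */
+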
+int
+fill_iov(struct iovec *iov, char fillchar, int count)
+{
+ int ret = -1;
+
+ iov->iov_base = malloc(count + 1);
+ if (iov->iov_base == NULL) {
+ return ret;
+ } else {
+ iov->iov_len = count;
+ ret = 0;
+ }
+ memset(iov->iov_base, fillchar, count);
+ memset(iov->iov_base + count, '\0', 1);
+
+ return ret;
+}
+
+int
+write_sync(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ fprintf(stderr, "failed to create iov");
+ goto out;
+ }
+
+ ret = glfs_pwritev(glfd, &iov, 1, 0, flags);
+out:
+ if (ret < 0) {
+ fprintf(stderr, "glfs_pwritev failed, %d", errno);
+ }
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+ int ret = 1;
+ char volume_cmd[4096] = {0};
+
+ if (argc != 4) {
+ fprintf(stderr, "Syntax: %s <host> <volname> <file>\n", argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_set_logging(fs, "/tmp/ec-badfd.log", 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ fd = glfs_open(fs, argv[3], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ ret = write_sync(fs, fd, 16);
+ if (ret < 0) {
+ fprintf(stderr, "write_sync failed\n");
+ }
+
+ snprintf(volume_cmd, sizeof(volume_cmd),
+ "gluster --mode=script volume stop %s", argv[2]);
+ /*Stop the volume so that update-size-version fails*/
+ system(volume_cmd);
+ sleep(8); /* 3 seconds more than eager-lock-timeout*/
+ snprintf(volume_cmd, sizeof(volume_cmd),
+ "gluster --mode=script volume start %s", argv[2]);
+ system(volume_cmd);
+ sleep(8); /*wait for bricks to come up*/
+ ret = glfs_fsync(fd, NULL, NULL);
+ if (ret == 0) {
+ fprintf(stderr, "fsync succeeded on a BADFD\n");
+ exit(1);
+ }
+
+ ret = glfs_close(fd);
+ if (ret == 0) {
+ fprintf(stderr, "flush succeeded on a BADFD\n");
+ exit(1);
+ }
+ ret = 0;
+
+out:
+ unlink("/tmp/ec-badfd.log");
+ glfs_fini(fs);
+
+ return ret;
+}
diff --git a/tests/basic/ec/ec-badfd.t b/tests/basic/ec/ec-badfd.t
new file mode 100755
index 00000000000..56feb47f115
--- /dev/null
+++ b/tests/basic/ec/ec-badfd.t
@@ -0,0 +1,26 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{1..6}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 5
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+TEST touch $M0/file
+
+TEST build_tester $(dirname $0)/ec-badfd.c -lgfapi -Wall -O2
+TEST $(dirname $0)/ec-badfd $H0 $V0 /file
+cleanup_tester $(dirname ${0})/ec-badfd
+
+cleanup;
diff --git a/tests/basic/ec/ec-cpu-extensions.t b/tests/basic/ec/ec-cpu-extensions.t
index a599a316925..c9af27ea234 100644
--- a/tests/basic/ec/ec-cpu-extensions.t
+++ b/tests/basic/ec/ec-cpu-extensions.t
@@ -1,6 +1,6 @@
#!/bin/bash
-DISPERSE=6
+DISPERSE=18
REDUNDANCY=2
. $(dirname $0)/../../include.rc
@@ -39,6 +39,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 redundancy $REDUNDANCY $H0:$B0/${V0}{1..$DISPERSE}
TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume set $V0 disperse.read-policy round-robin
EXPECT 'Created' volinfo_field $V0 'Status'
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'
diff --git a/tests/basic/ec/ec-dirty-flags.t b/tests/basic/ec/ec-dirty-flags.t
new file mode 100644
index 00000000000..68e66103f08
--- /dev/null
+++ b/tests/basic/ec/ec-dirty-flags.t
@@ -0,0 +1,23 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This checks that a fop leaves the dirty flag settings in the correct state
+# after it finishes.
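+# Self-heal is disabled below, so any dirty flag left behind by a completed
+# fop would show up as a pending heal entry; a clean run must end with
+# get_pending_heal_count reporting 0.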
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0..2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+cd $M0
+for i in {1..1000}; do dd if=/dev/zero of=file-${i} bs=512k count=2; done
+cd -
+EXPECT "^0$" get_pending_heal_count $V0
+
+cleanup
diff --git a/tests/basic/ec/ec-fix-openfd.t b/tests/basic/ec/ec-fix-openfd.t
index c32f9332137..04fdd802c62 100644
--- a/tests/basic/ec/ec-fix-openfd.t
+++ b/tests/basic/ec/ec-fix-openfd.t
@@ -37,6 +37,8 @@ TEST fd_open $fd 'rw' "$M0/test_file"
TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+sleep 1
+
#Test the fd count
EXPECT "0" get_fd_count $V0 $H0 $B0/${V0}0 test_file
EXPECT "1" get_fd_count $V0 $H0 $B0/${V0}1 test_file
diff --git a/tests/basic/ec/ec-quorum-count.t b/tests/basic/ec/ec-quorum-count.t
new file mode 100644
index 00000000000..9310ebbb8f2
--- /dev/null
+++ b/tests/basic/ec/ec-quorum-count.t
@@ -0,0 +1,167 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../ec.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0..5}
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 5
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume set $V0 disperse.background-heals 0
+TEST $CLI volume set $V0 disperse.heal-wait-qlength 0
+
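+#disperse.quorum-count must lie between the data count (4 for this 4+2 volume)
+#and the brick count (6). With it set, modifying fops should fail whenever
+#fewer than quorum-count bricks are up, while reads are not subject to
+#quorum-count.
+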
+#Should fail on non-disperse volume
+TEST ! $CLI volume set $V1 disperse.quorum-count 5
+
+#Values outside the valid range should fail; values within it should succeed
+TEST ! $CLI volume set $V0 disperse.quorum-count 0
+TEST ! $CLI volume set $V0 disperse.quorum-count -0
+TEST ! $CLI volume set $V0 disperse.quorum-count abc
+TEST ! $CLI volume set $V0 disperse.quorum-count 10abc
+TEST ! $CLI volume set $V0 disperse.quorum-count 1
+TEST ! $CLI volume set $V0 disperse.quorum-count 2
+TEST ! $CLI volume set $V0 disperse.quorum-count 3
+TEST $CLI volume set $V0 disperse.quorum-count 4
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+#Test that the option is reflected in the mount
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^4$" ec_option_value $V0 $M0 0 quorum-count
+TEST $CLI volume reset $V0 disperse.quorum-count
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" ec_option_value $V0 $M0 0 quorum-count
+TEST $CLI volume set $V0 disperse.quorum-count 6
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^6$" ec_option_value $V0 $M0 0 quorum-count
+
+TEST touch $M0/a
+TEST touch $M0/data
+TEST setfattr -n trusted.def -v def $M0/a
+TEST touch $M0/src
+TEST touch $M0/del-me
+TEST mkdir $M0/dir1
+TEST dd if=/dev/zero of=$M0/read-file bs=1M count=1 oflag=direct
+TEST dd if=/dev/zero of=$M0/del-file bs=1M count=1 oflag=direct
+TEST gf_rm_file_and_gfid_link $B0/${V0}0 del-file
+#modify operations should fail as the file is not in quorum
+TEST ! dd if=/dev/zero of=$M0/del-file bs=1M count=1 oflag=direct
+TEST kill_brick $V0 $H0 $B0/${V0}0
+#Read should succeed even when quorum-count is not met
+TEST dd if=$M0/read-file of=/dev/null iflag=direct
+TEST ! touch $M0/a2
+TEST ! mkdir $M0/dir2
+TEST ! mknod $M0/b2 b 4 5
+TEST ! ln -s $M0/a $M0/symlink
+TEST ! ln $M0/a $M0/link
+TEST ! mv $M0/src $M0/dst
+TEST ! rm -f $M0/del-me
+TEST ! rmdir $M0/dir1
+TEST ! dd if=/dev/zero of=$M0/a bs=1M count=1 conv=notrunc
+TEST ! dd if=/dev/zero of=$M0/data bs=1M count=1 conv=notrunc
+TEST ! truncate -s 0 $M0/a
+TEST ! setfattr -n trusted.abc -v abc $M0/a
+TEST ! setfattr -x trusted.def $M0/a
+TEST ! chmod +x $M0/a
+TEST ! fallocate -l 2m -n $M0/a
+TEST ! fallocate -p -l 512k $M0/a
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
+# Reset the option and check that the default quorum (the data count) takes
+# effect: fops should still succeed with two of the six bricks down.
+TEST $CLI volume reset $V0 disperse.quorum-count
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^0$" ec_option_value $V0 $M0 0 quorum-count
+TEST touch $M0/a1
+TEST touch $M0/data1
+TEST setfattr -n trusted.def -v def $M0/a1
+TEST touch $M0/src1
+TEST touch $M0/del-me1
+TEST mkdir $M0/dir11
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/a21
+TEST mkdir $M0/dir21
+TEST mknod $M0/b21 b 4 5
+TEST ln -s $M0/a1 $M0/symlink1
+TEST ln $M0/a1 $M0/link1
+TEST mv $M0/src1 $M0/dst1
+TEST rm -f $M0/del-me1
+TEST rmdir $M0/dir11
+TEST dd if=/dev/zero of=$M0/a1 bs=1M count=1 conv=notrunc
+TEST dd if=/dev/zero of=$M0/data1 bs=1M count=1 conv=notrunc
+TEST truncate -s 0 $M0/a1
+TEST setfattr -n trusted.abc -v abc $M0/a1
+TEST setfattr -x trusted.def $M0/a1
+TEST chmod +x $M0/a1
+TEST fallocate -l 2m -n $M0/a1
+TEST fallocate -p -l 512k $M0/a1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST touch $M0/a2
+TEST touch $M0/data2
+TEST setfattr -n trusted.def -v def $M0/a1
+TEST touch $M0/src2
+TEST touch $M0/del-me2
+TEST mkdir $M0/dir12
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST ! touch $M0/a22
+TEST ! mkdir $M0/dir22
+TEST ! mknod $M0/b22 b 4 5
+TEST ! ln -s $M0/a2 $M0/symlink2
+TEST ! ln $M0/a2 $M0/link2
+TEST ! mv $M0/src2 $M0/dst2
+TEST ! rm -f $M0/del-me2
+TEST ! rmdir $M0/dir12
+TEST ! dd if=/dev/zero of=$M0/a2 bs=1M count=1 conv=notrunc
+TEST ! dd if=/dev/zero of=$M0/data2 bs=1M count=1 conv=notrunc
+TEST ! truncate -s 0 $M0/a2
+TEST ! setfattr -n trusted.abc -v abc $M0/a2
+TEST ! setfattr -x trusted.def $M0/a2
+TEST ! chmod +x $M0/a2
+TEST ! fallocate -l 2m -n $M0/a2
+TEST ! fallocate -p -l 512k $M0/a2
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
+# Set quorum-count to 5 and kill 1 brick and the fops should pass
+TEST $CLI volume set $V0 disperse.quorum-count 5
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "^5$" ec_option_value $V0 $M0 0 quorum-count
+TEST touch $M0/a3
+TEST touch $M0/data3
+TEST setfattr -n trusted.def -v def $M0/a3
+TEST touch $M0/src3
+TEST touch $M0/del-me3
+TEST mkdir $M0/dir13
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST touch $M0/a31
+TEST mkdir $M0/dir31
+TEST mknod $M0/b31 b 4 5
+TEST ln -s $M0/a3 $M0/symlink3
+TEST ln $M0/a3 $M0/link3
+TEST mv $M0/src3 $M0/dst3
+TEST rm -f $M0/del-me3
+TEST rmdir $M0/dir13
+TEST dd if=/dev/zero of=$M0/a3 bs=1M count=1 conv=notrunc
+TEST dd if=/dev/zero of=$M0/data3 bs=1M count=1 conv=notrunc
+TEST truncate -s 0 $M0/a3
+TEST setfattr -n trusted.abc -v abc $M0/a3
+TEST setfattr -x trusted.def $M0/a3
+TEST chmod +x $M0/a3
+TEST fallocate -l 2m -n $M0/a3
+TEST fallocate -p -l 512k $M0/a3
+TEST dd if=/dev/urandom of=$M0/heal-file bs=1M count=1 oflag=direct
+cksum_before_heal="$(md5sum $M0/heal-file | awk '{print $1}')"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
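+# Bricks 4 and 5 are killed below, so the read has to use the fragment that
+# was healed onto brick 0; a matching checksum confirms the heal reconstructed
+# the data correctly.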
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick $V0 $H0 $B0/${V0}5
+cksum_after_heal=$(dd if=$M0/heal-file iflag=direct | md5sum | awk '{print $1}')
+TEST [[ $cksum_before_heal == $cksum_after_heal ]]
+cleanup;
diff --git a/tests/basic/ec/ec-read-mask.t b/tests/basic/ec/ec-read-mask.t
new file mode 100644
index 00000000000..ddb556f2973
--- /dev/null
+++ b/tests/basic/ec/ec-read-mask.t
@@ -0,0 +1,114 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../ec.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+
+#Empty read-mask should fail
+TEST ! $GFS --xlator-option=*.ec-read-mask="" -s $H0 --volfile-id $V0 $M0
+
+#Less than 4 number of bricks should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option=*.ec-read-mask="0:1:2" -s $H0 --volfile-id $V0 $M0
+
+#ids greater than 5 should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:6" -s $H0 --volfile-id $V0 $M0
+
+#ids less than 0 should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:-1:2:5" -s $H0 --volfile-id $V0 $M0
+
+#read-mask containing non-numeric characters should fail
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:5:abc" -s $H0 --volfile-id $V0 $M0
+TEST ! $GFS --xlator-option="*.ec-read-mask=0:1:2:5a" -s $H0 --volfile-id $V0 $M0
+
+#mount with at least 4 read-mask-ids and all of them valid should pass
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5:4:3" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
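+#The read-mask value is reported as a bitmask with the highest brick index
+#first: all six bricks enabled reads back as 111111, and 0:1:2:5 as 100111.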
+EXPECT "^111111$" ec_option_value $V0 $M0 0 read-mask
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+
+TEST dd if=/dev/urandom of=$M0/a bs=1M count=1
+md5=$(md5sum $M0/a | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Reads on the file should fail if any of the read-mask bricks is down when the
+#number of ids equals the data count
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}5
+TEST ! dd if=$M0/a of=/dev/null
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+#Read on file should succeed when non-read-mask bricks are down
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}3
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}4
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+TEST kill_brick $V0 $H0 $B0/${V0}3
+TEST kill_brick $V0 $H0 $B0/${V0}4
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume start $V0 force
+
+#Deliberately corrupt the fragments on bricks 3 and 4 and check that reads
+#still return correct data
+TEST dd if=/dev/zero of=$B0/${V0}3/a bs=256k count=1
+TEST dd if=/dev/zero of=$B0/${V0}4/a bs=256k count=1
+TEST $GFS --xlator-option="*.ec-read-mask=0:1:2:5" -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+EXPECT "^100111$" ec_option_value $V0 $M0 0 read-mask
+EXPECT "^$md5$" echo $(dd if=$M0/a | md5sum | awk '{print $1}')
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup;
diff --git a/tests/basic/ec/ec-reset-brick.t b/tests/basic/ec/ec-reset-brick.t
new file mode 100644
index 00000000000..f1a625df4ff
--- /dev/null
+++ b/tests/basic/ec/ec-reset-brick.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+function num_entries {
+ ls -l $1 | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+mkdir $M0/dir
+touch $M0/dir/{1..10}
+
+mkdir $M0/dir/dir1
+touch $M0/dir/dir1/{1..10}
+
+#kill brick process
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}5 start
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "5" ec_child_up_count $V0 0
+
+#reset-brick by removing all the data and create dir again
+rm -rf $B0/${V0}5
+mkdir $B0/${V0}5
+
+#start the brick process and heal by committing reset-brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}5 $H0:$B0/${V0}5 commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count_shd $V0 0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+
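+#ls -l prints a "total" line, so dir (10 files + dir1) shows 12 entries and
+#dir1 (10 files) shows 11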
+EXPECT "^12$" num_entries $B0/${V0}5/dir
+EXPECT "^11$" num_entries $B0/${V0}5/dir/dir1
+
+ec_version=$(get_hex_xattr trusted.ec.version $B0/${V0}0)
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}1
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}2
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}3
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}4
+EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}5
+
+cleanup;
diff --git a/tests/basic/ec/ec-root-heal.t b/tests/basic/ec/ec-root-heal.t
index a133885ef1d..11ea7cdf9d4 100644
--- a/tests/basic/ec/ec-root-heal.t
+++ b/tests/basic/ec/ec-root-heal.t
@@ -22,7 +22,8 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count_shd $V0 0
# active heal
TEST $CLI volume heal $V0 full
#ls -l gives "Total" line so number of lines will be 1 more
-EXPECT_WITHIN $HEAL_TIMEOUT "^11$" num_entries $B0/${V0}6
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}
+EXPECT "^11$" num_entries $B0/${V0}6
ec_version=$(get_hex_xattr trusted.ec.version $B0/${V0}0)
EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}1
EXPECT "$ec_version" get_hex_xattr trusted.ec.version $B0/${V0}2
diff --git a/tests/basic/ec/ec-seek.t b/tests/basic/ec/ec-seek.t
index 6a0060870c8..5a7d31b9f8f 100644
--- a/tests/basic/ec/ec-seek.t
+++ b/tests/basic/ec/ec-seek.t
@@ -6,7 +6,7 @@
cleanup
SEEK=$(dirname $0)/seek
-build_tester $(dirname $0)/seek.c -o ${SEEK}
+build_tester $(dirname $0)/../seek.c -o ${SEEK}
TEST glusterd
TEST pidof glusterd
@@ -51,6 +51,7 @@ EXPECT "^$((${BSIZE} * 5 + 512))$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5
EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 5 + 512))
EXPECT "^ENXIO$" ${SEEK} scan ${M0}/test hole $((${BSIZE} * 6))
+rm -f ${SEEK}
cleanup
# Centos6 regression slaves seem to not support SEEK_DATA/SEEK_HOLE
diff --git a/tests/basic/ec/ec-stripe.t b/tests/basic/ec/ec-stripe.t
index 1e940eba81b..98b92294feb 100644
--- a/tests/basic/ec/ec-stripe.t
+++ b/tests/basic/ec/ec-stripe.t
@@ -202,7 +202,7 @@ TEST truncate -s 0 $B0/test_file
TEST truncate -s 0 $M0/test_file
TEST dd if=$B0/misc_file of=$B0/test_file bs=1022 count=5 oflag=seek_bytes,sync seek=400 conv=notrunc
TEST dd if=$B0/misc_file of=$M0/test_file bs=1022 count=5 oflag=seek_bytes,sync seek=400 conv=notrunc
-check_statedump_md5sum 4 5
+check_statedump_md5sum 4 4
clean_file_unmount
### 14 - Truncate to invalidate all but one the stripe in cache ####
diff --git a/tests/basic/ec/gfapi-ec-open-truncate.c b/tests/basic/ec/gfapi-ec-open-truncate.c
new file mode 100644
index 00000000000..fb16807003a
--- /dev/null
+++ b/tests/basic/ec/gfapi-ec-open-truncate.c
@@ -0,0 +1,171 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+int
+fill_iov(struct iovec *iov, char fillchar, int count)
+{
+ int ret = -1;
+
+ iov->iov_base = calloc(count + 1, sizeof(fillchar));
+ if (iov->iov_base == NULL) {
+ return ret;
+ } else {
+ iov->iov_len = count;
+ ret = 0;
+ }
+ memset(iov->iov_base, fillchar, count);
+ memset(iov->iov_base + count, '\0', 1);
+
+ return ret;
+}
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile_server failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+int
+main(int argc, char *argv[])
+{
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ int ret = 0;
+ int i = 0;
+ int count = 200;
+ struct iovec iov = {0};
+ int flags = O_RDWR;
+ int bricksup = 0;
+ int fdopen = 0;
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = init_glfs(hostname, volname, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ /* Brick is down and we are opening a file to trigger fd heal. */
+ /* Bypass Write-behind */
+ glfd = glfs_open(fs, "a", O_WRONLY | O_TRUNC | O_SYNC);
+ if (glfd == NULL) {
+ LOG_ERR("glfs_open_truncate failed");
+ exit(1);
+ }
+ system("gluster --mode=script volume start patchy force");
+ /*CHILD_UP_TIMEOUT is 20 seconds*/
+ for (i = 0; i < 20; i++) {
+ ret = system(
+ "[ $(gluster --mode=script volume status patchy | "
+ "grep \" Y \" | awk '{print $(NF-1)}' | wc -l) == 3 ]");
+ if (WIFEXITED(ret) && WEXITSTATUS(ret)) {
+ printf("Ret value of system: %d\n, ifexited: %d, exitstatus: %d",
+ ret, WIFEXITED(ret), WEXITSTATUS(ret));
+ sleep(1);
+ continue;
+ }
+ printf("Number of loops: %d\n", i);
+ bricksup = 1;
+ break;
+ }
+ if (!bricksup) {
+ system("gluster --mode=script volume status patchy");
+ LOG_ERR("Bricks didn't come up\n");
+ exit(1);
+ }
+
+ /*Not sure how to check that the child-up reached EC, so sleep 3 for now*/
+ sleep(3);
+ ret = fill_iov(&iov, 'a', 200);
+ if (ret) {
+ LOG_ERR("failed to create iov");
+ exit(1);
+ }
+
+ /*write will trigger re-open*/
+ ret = glfs_pwritev(glfd, &iov, 1, 0, flags);
+ if (ret < 0) {
+ LOG_ERR("glfs_test_function failed");
+ exit(1);
+ }
+ /*Check reopen happened by checking for open-fds on the brick*/
+ for (i = 0; i < 20; i++) {
+ ret = system(
+ "[ $(for i in $(pgrep glusterfsd); do ls -l /proc/$i/fd | grep "
+ "\"[.]glusterfs\" | grep -v \".glusterfs/[0-9a-f][0-9a-f]\" | grep "
+ "-v health_check; done | wc -l) == 3 ]");
+ if (WIFEXITED(ret) && WEXITSTATUS(ret)) {
+ printf("Ret value of system: %d\n, ifexited: %d, exitstatus: %d",
+ ret, WIFEXITED(ret), WEXITSTATUS(ret));
+ sleep(1);
+ continue;
+ }
+ fdopen = 1;
+ break;
+ }
+
+ if (!fdopen) {
+ LOG_ERR("fd reopen didn't succeed");
+ exit(1);
+ }
+
+ return 0;
+}
diff --git a/tests/basic/ec/gfapi-ec-open-truncate.t b/tests/basic/ec/gfapi-ec-open-truncate.t
new file mode 100644
index 00000000000..e22562c6ea3
--- /dev/null
+++ b/tests/basic/ec/gfapi-ec-open-truncate.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This .t tests open-fd heal for a file that was opened with O_TRUNC.
+#The test passes only if the re-open done during heal does not carry O_TRUNC.
+
+cleanup
+
+TEST glusterd
+
+TEST $CLI volume create $V0 disperse 3 ${H0}:$B0/brick{1,2,3}
+EXPECT 'Created' volinfo_field $V0 'Status'
+#Disable heals to prevent any chance of heals masking the problem
+TEST $CLI volume set $V0 disperse.background-heals 0
+TEST $CLI volume set $V0 disperse.heal-wait-qlength 0
+TEST $CLI volume set $V0 performance.write-behind off
+
+#We need the truncate fop to go through before the pre-op completes for the
+#write fop which triggers open-fd heal. Otherwise truncate won't be allowed on the 'bad' brick.
+TEST $CLI volume set $V0 delay-gen posix
+TEST $CLI volume set $V0 delay-gen.enable fxattrop
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.delay-duration 1000000
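+#(delay-gen's delay-duration is in microseconds, so every fxattrop should be
+#held for about a second, giving the truncate time to race ahead)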
+
+TEST $CLI volume heal $V0 disable
+
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+TEST touch $M0/a
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST kill_brick $V0 $H0 $B0/brick1
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-ec-open-truncate.c -lgfapi
+
+TEST $CLI volume profile $V0 info clear
+TEST ./$(dirname $0)/gfapi-ec-open-truncate ${H0} $V0 $logdir/gfapi-ec-open-truncate.log
+
+EXPECT "^2$" echo $($CLI volume profile $V0 info incremental | grep -i truncate | wc -l)
+cleanup_tester $(dirname $0)/gfapi-ec-open-truncate
+
+cleanup
diff --git a/tests/basic/ec/self-heal-read-write-fail.t b/tests/basic/ec/self-heal-read-write-fail.t
new file mode 100644
index 00000000000..0ba591b5bb2
--- /dev/null
+++ b/tests/basic/ec/self-heal-read-write-fail.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#This test verifies that self-heal fails when read/write fails as part of heal
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+TEST touch $M0/a
+TEST kill_brick $V0 $H0 $B0/${V0}0
+echo abc >> $M0/a
+
+# Umount the volume to force all pending writes to reach the bricks
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Load error-gen and fail read fop and test that heal fails
+TEST $CLI volume stop $V0 #Stop volume so that error-gen can be loaded
+TEST $CLI volume set $V0 debug.error-gen posix
+TEST $CLI volume set $V0 debug.error-fops read
+TEST $CLI volume set $V0 debug.error-number EBADF
+TEST $CLI volume set $V0 debug.error-failure 100
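+#(with error-failure at 100, every read fop on the bricks should fail, so the
+#heal attempted below cannot complete)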
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+TEST ! getfattr -n trusted.ec.heal $M0/a
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+#fail write fop and test that heal fails
+TEST $CLI volume stop $V0
+TEST $CLI volume set $V0 debug.error-fops write
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+TEST ! getfattr -n trusted.ec.heal $M0/a
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume stop $V0 #Stop volume so that error-gen can be disabled
+TEST $CLI volume reset $V0 debug.error-gen
+TEST $CLI volume reset $V0 debug.error-fops
+TEST $CLI volume reset $V0 debug.error-number
+TEST $CLI volume reset $V0 debug.error-failure
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" ec_child_up_count $V0 0
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+TEST getfattr -n trusted.ec.heal $M0/a
+EXPECT "^0$" get_pending_heal_count $V0
+
+#Test that heal worked as expected by forcing read from brick0
+#remount to make sure data is not served from any cache
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT "abc" cat $M0/a
+
+cleanup
diff --git a/tests/basic/ec/self-heal.t b/tests/basic/ec/self-heal.t
index d217559db1a..6329bb60248 100644
--- a/tests/basic/ec/self-heal.t
+++ b/tests/basic/ec/self-heal.t
@@ -131,6 +131,8 @@ TEST $CLI volume create $V0 redundancy 2 $H0:$B0/${V0}{0..5}
TEST $CLI volume set $V0 client-log-level DEBUG
#Write-behind has a bug where lookup can race over write which leads to size mismatch on the mount after a 'cp'
TEST $CLI volume set $V0 performance.write-behind off
+#md-cache can return stale stat due to default timeout being 1 sec
+TEST $CLI volume set $V0 performance.stat-prefetch off
EXPECT "Created" volinfo_field $V0 'Status'
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Started" volinfo_field $V0 'Status'
diff --git a/tests/basic/fencing/afr-lock-heal-advanced.c b/tests/basic/fencing/afr-lock-heal-advanced.c
new file mode 100644
index 00000000000..e202ccd5b29
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-advanced.c
@@ -0,0 +1,227 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define GF_ENFORCE_MANDATORY_LOCK "trusted.glusterfs.enforce-mandatory-lock"
+
+FILE *logfile_fp;
+
+#define LOG_ERR(func, err) \
+ do { \
+ if (!logfile_fp) { \
+ fprintf(stderr, "%\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(stderr); \
+ } else { \
+ fprintf(logfile_fp, "\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(logfile_fp); \
+ } \
+ } while (0)
+
+glfs_t *
+setup_client(char *hostname, char *volname, char *log_file)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(logfile_fp, "\nglfs_new: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_volfile_server failed ret:%d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_logging failed with ret: %d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+glfs_fd_t *
+open_file(glfs_t *fs, char *fname)
+{
+ glfs_fd_t *fd = NULL;
+
+ fd = glfs_creat(fs, fname, O_CREAT, 0644);
+ if (!fd) {
+ LOG_ERR("glfs_creat", errno);
+ goto out;
+ }
+out:
+ return fd;
+}
+
+int
+acquire_mandatory_lock(glfs_t *fs, glfs_fd_t *fd)
+{
+ struct flock lock;
+ int ret = 0;
+
+ /* initialize lock */
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 100;
+
+ ret = glfs_fsetxattr(fd, GF_ENFORCE_MANDATORY_LOCK, "set", 8, 0);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsetxattr", errno);
+ ret = -1;
+ goto out;
+ }
+
+ /* take a write mandatory lock */
+ ret = glfs_file_lock(fd, F_SETLKW, &lock, GLFS_LK_MANDATORY);
+ if (ret) {
+ LOG_ERR("glfs_file_lock", errno);
+ ret = -1;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+int
+perform_test(glfs_t *fs, char *file1, char *file2)
+{
+ int ret = 0;
+ glfs_fd_t *fd1 = NULL;
+ glfs_fd_t *fd2 = NULL;
+ char *buf = "0123456789";
+
+ fd1 = open_file(fs, file1);
+ if (!fd1) {
+ ret = -1;
+ goto out;
+ }
+ fd2 = open_file(fs, file2);
+ if (!fd2) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Kill one brick from the .t.*/
+ pause();
+
+ ret = acquire_mandatory_lock(fs, fd1);
+ if (ret) {
+ goto out;
+ }
+ ret = acquire_mandatory_lock(fs, fd2);
+ if (ret) {
+ goto out;
+ }
+
+ /* Bring the brick up and let the locks heal. */
+ pause();
+ /*At this point, the .t would have killed and brought back 2 bricks, marking
+ * the fd bad.*/
+
+ ret = glfs_write(fd1, buf, 10, 0);
+ if (ret > 0) {
+ /* Write is supposed to fail with EBADFD*/
+ LOG_ERR("glfs_write", ret);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (fd1)
+ glfs_close(fd1);
+ if (fd2)
+ glfs_close(fd2);
+ return ret;
+}
+
+static void
+sigusr1_handler(int signo)
+{
+ /*Signal caught. Just continue with the execution.*/
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char log_file[100];
+ char *hostname = NULL;
+ char *fname1 = NULL;
+ char *fname2 = NULL;
+
+ if (argc != 7) {
+ fprintf(stderr,
+ "Expect following args %s <host> <volname> <file1> <file2> "
+ "<log file "
+ "location> <log_file_suffix>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ fname1 = argv[3];
+ fname2 = argv[4];
+
+    /*Use SIGUSR1 and pause() as a means of hitting break-points in this
+     *program when signalled from the .t test case.*/
+ if (signal(SIGUSR1, sigusr1_handler) == SIG_ERR) {
+ LOG_ERR("SIGUSR1 handler error", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[5], "lock-heal.c", argv[6], "log");
+ logfile_fp = fopen(log_file, "w");
+ if (!logfile_fp) {
+ fprintf(stderr, "\nfailed to open %s\n", log_file);
+ fflush(stderr);
+ return -1;
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[5], "glfs-client", argv[6], "log");
+ fs = setup_client(hostname, volname, log_file);
+ if (!fs) {
+ LOG_ERR("setup_client", errno);
+ return -1;
+ }
+
+ ret = perform_test(fs, fname1, fname2);
+
+error:
+ if (fs) {
+ /*glfs_fini(fs)*/; // glfs fini path is racy and crashes the program
+ }
+
+ fclose(logfile_fp);
+
+ return ret;
+}
diff --git a/tests/basic/fencing/afr-lock-heal-advanced.t b/tests/basic/fencing/afr-lock-heal-advanced.t
new file mode 100644
index 00000000000..8a5b5989b5e
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-advanced.t
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+PROCESS_UP_TIMEOUT=90
+
+function is_gfapi_program_alive()
+{
+ pid=$1
+ ps -p $pid
+ if [ $? -eq 0 ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function get_active_lock_count {
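+    # Sum the ACTIVE lock entries recorded for the two inodes in this brick's
+    # statedump.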
+ brick=$1
+ i1=$2
+ i2=$3
+ pattern="ACTIVE.*client-${brick: -1}"
+
+ sdump=$(generate_brick_statedump $V0 $H0 $brick)
+ lock_count1="$(egrep "$i1" $sdump -A3| egrep "$pattern"|uniq|wc -l)"
+ lock_count2="$(egrep "$i2" $sdump -A3| egrep "$pattern"|uniq|wc -l)"
+ echo "$((lock_count1+lock_count2))"
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+TEST build_tester $(dirname $0)/afr-lock-heal-advanced.c -lgfapi -ggdb
+
+#------------------------------------------------------------------------------
+# Use more than 1 fd from the same client so that the list_for_each_* loops are executed more than once.
+$(dirname $0)/afr-lock-heal-advanced $H0 $V0 "/FILE1" "/FILE2" $logdir C1&
+client_pid=$!
+TEST [ $client_pid ]
+
+TEST sleep 5 # By now, the client would have opened an fd on FILE1 and FILE2 and be waiting for a SIGUSR1.
+EXPECT "Y" is_gfapi_program_alive $client_pid
+
+gfid_str1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE1))
+inode1="FILE1|gfid:$gfid_str1"
+gfid_str2=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE2))
+inode2="FILE2|gfid:$gfid_str2"
+
+# Kill brick-3 and let client-1 take lock on both files.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill -SIGUSR1 $client_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client_pid
+
+# Check lock is present on brick-1 and brick-2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}0 $inode1 $inode2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}1 $inode1 $inode2
+
+# Restart brick-3 and check that the lock has healed on it.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd.
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}2 $inode1 $inode2
+
+#------------------------------------------------------------------------------
+# Kill same brick before heal completes the first time and check it completes the second time.
+TEST $CLI volume set $V0 delay-gen locks
+TEST $CLI volume set $V0 delay-gen.delay-duration 5000000
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.enable finodelk
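+# The finodelk delay slows down the first lock heal enough that the brick can
+# be killed again before that heal completes.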
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume reset $V0 delay-gen
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_active_lock_count $B0/${V0}0 $inode1 $inode2
+
+#------------------------------------------------------------------------------
+# Kill 2 bricks and bring them back. The fds must be marked bad.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+# TODO: `gluster v statedump $V0 client localhost:$client_pid` is not working,
+# so sleep for 20 seconds for the client to connect to the bricks.
+TEST sleep $CHILD_UP_TIMEOUT
+
+# Try to write to FILE1 from the .c; it must fail.
+TEST kill -SIGUSR1 $client_pid
+wait $client_pid
+ret=$?
+TEST [ $ret == 0 ]
+
+cleanup_tester $(dirname $0)/afr-lock-heal-advanced
+cleanup;
diff --git a/tests/basic/fencing/afr-lock-heal-basic.c b/tests/basic/fencing/afr-lock-heal-basic.c
new file mode 100644
index 00000000000..768c9e57181
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-basic.c
@@ -0,0 +1,182 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define GF_ENFORCE_MANDATORY_LOCK "trusted.glusterfs.enforce-mandatory-lock"
+
+FILE *logfile_fp;
+
+#define LOG_ERR(func, err) \
+ do { \
+ if (!logfile_fp) { \
+ fprintf(stderr, "%\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(stderr); \
+ } else { \
+ fprintf(logfile_fp, "\n%d %s : returned error (%s)\n", __LINE__, \
+ func, strerror(err)); \
+ fflush(logfile_fp); \
+ } \
+ } while (0)
+
+glfs_t *
+setup_client(char *hostname, char *volname, char *log_file)
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(logfile_fp, "\nglfs_new: returned NULL (%s)\n",
+ strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_volfile_server failed ret:%d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_set_logging(fs, log_file, 7);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_set_logging failed with ret: %d (%s)\n",
+ ret, strerror(errno));
+ goto error;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(logfile_fp, "\nglfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ goto error;
+ }
+
+out:
+ return fs;
+error:
+ return NULL;
+}
+
+int
+acquire_mandatory_lock(glfs_t *fs, char *fname)
+{
+ struct flock lock;
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+
+ fd = glfs_creat(fs, fname, O_CREAT, 0644);
+ if (!fd) {
+ if (errno != EEXIST) {
+ LOG_ERR("glfs_creat", errno);
+ ret = -1;
+ goto out;
+ }
+ fd = glfs_open(fs, fname, O_RDWR | O_NONBLOCK);
+ if (!fd) {
+ LOG_ERR("glfs_open", errno);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ /* initialize lock */
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 100;
+
+ ret = glfs_fsetxattr(fd, GF_ENFORCE_MANDATORY_LOCK, "set", 8, 0);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsetxattr", errno);
+ ret = -1;
+ goto out;
+ }
+
+ pause();
+
+ /* take a write mandatory lock */
+ ret = glfs_file_lock(fd, F_SETLKW, &lock, GLFS_LK_MANDATORY);
+ if (ret) {
+ LOG_ERR("glfs_file_lock", errno);
+ goto out;
+ }
+
+ pause();
+
+out:
+ if (fd) {
+ glfs_close(fd);
+ }
+
+ return ret;
+}
+
+static void
+sigusr1_handler(int signo)
+{
+ /*Signal caught. Just continue with the execution.*/
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char log_file[100];
+ char *hostname = NULL;
+ char *fname = NULL;
+
+ if (argc != 6) {
+ fprintf(stderr,
+ "Expect following args %s <host> <volname> <file> <log file "
+ "location> <log_file_suffix>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ fname = argv[3];
+
+    /*Use SIGUSR1 and pause() as a means of hitting break-points in this
+     *program when signalled from the .t test case.*/
+ if (signal(SIGUSR1, sigusr1_handler) == SIG_ERR) {
+ LOG_ERR("SIGUSR1 handler error", errno);
+ exit(EXIT_FAILURE);
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[4], "lock-heal-basic.c", argv[5],
+ "log");
+ logfile_fp = fopen(log_file, "w");
+ if (!logfile_fp) {
+ fprintf(stderr, "\nfailed to open %s\n", log_file);
+ fflush(stderr);
+ return -1;
+ }
+
+ sprintf(log_file, "%s/%s.%s.%s", argv[4], "glfs-client", argv[5], "log");
+ fs = setup_client(hostname, volname, log_file);
+ if (!fs) {
+ LOG_ERR("setup_client", errno);
+ return -1;
+ }
+
+ ret = acquire_mandatory_lock(fs, fname);
+
+error:
+ if (fs) {
+ /*glfs_fini(fs)*/; // glfs fini path is racy and crashes the program
+ }
+
+ fclose(logfile_fp);
+
+ return ret;
+}
diff --git a/tests/basic/fencing/afr-lock-heal-basic.t b/tests/basic/fencing/afr-lock-heal-basic.t
new file mode 100644
index 00000000000..69131af085d
--- /dev/null
+++ b/tests/basic/fencing/afr-lock-heal-basic.t
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function is_gfapi_program_alive()
+{
+ pid=$1
+ ps -p $pid
+ if [ $? -eq 0 ]
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 locks.mandatory-locking forced
+TEST $CLI volume set $V0 enforce-mandatory-lock on
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+TEST build_tester $(dirname $0)/afr-lock-heal-basic.c -lgfapi -ggdb
+
+$(dirname $0)/afr-lock-heal-basic $H0 $V0 "/FILE" $logdir C1&
+client1_pid=$!
+TEST [ $client1_pid ]
+
+$(dirname $0)/afr-lock-heal-basic $H0 $V0 "/FILE" $logdir C2&
+client2_pid=$!
+TEST [ $client2_pid ]
+
+TEST sleep 5 # By now, the 2 clients would have opened an fd on FILE and be waiting for a SIGUSR1.
+EXPECT "Y" is_gfapi_program_alive $client1_pid
+EXPECT "Y" is_gfapi_program_alive $client2_pid
+
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/FILE))
+inode="FILE|gfid:$gfid_str"
+
+# Kill brick-3 and let client-1 take lock on the file.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill -SIGUSR1 $client1_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client1_pid
+
+# Check lock is present on brick-1 and brick-2
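+# The awk/tr pipeline keeps only the leading fields of each lock entry so that
+# entries from different bricks can be compared directly.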
+b1_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}0)
+c1_lock_on_b1="$(egrep "$inode" $b1_sdump -A3| egrep 'ACTIVE.*client-0'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b2_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}1)
+c1_lock_on_b2="$(egrep "$inode" $b2_sdump -A3| egrep 'ACTIVE.*client-1'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c1_lock_on_b1" == "$c1_lock_on_b2" ]
+
+# Restart brick-3 and check that the lock has healed on it.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd. Also wait for lock heal.
+
+b3_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}2)
+c1_lock_on_b3="$(egrep "$inode" $b3_sdump -A3| egrep 'ACTIVE.*client-2'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c1_lock_on_b1" == "$c1_lock_on_b3" ]
+
+# Kill brick-1 and let client-2 preempt the lock on bricks 2 and 3.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill -SIGUSR1 $client2_pid
+# If program is still alive, glfs_file_lock() was a success.
+EXPECT "Y" is_gfapi_program_alive $client2_pid
+
+# Restart brick-1 and let lock healing complete.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+TEST sleep 10 #Needed for client to re-open fd? Otherwise client_pre_lk_v2() fails with EBADFD for remote-fd. Also wait for lock heal.
+
+# Check that all bricks now have locks from client 2 only.
+b1_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}0)
+c2_lock_on_b1="$(egrep "$inode" $b1_sdump -A3| egrep 'ACTIVE.*client-0'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b2_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}1)
+c2_lock_on_b2="$(egrep "$inode" $b2_sdump -A3| egrep 'ACTIVE.*client-1'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+b3_sdump=$(generate_brick_statedump $V0 $H0 $B0/${V0}2)
+c2_lock_on_b3="$(egrep "$inode" $b3_sdump -A3| egrep 'ACTIVE.*client-2'| uniq| awk '{print $1,$2,$3,$4,$5,$6,$7,$8}'|tr -d '(,), ,')"
+TEST [ "$c2_lock_on_b1" == "$c2_lock_on_b2" ]
+TEST [ "$c2_lock_on_b1" == "$c2_lock_on_b3" ]
+TEST [ "$c2_lock_on_b1" != "$c1_lock_on_b1" ]
+
+#Let the client programs run and exit.
+TEST kill -SIGUSR1 $client1_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" is_gfapi_program_alive $client1_pid
+TEST kill -SIGUSR1 $client2_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "N" is_gfapi_program_alive $client2_pid
+
+cleanup_tester $(dirname $0)/afr-lock-heal-basic
+cleanup;
diff --git a/tests/basic/first-test.t b/tests/basic/first-test.t
deleted file mode 100755
index 535b269e6b3..00000000000
--- a/tests/basic/first-test.t
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-
-cat << EOF
-This test should run first for http://review.gluster.org/#/c/13439/ and should
-be removed once that patch has been merged.
-EOF
-
-TEST true
diff --git a/tests/basic/fops-sanity.c b/tests/basic/fops-sanity.c
index aff72d89ca1..ef00aa0f088 100644
--- a/tests/basic/fops-sanity.c
+++ b/tests/basic/fops-sanity.c
@@ -26,6 +26,7 @@
#include <errno.h>
#include <string.h>
#include <dirent.h>
+#include <sys/sysmacros.h>
#ifndef linux
#include <sys/socket.h>
diff --git a/tests/basic/fuse/active-io-graph-switch.t b/tests/basic/fuse/active-io-graph-switch.t
new file mode 100644
index 00000000000..6ec3e1fcbfa
--- /dev/null
+++ b/tests/basic/fuse/active-io-graph-switch.t
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+TESTS_EXPECTED_IN_LOOP=12
+
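+# Each background writer keeps dd-ing into its own file until the shared lock
+# file is removed from the mount.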
+function perform_io_on_mount {
+ local m="$1"
+ local f="$2"
+ local lockfile="$3"
+ while [ -f "$m/$lockfile" ];
+ do
+ dd if=/dev/zero of=$m/$f bs=1M count=1
+ done
+}
+
+function perform_graph_switch {
+ for i in {1..3}
+ do
+ TEST_IN_LOOP $CLI volume set $V0 performance.stat-prefetch off
+ sleep 3
+ TEST_IN_LOOP $CLI volume set $V0 performance.stat-prefetch on
+ sleep 3
+ done
+}
+
+function count_files {
+ ls $M0 | wc -l
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/lock
+for i in {1..100}; do perform_io_on_mount $M0 $i lock & done
+EXPECT_WITHIN 5 "101" count_files
+
+perform_graph_switch
+TEST rm -f $M0/lock
+wait
+EXPECT "100" count_files
+TEST rm -f $M0/{1..100}
+EXPECT "0" count_files
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#Repeat the tests with reader-thread-count
+TEST $GFS --reader-thread-count=10 --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/lock
+for i in {1..100}; do perform_io_on_mount $M0 $i lock & done
+EXPECT_WITHIN 5 "101" count_files
+
+perform_graph_switch
+TEST rm -f $M0/lock
+wait
+EXPECT "100" count_files
+TEST rm -f $M0/{1..100}
+EXPECT "0" count_files
+
+cleanup
diff --git a/tests/basic/gfapi/bug-1507896.c b/tests/basic/gfapi/bug-1507896.c
new file mode 100644
index 00000000000..1cc20849c2b
--- /dev/null
+++ b/tests/basic/gfapi/bug-1507896.c
@@ -0,0 +1,49 @@
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define VALIDATE_AND_GOTO_LABEL_ON_ERROR(func, ret, label) \
+ do { \
+ if (ret < 0) { \
+ fprintf(stderr, "%s : returned error %d (%s)\n", func, ret, \
+ strerror(errno)); \
+ goto label; \
+ } \
+ } while (0)
+
+int
+main(int argc, char *argv[])
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ char *hostname = NULL;
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = glfs_new(volname);
+ if (!fs)
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_new(fs)", ret, out);
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_volfile_server(fs)", ret, out);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_set_logging(fs)", ret, out);
+
+ ret = glfs_init(fs);
+ VALIDATE_AND_GOTO_LABEL_ON_ERROR("glfs_init(fs)", ret, out);
+
+out:
+ if (fs) {
+ ret = glfs_fini(fs);
+ if (ret)
+ fprintf(stderr, "glfs_fini(fs) returned %d\n", ret);
+ }
+ return ret;
+}
diff --git a/tests/basic/gfapi/bug-1507896.t b/tests/basic/gfapi/bug-1507896.t
new file mode 100644
index 00000000000..4764e650232
--- /dev/null
+++ b/tests/basic/gfapi/bug-1507896.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/bug-1507896.c -lgfapi
+
+TEST ./$(dirname $0)/bug-1507896 $H0 $V0 $logdir/bug-1507896.log
+
+#volume name preceded by '/'
+TEST ! ./$(dirname $0)/bug-1507896 $H0 /$V0 $logdir/bug-1507896.log
+
+#volume name containing special characters
+TEST ! ./$(dirname $0)/bug-1507896 $H0 test@_$V0 $logdir/bug-1507896.log
+
+cleanup_tester $(dirname $0)/bug-1507896
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/basic/gfapi/gfapi-async-calls-test.c b/tests/basic/gfapi/gfapi-async-calls-test.c
index 5a291c3c76b..55835b14709 100644
--- a/tests/basic/gfapi/gfapi-async-calls-test.c
+++ b/tests/basic/gfapi/gfapi-async-calls-test.c
@@ -17,6 +17,17 @@
int cbk_complete = 0;
int cbk_ret_val = -1;
+void
+cbk_check()
+{
+    /* Poll until the pending async callback fires, then re-arm the flag so
+     * the next async call can also be waited on. */
+    while (cbk_complete != 1) {
+        sleep(1);
+    }
+    cbk_complete = 0;
+    if (cbk_ret_val < 0) {
+ fprintf(stderr, "cbk_ret_val is -ve\n");
+ }
+}
+
int
fill_iov(struct iovec *iov, char fillchar, int count)
{
@@ -76,25 +87,23 @@ out:
}
void
-write_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
- struct stat *poststat, void *cookie)
+pwritev_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
{
if (ret < 0) {
- LOG_ERR("glfs_write failed");
+ LOG_ERR("glfs_pwritev failed");
}
cbk_ret_val = ret;
cbk_complete = 1;
}
int
-write_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+pwritev_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
{
ssize_t ret = -1;
int flags = O_RDWR;
- const char *buff = "This is from my prog\n";
struct iovec iov = {0};
void *write_cookie = NULL;
- void *read_cookie = NULL;
ret = fill_iov(&iov, 'a', char_count);
if (ret) {
@@ -103,7 +112,7 @@ write_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
}
write_cookie = strdup("write_cookie");
- ret = glfs_pwritev_async(glfd, &iov, 1, 0, flags, write_async_cbk,
+ ret = glfs_pwritev_async(glfd, &iov, 1, 0, flags, pwritev_async_cbk,
&write_cookie);
out:
if (ret < 0) {
@@ -112,6 +121,252 @@ out:
return ret;
}
+void
+pwrite_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_pwrite_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+pwrite_async(glfs_fd_t *glfd)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+    char *buf2 = "ten bytes!";
+    void *write_cookie = strdup("write_cookie");
+    /* Write the 10-byte string instead of an uninitialized buffer. */
+    ret = glfs_pwrite_async(glfd, buf2, 10, 0, flags, pwrite_async_cbk,
+                            &write_cookie);
+
+ if (ret < 0) {
+ LOG_ERR("glfs_pwrite_async failed");
+ }
+ return ret;
+}
+
+void
+writev_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_writev_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+writev_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+ void *write_cookie = NULL;
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ LOG_ERR("failed to create iov");
+ goto out;
+ }
+
+ write_cookie = strdup("write_cookie");
+ ret = glfs_writev_async(glfd, &iov, 1, flags, writev_async_cbk,
+ &write_cookie);
+out:
+ if (ret < 0) {
+ LOG_ERR("glfs_writev_async failed");
+ }
+ return ret;
+}
+
+void
+write_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_write_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+write_async(glfs_fd_t *glfd)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+    char *buf2 = "ten bytes!";
+    void *write_cookie = strdup("write_cookie");
+    /* Write the 10-byte string instead of an uninitialized buffer. */
+    ret = glfs_write_async(glfd, buf2, 10, flags, write_async_cbk,
+                           &write_cookie);
+
+ if (ret < 0) {
+ LOG_ERR("glfs_write_async failed");
+ }
+ return ret;
+}
+
+void
+preadv_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_preadv_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+preadv_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+ void *read_cookie = NULL;
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ LOG_ERR("failed to create iov");
+ goto out;
+ }
+
+ read_cookie = strdup("preadv_cookie");
+ ret = glfs_preadv_async(glfd, &iov, 1, 0, flags, preadv_async_cbk,
+ &read_cookie);
+out:
+ if (ret < 0) {
+ LOG_ERR("glfs_preadv async failed");
+ }
+ return ret;
+}
+
+void
+pread_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_pread_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+pread_async(glfs_fd_t *glfd)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ char buf1[10];
+ void *read_cookie = strdup("read_cookie");
+ ret = glfs_pread_async(glfd, buf1, 10, 0, flags, pread_async_cbk,
+ &read_cookie);
+ if (ret < 0) {
+ LOG_ERR("glfs_pread_async failed");
+ }
+
+ return ret;
+}
+
+void
+readv_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_readv_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+readv_async(glfs_t *fs, glfs_fd_t *glfd, int char_count)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ struct iovec iov = {0};
+ void *read_cookie = NULL;
+
+ ret = fill_iov(&iov, 'a', char_count);
+ if (ret) {
+ LOG_ERR("failed to create iov");
+ goto out;
+ }
+
+ read_cookie = strdup("read_cookie");
+ ret = glfs_readv_async(glfd, &iov, 1, flags, readv_async_cbk, &read_cookie);
+out:
+ if (ret < 0) {
+ LOG_ERR("glfs_readv_async failed");
+ }
+ return ret;
+}
+
+void
+read_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_read_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+int
+read_async(glfs_fd_t *glfd)
+{
+ ssize_t ret = -1;
+ int flags = O_RDWR;
+ char buf1[10];
+ void *read_cookie = strdup("read_cookie");
+ ret = glfs_read_async(glfd, buf1, 10, flags, read_async_cbk, &read_cookie);
+
+ if (ret < 0) {
+ LOG_ERR("glfs_read_async failed");
+ }
+ return ret;
+}
+
+void
+fsync_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_fsync_async_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+void
+fdatasync_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_fdatasync_async_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
+void
+ftruncate_async_cbk(glfs_fd_t *fd, ssize_t ret, struct stat *prestat,
+ struct stat *poststat, void *cookie)
+{
+ if (ret < 0) {
+ LOG_ERR("glfs_ftruncate_async_cbk failed");
+ }
+ cbk_ret_val = ret;
+ cbk_complete = 1;
+}
+
int
main(int argc, char *argv[])
{
@@ -124,6 +379,7 @@ main(int argc, char *argv[])
int flags = (O_RDWR | O_CREAT);
glfs_fd_t *glfd = NULL;
int count = 200;
+ void *data = strdup("Sample_text");
if (argc != 4) {
fprintf(stderr, "Invalid argument\n");
@@ -146,14 +402,85 @@ main(int argc, char *argv[])
exit(1);
}
- ret = write_async(fs, glfd, count);
+ ret = pwritev_async(fs, glfd, count);
if (ret) {
- LOG_ERR("glfs_test_function failed");
+ LOG_ERR("glfs_pwritev_async_test failed");
exit(1);
}
+ cbk_check();
- while (cbk_complete != 1) {
- sleep(1);
+ ret = writev_async(fs, glfd, count);
+ if (ret) {
+ LOG_ERR("glfs_writev_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = write_async(glfd);
+ if (ret) {
+ LOG_ERR("glfs_write_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = preadv_async(fs, glfd, count);
+ if (ret) {
+ LOG_ERR("glfs_preadv_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = pread_async(glfd);
+ if (ret) {
+ LOG_ERR("glfs_pread_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = readv_async(fs, glfd, count);
+ if (ret) {
+ LOG_ERR("glfs_readv_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = read_async(glfd);
+ if (ret) {
+ LOG_ERR("glfs_read_async_test failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = glfs_fsync(glfd, NULL, NULL);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsync failed");
+ exit(1);
+ }
+
+ ret = glfs_fdatasync(glfd, NULL, NULL);
+ if (ret < 0) {
+ LOG_ERR("glfs_fdatasync failed");
+ exit(1);
+ }
+
+ ret = glfs_fsync_async(glfd, fsync_async_cbk, data);
+ if (ret < 0) {
+ LOG_ERR("glfs_fsync_async failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = glfs_fdatasync_async(glfd, fdatasync_async_cbk, data);
+ if (ret < 0) {
+ LOG_ERR("glfs_fdatasync_async failed");
+ exit(1);
+ }
+ cbk_check();
+
+ ret = glfs_ftruncate_async(glfd, 4, ftruncate_async_cbk, data);
+ if (ret < 0) {
+ LOG_ERR("glfs_ftruncate_async failed");
+ exit(1);
}
ret = glfs_close(glfd);
@@ -161,12 +488,7 @@ main(int argc, char *argv[])
LOG_ERR("glfs close failed");
}
- /*
- * skipping fini
- */
+ ret = glfs_fini(fs);
- if (cbk_ret_val == count)
- return 0;
- else
- return -1;
+ return ret;
}
diff --git a/tests/basic/gfapi/gfapi-copy-file-range.t b/tests/basic/gfapi/gfapi-copy-file-range.t
index c24c1433edf..a56d3a58e07 100644
--- a/tests/basic/gfapi/gfapi-copy-file-range.t
+++ b/tests/basic/gfapi/gfapi-copy-file-range.t
@@ -5,20 +5,22 @@
cleanup;
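+#Skip the whole test if this mkfs.xfs has no reflink support.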
+mkfs.xfs 2>&1 | grep reflink
+if [ $? -ne 0 ]; then
+ SKIP_TESTS
+ exit
+fi
+
+
TEST glusterd
+TEST truncate -s 2G $B0/xfs_image
# for now, a xfs filesystem with reflink support is created.
# In future, better to make changes in MKFS_LOOP so that,
# once can create a xfs filesystem with reflink enabled in
# generic and simple way, instead of doing below steps each
# time.
-TEST truncate -s 2G $B0/xfs_image
-mkfs.xfs 2>&1 | grep reflink
-if [ $? -eq 0 ]; then
- mkfs.xfs -f -i size=512 -m reflink=1 $B0/xfs_image;
-else
- mkfs.xfs -f -i size=512 $B0/xfs_image;
-fi
+TEST mkfs.xfs -f -i size=512 -m reflink=1 $B0/xfs_image;
TEST mkdir $B0/bricks
TEST mount -t xfs -o loop $B0/xfs_image $B0/bricks
diff --git a/tests/basic/gfapi/gfapi-graph-switch-open-fd.t b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
new file mode 100644
index 00000000000..2e666be7ec7
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-graph-switch-open-fd.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 ${H0}:$B0/brick{0..2};
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/sync
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-keep-writing.c -lgfapi
+
+
+#Launch a program to keep doing writes on an fd
+./$(dirname $0)/gfapi-keep-writing ${H0} $V0 $logdir/gfapi-async-calls-test.log sync &
+p=$!
+sleep 1 #Let some writes go through
+#Check that the graph switch does not leave any pending markers behind forever
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
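+#Each option change above regenerates the client volfile and should trigger a
+#graph switch while the gfapi program still holds its open fd.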
+
+
+TEST rm -f $M0/sync #Make sure the glfd is closed
+TEST wait #Wait for background process to die
+#Goal is to check if there is permanent FOOL changelog
+sleep 5
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick0/glfs_test.txt trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick1/glfs_test.txt trusted.afr.dirty
+EXPECT "0x000000000000000000000000" afr_get_changelog_xattr $B0/brick2/glfs_test.txt trusted.afr.dirty
+
+cleanup_tester $(dirname $0)/gfapi-keep-writing
+
+cleanup;
diff --git a/tests/basic/gfapi/gfapi-keep-writing.c b/tests/basic/gfapi/gfapi-keep-writing.c
new file mode 100644
index 00000000000..91b59cea02b
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-keep-writing.c
@@ -0,0 +1,129 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile_server failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+int
+glfs_test_function(const char *hostname, const char *volname,
+ const char *logfile, const char *syncfile)
+{
+ int ret = -1;
+ int flags = O_CREAT | O_RDWR;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ const char *buff = "This is from my prog\n";
+ const char *filename = "glfs_test.txt";
+ struct stat buf = {0};
+
+ fs = init_glfs(hostname, volname, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ glfd = glfs_creat(fs, filename, flags, 0644);
+ if (glfd == NULL) {
+ LOG_ERR("glfs_creat failed");
+ goto out;
+ }
+
+ while (glfs_stat(fs, syncfile, &buf) == 0) {
+ ret = glfs_write(glfd, buff, strlen(buff), flags);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+ }
+
+ ret = glfs_close(glfd);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+
+out:
+ ret = glfs_fini(fs);
+ if (ret) {
+ LOG_ERR("glfs_fini failed");
+ }
+
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *logfile = NULL;
+ char *syncfile = NULL;
+
+ if (argc != 5) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+ syncfile = argv[4];
+
+ ret = glfs_test_function(hostname, volname, logfile, syncfile);
+ if (ret) {
+ LOG_ERR("glfs_test_function failed");
+ }
+
+ return ret;
+}
diff --git a/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c
new file mode 100644
index 00000000000..7beb8dd1fe4
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.c
@@ -0,0 +1,127 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(msg) \
+ do { \
+ fprintf(stderr, "%s : Error (%s)\n", msg, strerror(errno)); \
+ } while (0)
+
+glfs_t *
+init_glfs(const char *hostname, const char *volname, const char *volfile,
+ const char *logfile)
+{
+ int ret = -1;
+ glfs_t *fs = NULL;
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ LOG_ERR("glfs_new failed");
+ return NULL;
+ }
+
+ ret = glfs_set_volfile(fs, volfile);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_volfile failed");
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ LOG_ERR("glfs_set_logging failed");
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ LOG_ERR("glfs_init failed");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ glfs_fini(fs);
+ fs = NULL;
+ }
+
+ return fs;
+}
+
+int
+glfs_test_function(const char *hostname, const char *volname,
+ const char *volfile, const char *logfile)
+{
+ int ret = -1;
+ int flags = O_CREAT | O_RDWR;
+ glfs_t *fs = NULL;
+ glfs_fd_t *glfd = NULL;
+ const char *buff = "This is from my prog\n";
+ const char *filename = "glfs_test.txt";
+
+ fs = init_glfs(hostname, volname, volfile, logfile);
+ if (fs == NULL) {
+ LOG_ERR("init_glfs failed");
+ return -1;
+ }
+
+ glfd = glfs_creat(fs, filename, flags, 0644);
+ if (glfd == NULL) {
+ LOG_ERR("glfs_creat failed");
+ goto out;
+ }
+
+ ret = glfs_write(glfd, buff, strlen(buff), flags);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+
+ ret = glfs_close(glfd);
+ if (ret < 0) {
+ LOG_ERR("glfs_write failed");
+ goto out;
+ }
+
+out:
+ ret = glfs_fini(fs);
+ if (ret) {
+ LOG_ERR("glfs_fini failed");
+ }
+
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 0;
+ char *hostname = NULL;
+ char *volname = NULL;
+ char *volfile = NULL;
+ char *logfile = NULL;
+
+ if (argc != 5) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ volfile = argv[3];
+ logfile = argv[4];
+
+ ret = glfs_test_function(hostname, volname, volfile, logfile);
+ if (ret) {
+ LOG_ERR("glfs_test_function failed");
+ }
+
+ return ret;
+}
diff --git a/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t
new file mode 100755
index 00000000000..8e94df9d321
--- /dev/null
+++ b/tests/basic/gfapi/gfapi-ssl-load-volfile-test.t
@@ -0,0 +1,76 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+. $(dirname $0)/../../ssl.rc
+
+cleanup;
+
+sed -e "s,@@HOSTNAME@@,${H0},g" -e "s,@@BRICKPATH@@,${B0}/brick1,g" \
+ -e "s,@@SSL@@,off,g" \
+ $(dirname ${0})/protocol-client-ssl.vol.in \
+ > $(dirname ${0})/protocol-client-ssl.vol
+
+TEST create_self_signed_certs
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/gfapi-ssl-load-volfile-test.c -lgfapi
+
+# Run test without I/O or management encryption
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+# Enable management encryption
+touch $GLUSTERD_WORKDIR/secure-access
+
+killall_gluster
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+# Run test with management encryption (No I/O encryption)
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+# Enable I/O encryption
+TEST $CLI volume set $V0 server.ssl on
+
+killall_gluster
+
+sed -e "s,@@HOSTNAME@@,${H0},g" -e "s,@@BRICKPATH@@,${B0}/brick1,g" \
+ -e "s,@@SSL@@,on,g" \
+ $(dirname ${0})/protocol-client-ssl.vol.in \
+ > $(dirname ${0})/protocol-client-ssl.vol
+
+TEST glusterd
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+
+# Run test with both management and I/O encryption
+TEST $(dirname $0)/gfapi-ssl-load-volfile-test $H0 $V0 \
+ $(dirname ${0})/protocol-client-ssl.vol \
+ $logdir/gfapi-ssl-load-volfile-test.log
+
+cleanup_tester $(dirname $0)/gfapi-ssl-load-volfile-test
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
+
+# The NetBSD build scripts are not up to date, so this test fails on NetBSD.
+# Skip it there for now.
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000
diff --git a/tests/basic/gfapi/glfs_h_creat_open.c b/tests/basic/gfapi/glfs_h_creat_open.c
new file mode 100644
index 00000000000..7672561e73f
--- /dev/null
+++ b/tests/basic/gfapi/glfs_h_creat_open.c
@@ -0,0 +1,118 @@
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <limits.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+#define LOG_ERR(func, ret) \
+ do { \
+ if (ret != 0) { \
+ fprintf(stderr, "%s : returned error ret(%d), errno(%d)\n", func, \
+ ret, errno); \
+ exit(1); \
+ } else { \
+ fprintf(stderr, "%s : returned %d\n", func, ret); \
+ } \
+ } while (0)
+#define LOG_IF_NO_ERR(func, ret) \
+ do { \
+ if (ret == 0) { \
+ fprintf(stderr, "%s : hasn't returned error %d\n", func, ret); \
+ exit(1); \
+ } else { \
+ fprintf(stderr, "%s : returned %d\n", func, ret); \
+ } \
+ } while (0)
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ struct glfs_object *root = NULL, *leaf = NULL;
+ glfs_fd_t *fd = NULL;
+ char *filename = "/ro-file";
+ struct stat sb = {
+ 0,
+ };
+ char *logfile = NULL;
+ char *volname = NULL;
+ char *hostname = NULL;
+ char buf[32] = "abcdefghijklmnopqrstuvwxyz012345";
+
+ fprintf(stderr, "Starting glfs_h_creat_open\n");
+
+ if (argc != 4) {
+ fprintf(stderr, "Invalid argument\n");
+ exit(1);
+ }
+
+ hostname = argv[1];
+ volname = argv[2];
+ logfile = argv[3];
+
+ fs = glfs_new(volname);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ LOG_ERR("glfs_set_volfile_server", ret);
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ LOG_ERR("glfs_set_logging", ret);
+
+ ret = glfs_init(fs);
+ LOG_ERR("glfs_init", ret);
+
+ sleep(2);
+ root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
+ if (!root) {
+ ret = -1;
+ LOG_ERR("glfs_h_lookupat root", ret);
+ }
+ leaf = glfs_h_lookupat(fs, root, filename, &sb, 0);
+ if (!leaf) {
+ ret = -1;
+ LOG_IF_NO_ERR("glfs_h_lookupat leaf", ret);
+ }
+
+ leaf = glfs_h_creat_open(fs, root, filename, O_RDONLY, 00444, &sb, &fd);
+ if (!leaf || !fd) {
+ ret = -1;
+ LOG_ERR("glfs_h_creat leaf", ret);
+ }
+ fprintf(stderr, "glfs_h_create_open leaf - %p\n", leaf);
+
+ ret = glfs_write(fd, buf, 32, 0);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_write: error writing to file %s, %s\n", filename,
+ strerror(errno));
+ goto out;
+ }
+
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ LOG_ERR("glfs_h_getattrs", ret);
+
+ if (sb.st_size != 32) {
+ fprintf(stderr, "glfs_write: post size mismatch\n");
+ goto out;
+ }
+
+ fprintf(stderr, "Successfully opened and written to a read-only file \n");
+out:
+ if (fd)
+ glfs_close(fd);
+
+ ret = glfs_fini(fs);
+ LOG_ERR("glfs_fini", ret);
+
+ fprintf(stderr, "End of libgfapi_fini\n");
+
+ exit(0);
+}
diff --git a/tests/basic/gfapi/glfs_h_creat_open.t b/tests/basic/gfapi/glfs_h_creat_open.t
new file mode 100755
index 00000000000..f24ae7395be
--- /dev/null
+++ b/tests/basic/gfapi/glfs_h_creat_open.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick1;
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+logdir=`gluster --print-logdir`
+
+TEST build_tester $(dirname $0)/glfs_h_creat_open.c -lgfapi
+
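+# The tester creates /ro-file with mode 0444 via glfs_h_creat_open and verifies
+# that the returned fd is still writable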
+TEST ./$(dirname $0)/glfs_h_creat_open $H0 $V0 $logdir/glfs.log
+
+cleanup_tester $(dirname $0)/glfs_h_creat_open
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup;
diff --git a/tests/basic/gfapi/glfsxmp-coverage.c b/tests/basic/gfapi/glfsxmp-coverage.c
new file mode 100644
index 00000000000..51650023efd
--- /dev/null
+++ b/tests/basic/gfapi/glfsxmp-coverage.c
@@ -0,0 +1,1900 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <string.h>
+#include <time.h>
+
+#define TEST_STR_LEN 2048
+
+int
+test_dirops(glfs_t *fs)
+{
+ glfs_fd_t *fd = NULL;
+ char buf[2048];
+ struct dirent *entry = NULL;
+
+ fd = glfs_opendir(fs, "/");
+ if (!fd) {
+ fprintf(stderr, "/: %s\n", strerror(errno));
+ return -1;
+ }
+
+ fprintf(stderr, "Entries:\n");
+ while (glfs_readdir_r(fd, (struct dirent *)buf, &entry), entry) {
+ fprintf(stderr, "%s: %lu\n", entry->d_name, glfs_telldir(fd));
+ }
+
+ /* Should internally call fsyncdir(), hopefully */
+ glfs_fsync(fd, NULL, NULL);
+
+ glfs_closedir(fd);
+ return 0;
+}
+
+int
+test_xattr(glfs_t *fs)
+{
+ char *filename = "/filename2";
+ char *linkfile = "/linkfile";
+ glfs_fd_t *fd = NULL;
+ char buf[512];
+ char *ptr;
+ int ret;
+
+ ret = glfs_setxattr(fs, filename, "user.testkey", "testval", 8, 0);
+ fprintf(stderr, "setxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_setxattr(fs, filename, "user.testkey2", "testval", 8, 0);
+ fprintf(stderr, "setxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_getxattr(fs, filename, "user.testkey", buf, 512);
+ fprintf(stderr, "getxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_listxattr(fs, filename, buf, 512);
+ fprintf(stderr, "listxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_symlink(fs, "filename", linkfile);
+ fprintf(stderr, "symlink(%s %s): %s\n", filename, linkfile,
+ strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_readlink(fs, linkfile, buf, 512);
+ fprintf(stderr, "readlink(%s) : %d (%s)\n", filename, ret, strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_lsetxattr(fs, filename, "user.testkey3", "testval", 8, 0);
+ fprintf(stderr, "lsetxattr(%s) : %d (%s)\n", linkfile, ret,
+ strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_llistxattr(fs, linkfile, buf, 512);
+ fprintf(stderr, "llistxattr(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ ret = glfs_lgetxattr(fs, filename, "user.testkey3", buf, 512);
+ fprintf(stderr, "lgetxattr(%s): %d (%s)\n", linkfile, ret, strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ for (ptr = buf; ptr < buf + ret; ptr++) {
+ printf("key=%s\n", ptr);
+ ptr += strlen(ptr);
+ }
+
+ ret = glfs_removexattr(fs, filename, "user.testkey2");
+ fprintf(stderr, "removexattr(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ fd = glfs_open(fs, filename, O_RDWR);
+ fprintf(stderr, "open(%s): (%p) %s\n", filename, fd, strerror(errno));
+
+ ret = glfs_fsetxattr(fd, "user.testkey2", "testval", 8, 0);
+ fprintf(stderr, "fsetxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_fgetxattr(fd, "user.testkey2", buf, 512);
+ fprintf(stderr, "fgetxattr(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_flistxattr(fd, buf, 512);
+ fprintf(stderr, "flistxattr(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ if (ret < 0)
+ return -1;
+
+ for (ptr = buf; ptr < buf + ret; ptr++) {
+ printf("key=%s\n", ptr);
+ ptr += strlen(ptr);
+ }
+
+ ret = glfs_fremovexattr(fd, "user.testkey2");
+ fprintf(stderr, "fremovexattr(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ glfs_close(fd);
+
+ return 0;
+}
+
+int
+test_chdir(glfs_t *fs)
+{
+ int ret = -1;
+ char *dir = "/dir";
+ char *topdir = "/topdir";
+ char *linkdir = "/linkdir";
+ char *linkdir2 = "/linkdir2";
+ char *subdir = "./subdir";
+ char *respath = NULL;
+ char pathbuf[4096];
+
+ ret = glfs_mkdir(fs, topdir, 0755);
+ fprintf(stderr, "mkdir(%s): %s\n", topdir, strerror(errno));
+ if (ret)
+ return -1;
+
+ ret = glfs_mkdir(fs, dir, 0755);
+ fprintf(stderr, "mkdir(%s): %s\n", dir, strerror(errno));
+ if (ret)
+ return -1;
+
+ respath = glfs_getcwd(fs, pathbuf, 4096);
+ fprintf(stdout, "getcwd() = %s\n", respath);
+
+ ret = glfs_symlink(fs, "topdir", linkdir);
+ if (ret) {
+ fprintf(stderr, "symlink(%s, %s): %s\n", topdir, linkdir,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_chdir(fs, linkdir);
+ if (ret) {
+ fprintf(stderr, "chdir(%s): %s\n", linkdir, strerror(errno));
+ return -1;
+ }
+
+ respath = glfs_getcwd(fs, pathbuf, 4096);
+ fprintf(stdout, "getcwd() = %s\n", respath);
+
+ respath = glfs_realpath(fs, subdir, pathbuf);
+ if (respath) {
+ fprintf(stderr, "realpath(%s) worked unexpectedly: %s\n", subdir,
+ respath);
+ return -1;
+ }
+
+ ret = glfs_mkdir(fs, subdir, 0755);
+ if (ret) {
+ fprintf(stderr, "mkdir(%s): %s\n", subdir, strerror(errno));
+ return -1;
+ }
+
+ respath = glfs_realpath(fs, subdir, pathbuf);
+ if (!respath) {
+ fprintf(stderr, "realpath(%s): %s\n", subdir, strerror(errno));
+ } else {
+ fprintf(stdout, "realpath(%s) = %s\n", subdir, respath);
+ }
+
+ ret = glfs_chdir(fs, subdir);
+ if (ret) {
+ fprintf(stderr, "chdir(%s): %s\n", subdir, strerror(errno));
+ return -1;
+ }
+
+ respath = glfs_getcwd(fs, pathbuf, 4096);
+ fprintf(stdout, "getcwd() = %s\n", respath);
+
+ respath = glfs_realpath(fs, "/linkdir/subdir", pathbuf);
+ if (!respath) {
+ fprintf(stderr, "realpath(/linkdir/subdir): %s\n", strerror(errno));
+ } else {
+ fprintf(stdout, "realpath(/linkdir/subdir) = %s\n", respath);
+ }
+
+ return 0;
+}
+
+#ifdef DEBUG
+static void
+peek_stat(struct stat *sb)
+{
+ printf("Dumping stat information:\n");
+ printf("File type: ");
+
+ switch (sb->st_mode & S_IFMT) {
+ case S_IFBLK:
+ printf("block device\n");
+ break;
+ case S_IFCHR:
+ printf("character device\n");
+ break;
+ case S_IFDIR:
+ printf("directory\n");
+ break;
+ case S_IFIFO:
+ printf("FIFO/pipe\n");
+ break;
+ case S_IFLNK:
+ printf("symlink\n");
+ break;
+ case S_IFREG:
+ printf("regular file\n");
+ break;
+ case S_IFSOCK:
+ printf("socket\n");
+ break;
+ default:
+ printf("unknown?\n");
+ break;
+ }
+
+ printf("I-node number: %ld\n", (long)sb->st_ino);
+
+ printf("Mode: %lo (octal)\n",
+ (unsigned long)sb->st_mode);
+
+ printf("Link count: %ld\n", (long)sb->st_nlink);
+ printf("Ownership: UID=%ld GID=%ld\n", (long)sb->st_uid,
+ (long)sb->st_gid);
+
+ printf("Preferred I/O block size: %ld bytes\n", (long)sb->st_blksize);
+ printf("File size: %lld bytes\n", (long long)sb->st_size);
+ printf("Blocks allocated: %lld\n", (long long)sb->st_blocks);
+
+ printf("Last status change: %s", ctime(&sb->st_ctime));
+ printf("Last file access: %s", ctime(&sb->st_atime));
+ printf("Last file modification: %s", ctime(&sb->st_mtime));
+
+ return;
+}
+
+static void
+peek_handle(unsigned char *glid)
+{
+ int i;
+
+ for (i = 0; i < GFAPI_HANDLE_LENGTH; i++) {
+ printf(":%02x:", glid[i]);
+ }
+ printf("\n");
+}
+#else /* DEBUG */
+static void
+peek_stat(struct stat *sb)
+{
+ return;
+}
+
+static void
+peek_handle(unsigned char *id)
+{
+ return;
+}
+#endif /* DEBUG */
+
+glfs_t *fs = NULL;
+char *full_parent_name = "/testdir", *parent_name = "testdir";
+
+void
+test_h_unlink(void)
+{
+ char *my_dir = "unlinkdir";
+ char *my_file = "file.txt";
+ char *my_subdir = "dir1";
+ struct glfs_object *parent = NULL, *leaf = NULL, *dir = NULL,
+ *subdir = NULL, *subleaf = NULL;
+ struct stat sb;
+ int ret;
+
+ printf("glfs_h_unlink tests: In Progress\n");
+
+ /* Prepare tests */
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dir = glfs_h_mkdir(fs, parent, my_dir, 0755, &sb);
+ if (dir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, parent, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ leaf = glfs_h_creat(fs, dir, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ subdir = glfs_h_mkdir(fs, dir, my_subdir, 0755, &sb);
+ if (subdir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_subdir, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ subleaf = glfs_h_creat(fs, subdir, my_file, O_CREAT, 0644, &sb);
+ if (subleaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, subdir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink non empty directory */
+ ret = glfs_h_unlink(fs, dir, my_subdir);
+ if ((ret && errno != ENOTEMPTY) || (ret == 0)) {
+ fprintf(stderr,
+ "glfs_h_unlink: error unlinking %s: it is non empty: %s\n",
+ my_subdir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink regular file */
+ ret = glfs_h_unlink(fs, subdir, my_file);
+ if (ret) {
+ fprintf(stderr, "glfs_h_unlink: error unlinking %s: from (%p),%s\n",
+ my_file, subdir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink directory */
+ ret = glfs_h_unlink(fs, dir, my_subdir);
+ if (ret) {
+ fprintf(stderr, "glfs_h_unlink: error unlinking %s: from (%p),%s\n",
+ my_subdir, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink regular file */
+ ret = glfs_h_unlink(fs, dir, my_file);
+ if (ret) {
+ fprintf(stderr, "glfs_h_unlink: error unlinking %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink non-existent regular file */
+ ret = glfs_h_unlink(fs, dir, my_file);
+ if ((ret && errno != ENOENT) || (ret == 0)) {
+ fprintf(stderr,
+ "glfs_h_unlink: error unlinking non-existent %s: invalid errno "
+ ",%d, %s\n",
+ my_file, ret, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink non-existent directory */
+ ret = glfs_h_unlink(fs, dir, my_subdir);
+ if ((ret && errno != ENOENT) || (ret == 0)) {
+ fprintf(stderr,
+ "glfs_h_unlink: error unlinking non-existent %s: invalid "
+ "errno ,%d, %s\n",
+ my_subdir, ret, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ /* unlink directory */
+ ret = glfs_h_unlink(fs, parent, my_dir);
+ if (ret) {
+ fprintf(stderr, "glfs_h_unlink: error unlinking %s: from (%p),%s\n",
+ my_dir, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+
+ printf("glfs_h_unlink tests: PASSED\n");
+
+out:
+ if (dir)
+ glfs_h_close(dir);
+ if (leaf)
+ glfs_h_close(leaf);
+ if (subdir)
+ glfs_h_close(subdir);
+ if (subleaf)
+ glfs_h_close(subleaf);
+ if (parent)
+ glfs_h_close(parent);
+
+ return;
+}
+
+void
+test_h_getsetattrs(void)
+{
+ char *my_dir = "attrdir";
+ char *my_file = "attrfile.txt";
+ struct glfs_object *parent = NULL, *leaf = NULL, *dir = NULL;
+ struct stat sb, retsb;
+ int ret, valid;
+ struct timespec timestamp;
+
+ printf("glfs_h_getattrs and setattrs tests: In Progress\n");
+
+ /* Prepare tests */
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dir = glfs_h_mkdir(fs, parent, my_dir, 0755, &sb);
+ if (dir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, parent, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, dir, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_unlink tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ ret = glfs_h_getattrs(fs, dir, &retsb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_getattrs: error %s: from (%p),%s\n", my_dir,
+ dir, strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&retsb);
+ /* TODO: Compare stat information */
+
+ retsb.st_mode = 00666;
+ retsb.st_uid = 1000;
+ retsb.st_gid = 1001;
+ ret = clock_gettime(CLOCK_REALTIME, &timestamp);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+ retsb.st_atim = timestamp;
+ retsb.st_mtim = timestamp;
+ valid = GFAPI_SET_ATTR_MODE | GFAPI_SET_ATTR_UID | GFAPI_SET_ATTR_GID |
+ GFAPI_SET_ATTR_ATIME | GFAPI_SET_ATTR_MTIME;
+ peek_stat(&retsb);
+
+ ret = glfs_h_setattrs(fs, dir, &retsb, valid);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_setattrs: error %s: from (%p),%s\n", my_dir,
+ dir, strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ memset(&retsb, 0, sizeof(struct stat));
+ ret = glfs_h_stat(fs, dir, &retsb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_stat: error %s: from (%p),%s\n", my_dir, dir,
+ strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&retsb);
+
+ printf("glfs_h_getattrs and setattrs tests: PASSED\n");
+out:
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+ if (dir)
+ glfs_h_close(dir);
+
+ return;
+}
+
+void
+test_h_truncate(void)
+{
+ char *my_dir = "truncatedir";
+ char *my_file = "file.txt";
+ struct glfs_object *root = NULL, *parent = NULL, *leaf = NULL;
+ struct stat sb;
+ glfs_fd_t *fd = NULL;
+ char buf[32];
+ off_t offset = 0;
+ int ret = 0;
+
+ printf("glfs_h_truncate tests: In Progress\n");
+
+ /* Prepare tests */
+ root = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ parent = glfs_h_mkdir(fs, root, my_dir, 0755, &sb);
+ if (parent == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, root, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, parent, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ fd = glfs_h_open(fs, leaf, O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_h_open: error on open of %s: %s\n", my_file,
+ strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+
+ memcpy(buf, "abcdefghijklmnopqrstuvwxyz012345", 32);
+ ret = glfs_write(fd, buf, 32, 0);
+
+ /* run tests */
+ /* truncate lower */
+ offset = 30;
+ ret = glfs_h_truncate(fs, leaf, offset);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_truncate: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_getattrs: error for %s (%p),%s\n", my_file,
+ leaf, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ if (sb.st_size != offset) {
+ fprintf(stderr, "glfs_h_truncate: post size mismatch\n");
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+
+ /* truncate higher */
+ offset = 32;
+ ret = glfs_h_truncate(fs, leaf, offset);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_truncate: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_getattrs: error for %s (%p),%s\n", my_file,
+ leaf, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ if (sb.st_size != offset) {
+ fprintf(stderr, "glfs_h_truncate: post size mismatch\n");
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+
+ /* truncate equal */
+ offset = 30;
+ ret = glfs_h_truncate(fs, leaf, offset);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_truncate: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ ret = glfs_h_getattrs(fs, leaf, &sb);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_getattrs: error for %s (%p),%s\n", my_file,
+ leaf, strerror(errno));
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+ if (sb.st_size != offset) {
+ fprintf(stderr, "glfs_h_truncate: post size mismatch\n");
+ printf("glfs_h_truncate tests: FAILED\n");
+ goto out;
+ }
+
+ printf("glfs_h_truncate tests: PASSED\n");
+out:
+ if (fd)
+ glfs_close(fd);
+ if (root)
+ glfs_h_close(root);
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+
+ return;
+}
+
+void
+test_h_links(void)
+{
+ char *my_dir = "linkdir";
+ char *my_file = "file.txt";
+ char *my_symlnk = "slnk.txt";
+ char *my_lnk = "lnk.txt";
+ char *linksrc_dir = "dir1";
+ char *linktgt_dir = "dir2";
+ struct glfs_object *root = NULL, *parent = NULL, *leaf = NULL,
+ *dirsrc = NULL, *dirtgt = NULL, *dleaf = NULL;
+ struct glfs_object *ln1 = NULL;
+ struct stat sb;
+ int ret;
+ char *buf = NULL;
+
+ printf("glfs_h_link(s) tests: In Progress\n");
+
+ /* Prepare tests */
+ root = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ parent = glfs_h_mkdir(fs, root, my_dir, 0755, &sb);
+ if (parent == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, root, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, parent, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dirsrc = glfs_h_mkdir(fs, parent, linksrc_dir, 0755, &sb);
+ if (dirsrc == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ linksrc_dir, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dirtgt = glfs_h_mkdir(fs, parent, linktgt_dir, 0755, &sb);
+ if (dirtgt == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ linktgt_dir, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dleaf = glfs_h_creat(fs, dirsrc, my_file, O_CREAT, 0644, &sb);
+ if (dleaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dirsrc, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* run tests */
+ /* sym link: /testdir/linkdir/file.txt to ./slnk.txt */
+ ln1 = glfs_h_symlink(fs, parent, my_symlnk, "./file.txt", &sb);
+ if (ln1 == NULL) {
+ fprintf(stderr, "glfs_h_symlink: error creating %s: from (%p),%s\n",
+ my_symlnk, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ buf = calloc(1024, sizeof(char));
+ if (buf == NULL) {
+ fprintf(stderr, "Error allocating memory\n");
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+
+ ret = glfs_h_readlink(fs, ln1, buf, 1024);
+ if (ret <= 0) {
+ fprintf(stderr, "glfs_h_readlink: error reading %s: from (%p),%s\n",
+ my_symlnk, ln1, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ if (!(strncmp(buf, my_symlnk, strlen(my_symlnk)))) {
+ fprintf(stderr,
+ "glfs_h_readlink: error mismatch in link name: actual %s: "
+ "retrieved %s\n",
+ my_symlnk, buf);
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+
+ /* link: /testdir/linkdir/file.txt to ./lnk.txt */
+ ret = glfs_h_link(fs, leaf, parent, my_lnk);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_link: error creating %s: from (%p),%s\n",
+ my_lnk, parent, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ /* TODO: Should write content to a file and read from the link */
+
+ /* link: /testdir/linkdir/dir1/file.txt to ../dir2/slnk.txt */
+ ret = glfs_h_link(fs, dleaf, dirtgt, my_lnk);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_link: error creating %s: from (%p),%s\n",
+ my_lnk, dirtgt, strerror(errno));
+ printf("glfs_h_link(s) tests: FAILED\n");
+ goto out;
+ }
+ /* TODO: Should write content to a file and read from the link */
+
+ printf("glfs_h_link(s) tests: PASSED\n");
+
+out:
+ if (root)
+ glfs_h_close(root);
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+ if (dirsrc)
+ glfs_h_close(dirsrc);
+ if (dirtgt)
+ glfs_h_close(dirtgt);
+ if (dleaf)
+ glfs_h_close(dleaf);
+ if (ln1)
+ glfs_h_close(ln1);
+ if (buf)
+ free(buf);
+
+ return;
+}
+
+void
+test_h_rename(void)
+{
+ char *my_dir = "renamedir";
+ char *my_file = "file.txt";
+ char *src_dir = "dir1";
+ char *tgt_dir = "dir2";
+ struct glfs_object *root = NULL, *parent = NULL, *leaf = NULL,
+ *dirsrc = NULL, *dirtgt = NULL, *dleaf = NULL;
+ struct stat sb;
+ int ret;
+
+ printf("glfs_h_rename tests: In Progress\n");
+
+ /* Prepare tests */
+ root = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ parent = glfs_h_mkdir(fs, root, my_dir, 0755, &sb);
+ if (parent == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, root, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, parent, my_file, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, parent, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dirsrc = glfs_h_mkdir(fs, parent, src_dir, 0755, &sb);
+ if (dirsrc == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ src_dir, parent, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dirtgt = glfs_h_mkdir(fs, parent, tgt_dir, 0755, &sb);
+ if (dirtgt == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ tgt_dir, parent, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ dleaf = glfs_h_creat(fs, dirsrc, my_file, O_CREAT, 0644, &sb);
+ if (dleaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dirsrc, strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* run tests */
+ /* Rename file.txt -> file1.txt */
+ ret = glfs_h_rename(fs, parent, "file.txt", parent, "file1.txt");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s to %s (%s)\n",
+ "file.txt", "file1.txt", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename dir1/file.txt -> file.txt */
+ ret = glfs_h_rename(fs, dirsrc, "file.txt", parent, "file.txt");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s/%s to %s (%s)\n",
+ src_dir, "file.txt", "file.txt", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename file1.txt -> file.txt (exists) */
+ ret = glfs_h_rename(fs, parent, "file1.txt", parent, "file.txt");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s to %s (%s)\n",
+ "file.txt", "file.txt", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename dir1 -> dir3 */
+ ret = glfs_h_rename(fs, parent, "dir1", parent, "dir3");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s to %s (%s)\n", "dir1",
+ "dir3", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename dir2 ->dir3 (exists) */
+ ret = glfs_h_rename(fs, parent, "dir2", parent, "dir3");
+ if (ret != 0) {
+ fprintf(stderr, "glfs_h_rename: error renaming %s to %s (%s)\n", "dir2",
+ "dir3", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename file.txt -> dir3 (fail) */
+ ret = glfs_h_rename(fs, parent, "file.txt", parent, "dir3");
+ if (ret == 0) {
+ fprintf(stderr, "glfs_h_rename: NO error renaming %s to %s (%s)\n",
+ "file.txt", "dir3", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ /* rename dir3 -> file.txt (fail) */
+ ret = glfs_h_rename(fs, parent, "dir3", parent, "file.txt");
+ if (ret == 0) {
+ fprintf(stderr, "glfs_h_rename: NO error renaming %s to %s (%s)\n",
+ "dir3", "file.txt", strerror(errno));
+ printf("glfs_h_rename tests: FAILED\n");
+ goto out;
+ }
+
+ printf("glfs_h_rename tests: PASSED\n");
+
+out:
+ if (root)
+ glfs_h_close(root);
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+ if (dirsrc)
+ glfs_h_close(dirsrc);
+ if (dirtgt)
+ glfs_h_close(dirtgt);
+ if (dleaf)
+ glfs_h_close(dleaf);
+
+ return;
+}
+
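+/* Accumulate the elapsed time between ts_st and ts_ed into *ts, carrying
+ * overflow from the nanosecond field into seconds. */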
+void
+assimilatetime(struct timespec *ts, struct timespec ts_st,
+ struct timespec ts_ed)
+{
+ if ((ts_ed.tv_nsec - ts_st.tv_nsec) < 0) {
+ ts->tv_sec += ts_ed.tv_sec - ts_st.tv_sec - 1;
+ ts->tv_nsec += 1000000000 + ts_ed.tv_nsec - ts_st.tv_nsec;
+ } else {
+ ts->tv_sec += ts_ed.tv_sec - ts_st.tv_sec;
+ ts->tv_nsec += ts_ed.tv_nsec - ts_st.tv_nsec;
+ }
+
+ if (ts->tv_nsec > 1000000000) {
+ ts->tv_nsec = ts->tv_nsec - 1000000000;
+ ts->tv_sec += 1;
+ }
+
+ return;
+}
+
+#define MAX_FILES_CREATE 10
+#define MAXPATHNAME 512
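+/* Compare the cost of creating MAX_FILES_CREATE empty files through the
+ * handle-based API (glfs_h_creat) against the path-based API (glfs_creat). */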
+void
+test_h_performance(void)
+{
+ char *my_dir = "perftest", *full_dir_path = "/testdir/perftest";
+ char *my_file = "file_", my_file_name[MAXPATHNAME];
+ struct glfs_object *parent = NULL, *leaf = NULL, *dir = NULL;
+ struct stat sb;
+ int ret, i;
+ struct glfs_fd *fd;
+ struct timespec c_ts = {0, 0}, c_ts_st, c_ts_ed;
+ struct timespec o_ts = {0, 0}, o_ts_st, o_ts_ed;
+
+ printf("glfs_h_performance tests: In Progress\n");
+
+ /* Prepare tests */
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, NULL, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ dir = glfs_h_mkdir(fs, parent, my_dir, 0755, &sb);
+ if (dir == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error creating %s: from (%p),%s\n",
+ my_dir, parent, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* create performance */
+ ret = clock_gettime(CLOCK_REALTIME, &o_ts_st);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ for (i = 0; i < MAX_FILES_CREATE; i++) {
+ sprintf(my_file_name, "%s%d", my_file, i);
+
+ ret = clock_gettime(CLOCK_REALTIME, &c_ts_st);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ leaf = glfs_h_lookupat(fs, dir, my_file_name, &sb, 0);
+ if (leaf != NULL) {
+ fprintf(stderr, "glfs_h_lookup: exists %s\n", my_file_name);
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ leaf = glfs_h_creat(fs, dir, my_file_name, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error creating %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ ret = clock_gettime(CLOCK_REALTIME, &c_ts_ed);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ assimilatetime(&c_ts, c_ts_st, c_ts_ed);
+ glfs_h_close(leaf);
+ leaf = NULL;
+ }
+
+ ret = clock_gettime(CLOCK_REALTIME, &o_ts_ed);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ assimilatetime(&o_ts, o_ts_st, o_ts_ed);
+
+ printf("Creation performance (handle based):\n\t# empty files:%d\n",
+ MAX_FILES_CREATE);
+ printf("\tOverall time:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n", o_ts.tv_sec,
+ o_ts.tv_nsec);
+ printf("\tcreate call time time:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n",
+ c_ts.tv_sec, c_ts.tv_nsec);
+
+ /* create using path */
+ c_ts.tv_sec = o_ts.tv_sec = 0;
+ c_ts.tv_nsec = o_ts.tv_nsec = 0;
+
+ sprintf(my_file_name, "%s1", full_dir_path);
+ ret = glfs_mkdir(fs, my_file_name, 0755);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_mkdir: error creating %s: from (%p),%s\n", my_dir,
+ parent, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ ret = clock_gettime(CLOCK_REALTIME, &o_ts_st);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ for (i = 0; i < MAX_FILES_CREATE; i++) {
+ sprintf(my_file_name, "%s1/%sn%d", full_dir_path, my_file, i);
+
+ ret = clock_gettime(CLOCK_REALTIME, &c_ts_st);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ ret = glfs_stat(fs, my_file_name, &sb);
+ if (ret == 0) {
+ fprintf(stderr, "glfs_stat: exists %s\n", my_file_name);
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ fd = glfs_creat(fs, my_file_name, O_CREAT, 0644);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_creat: error creating %s: from (%p),%s\n",
+ my_file, dir, strerror(errno));
+ printf("glfs_h_performance tests: FAILED\n");
+ goto out;
+ }
+
+ ret = clock_gettime(CLOCK_REALTIME, &c_ts_ed);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ assimilatetime(&c_ts, c_ts_st, c_ts_ed);
+ glfs_close(fd);
+ }
+
+ ret = clock_gettime(CLOCK_REALTIME, &o_ts_ed);
+ if (ret != 0) {
+ fprintf(stderr, "clock_gettime: error %s\n", strerror(errno));
+ printf("glfs_h_getattrs and setattrs tests: FAILED\n");
+ goto out;
+ }
+
+ assimilatetime(&o_ts, o_ts_st, o_ts_ed);
+
+ printf("Creation performance (path based):\n\t# empty files:%d\n",
+ MAX_FILES_CREATE);
+ printf("\tOverall time:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n", o_ts.tv_sec,
+ o_ts.tv_nsec);
+ printf("\tcreate call time time:\n\t\tSecs:%ld\n\t\tnSecs:%ld\n",
+ c_ts.tv_sec, c_ts.tv_nsec);
+out:
+ return;
+}
+
+int
+test_handleops(int argc, char *argv[])
+{
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+ struct stat sb = {
+ 0,
+ };
+ struct glfs_object *root = NULL, *parent = NULL, *leaf = NULL, *tmp = NULL;
+ char readbuf[32], writebuf[32];
+ unsigned char leaf_handle[GFAPI_HANDLE_LENGTH];
+
+ char *full_leaf_name = "/testdir/testfile.txt", *leaf_name = "testfile.txt",
+ *relative_leaf_name = "testdir/testfile.txt";
+ char *leaf_name1 = "testfile1.txt";
+ char *full_newparent_name = "/testdir/dir1", *newparent_name = "dir1";
+ char *full_newnod_name = "/testdir/nod1", *newnod_name = "nod1";
+
+ /* Initialize test area */
+ ret = glfs_mkdir(fs, full_parent_name, 0755);
+ if (ret != 0 && errno != EEXIST) {
+ fprintf(stderr, "%s: (%p) %s\n", full_parent_name, fd, strerror(errno));
+ printf("Test initialization failed on volume %s\n", argv[1]);
+ goto out;
+ } else if (ret != 0) {
+ printf("Found test directory %s to be existing\n", full_parent_name);
+ printf("Cleanup test directory and restart tests\n");
+ goto out;
+ }
+
+ fd = glfs_creat(fs, full_leaf_name, O_CREAT, 0644);
+ if (fd == NULL) {
+ fprintf(stderr, "%s: (%p) %s\n", full_leaf_name, fd, strerror(errno));
+ printf("Test initialization failed on volume %s\n", argv[1]);
+ goto out;
+ }
+ glfs_close(fd);
+
+ printf("Initialized the test area, within volume %s\n", argv[1]);
+
+ /* Handle based APIs test area */
+
+ /* glfs_lookupat test */
+ printf("glfs_h_lookupat tests: In Progress\n");
+ /* start at root of the volume */
+ root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n", "/",
+ NULL, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* lookup a parent within root */
+ parent = glfs_h_lookupat(fs, root, parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ parent_name, root, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* lookup a leaf/child within the parent */
+ leaf = glfs_h_lookupat(fs, parent, leaf_name, &sb, 0);
+ if (leaf == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ leaf_name, parent, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* reset */
+ glfs_h_close(root);
+ root = NULL;
+ glfs_h_close(leaf);
+ leaf = NULL;
+ glfs_h_close(parent);
+ parent = NULL;
+
+ /* check absolute paths */
+ root = glfs_h_lookupat(fs, NULL, "/", &sb, 0);
+ if (root == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n", "/",
+ NULL, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, root, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_lookupat(fs, NULL, full_leaf_name, &sb, 0);
+ if (leaf == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_leaf_name, parent, strerror(errno));
+ printf("glfs_h_lookupat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* reset */
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ /* check multiple component paths */
+ leaf = glfs_h_lookupat(fs, root, relative_leaf_name, &sb, 0);
+ if (leaf == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ relative_leaf_name, parent, strerror(errno));
+ goto out;
+ }
+ peek_stat(&sb);
+
+ /* reset */
+ glfs_h_close(root);
+ root = NULL;
+ glfs_h_close(parent);
+ parent = NULL;
+
+ /* check symlinks in path */
+
+ /* TODO: -ve test cases */
+ /* parent invalid
+ * path invalid
+ * path does not exist after some components
+ * no parent, but relative path
+ * parent and full path? -ve?
+ */
+
+ printf("glfs_h_lookupat tests: PASSED\n");
+
+ /* glfs_openat test */
+ printf("glfs_h_open tests: In Progress\n");
+ fd = glfs_h_open(fs, leaf, O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_h_open: error on open of %s: %s\n",
+ full_leaf_name, strerror(errno));
+ printf("glfs_h_open tests: FAILED\n");
+ goto out;
+ }
+
+ /* test read/write based on fd */
+ memcpy(writebuf, "abcdefghijklmnopqrstuvwxyz012345", 32);
+ ret = glfs_write(fd, writebuf, 32, 0);
+
+ glfs_lseek(fd, 10, SEEK_SET);
+
+ ret = glfs_read(fd, readbuf, 32, 0);
+ if (memcmp(readbuf, writebuf, 32)) {
+ printf("Failed to read what I wrote: %s %s\n", readbuf, writebuf);
+ glfs_close(fd);
+ printf("glfs_h_open tests: FAILED\n");
+ goto out;
+ }
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+ glfs_close(fd);
+
+ printf("glfs_h_open tests: PASSED\n");
+
+ /* Create tests */
+ printf("glfs_h_creat tests: In Progress\n");
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, root, strerror(errno));
+ printf("glfs_h_creat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_creat(fs, parent, leaf_name1, O_CREAT, 0644, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_creat: error on create of %s: from (%p),%s\n",
+ leaf_name1, parent, strerror(errno));
+ printf("glfs_h_creat tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ leaf = glfs_h_creat(fs, parent, leaf_name1, O_CREAT | O_EXCL, 0644, &sb);
+ if (leaf != NULL || errno != EEXIST) {
+ fprintf(stderr,
+ "glfs_h_creat: existing file, leaf = (%p), errno = %s\n", leaf,
+ strerror(errno));
+ printf("glfs_h_creat tests: FAILED\n");
+ if (leaf != NULL) {
+ glfs_h_close(leaf);
+ leaf = NULL;
+ }
+ }
+
+ tmp = glfs_h_creat(fs, root, parent_name, O_CREAT, 0644, &sb);
+ if (tmp != NULL || !(errno == EISDIR || errno == EINVAL)) {
+ fprintf(stderr, "glfs_h_creat: dir create, tmp = (%p), errno = %s\n",
+                tmp, strerror(errno));
+ printf("glfs_h_creat tests: FAILED\n");
+ if (tmp != NULL) {
+ glfs_h_close(tmp);
+ tmp = NULL;
+ }
+ }
+
+ /* TODO: Other combinations and -ve cases as applicable */
+ printf("glfs_h_creat tests: PASSED\n");
+
+ /* extract handle and create from handle test */
+ printf(
+ "glfs_h_extract_handle and glfs_h_create_from_handle tests: In "
+ "Progress\n");
+ /* TODO: Change the lookup to create below for a GIFD recovery failure,
+ * that needs to be fixed */
+ leaf = glfs_h_lookupat(fs, parent, leaf_name1, &sb, 0);
+ if (leaf == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ leaf_name1, parent, strerror(errno));
+ printf("glfs_h_extract_handle tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ ret = glfs_h_extract_handle(leaf, leaf_handle, GFAPI_HANDLE_LENGTH);
+ if (ret < 0) {
+ fprintf(stderr,
+ "glfs_h_extract_handle: error extracting handle of %s: %s\n",
+ full_leaf_name, strerror(errno));
+ printf("glfs_h_extract_handle tests: FAILED\n");
+ goto out;
+ }
+ peek_handle(leaf_handle);
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ leaf = glfs_h_create_from_handle(fs, leaf_handle, GFAPI_HANDLE_LENGTH, &sb);
+ if (leaf == NULL) {
+ fprintf(
+ stderr,
+ "glfs_h_create_from_handle: error on create of %s: from (%p),%s\n",
+ leaf_name1, leaf_handle, strerror(errno));
+ printf("glfs_h_create_from_handle tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ fd = glfs_h_open(fs, leaf, O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_h_open: error on open of %s: %s\n",
+ full_leaf_name, strerror(errno));
+ printf("glfs_h_create_from_handle tests: FAILED\n");
+ goto out;
+ }
+
+ /* test read/write based on fd */
+ memcpy(writebuf, "abcdefghijklmnopqrstuvwxyz012345", 32);
+ ret = glfs_write(fd, writebuf, 32, 0);
+
+ glfs_lseek(fd, 0, SEEK_SET);
+
+ ret = glfs_read(fd, readbuf, 32, 0);
+ if (memcmp(readbuf, writebuf, 32)) {
+ printf("Failed to read what I wrote: %s %s\n", writebuf, writebuf);
+ printf("glfs_h_create_from_handle tests: FAILED\n");
+ glfs_close(fd);
+ goto out;
+ }
+
+ glfs_close(fd);
+ glfs_h_close(leaf);
+ leaf = NULL;
+ glfs_h_close(parent);
+ parent = NULL;
+
+ printf(
+ "glfs_h_extract_handle and glfs_h_create_from_handle tests: PASSED\n");
+
+ /* Mkdir tests */
+ printf("glfs_h_mkdir tests: In Progress\n");
+
+ ret = glfs_rmdir(fs, full_newparent_name);
+ if (ret && errno != ENOENT) {
+ fprintf(stderr, "glfs_rmdir: Failed for %s: %s\n", full_newparent_name,
+ strerror(errno));
+ printf("glfs_h_mkdir tests: FAILED\n");
+ goto out;
+ }
+
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, root, strerror(errno));
+ printf("glfs_h_mkdir tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_mkdir(fs, parent, newparent_name, 0755, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error on mkdir of %s: from (%p),%s\n",
+ newparent_name, parent, strerror(errno));
+ printf("glfs_h_mkdir tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ leaf = glfs_h_mkdir(fs, parent, newparent_name, 0755, &sb);
+ if (leaf != NULL || errno != EEXIST) {
+ fprintf(stderr,
+ "glfs_h_mkdir: existing directory, leaf = (%p), errno = %s\n",
+ leaf, strerror(errno));
+ printf("glfs_h_mkdir tests: FAILED\n");
+ if (leaf != NULL) {
+ glfs_h_close(leaf);
+ leaf = NULL;
+ }
+ }
+
+ glfs_h_close(parent);
+ parent = NULL;
+
+ printf("glfs_h_mkdir tests: PASSED\n");
+
+ /* Mknod tests */
+ printf("glfs_h_mknod tests: In Progress\n");
+ ret = glfs_unlink(fs, full_newnod_name);
+ if (ret && errno != ENOENT) {
+ fprintf(stderr, "glfs_unlink: Failed for %s: %s\n", full_newnod_name,
+ strerror(errno));
+ printf("glfs_h_mknod tests: FAILED\n");
+ goto out;
+ }
+
+ parent = glfs_h_lookupat(fs, NULL, full_parent_name, &sb, 0);
+ if (parent == NULL) {
+ fprintf(stderr,
+ "glfs_h_lookupat: error on lookup of %s: from (%p),%s\n",
+ full_parent_name, root, strerror(errno));
+ printf("glfs_h_mknod tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+ leaf = glfs_h_mknod(fs, parent, newnod_name, S_IFIFO, 0, &sb);
+ if (leaf == NULL) {
+ fprintf(stderr, "glfs_h_mkdir: error on mkdir of %s: from (%p),%s\n",
+ newnod_name, parent, strerror(errno));
+ printf("glfs_h_mknod tests: FAILED\n");
+ goto out;
+ }
+ peek_stat(&sb);
+
+    /* TODO: create op on a FIFO node hangs, need to check and fix
+    tmp = glfs_h_creat(fs, parent, newnod_name, O_CREAT, 0644, &sb);
+    if (tmp != NULL || errno != EINVAL) {
+        fprintf(stderr, "glfs_h_creat: node create, tmp = (%p), errno = %s\n",
+                tmp, strerror(errno));
+        printf("glfs_h_creat/mknod tests: FAILED\n");
+        if (tmp != NULL) {
+            glfs_h_close(tmp);
+            tmp = NULL;
+        }
+    } */
+
+ glfs_h_close(leaf);
+ leaf = NULL;
+
+ leaf = glfs_h_mknod(fs, parent, newnod_name, 0644, 0, &sb);
+ if (leaf != NULL || errno != EEXIST) {
+ fprintf(stderr,
+ "glfs_h_mknod: existing node, leaf = (%p), errno = %s\n", leaf,
+ strerror(errno));
+ printf("glfs_h_mknod tests: FAILED\n");
+ if (leaf != NULL) {
+ glfs_h_close(leaf);
+ leaf = NULL;
+ }
+ }
+
+ glfs_h_close(parent);
+ parent = NULL;
+
+ printf("glfs_h_mknod tests: PASSED\n");
+
+ /* unlink tests */
+ test_h_unlink();
+
+ /* TODO: opendir tests */
+
+ /* getattr tests */
+ test_h_getsetattrs();
+
+ /* TODO: setattr tests */
+
+ /* truncate tests */
+ test_h_truncate();
+
+ /* link tests */
+ test_h_links();
+
+ /* rename tests */
+ test_h_rename();
+
+ /* performance tests */
+ test_h_performance();
+
+ /* END: New APIs test area */
+
+out:
+ /* Cleanup glfs handles */
+ if (root)
+ glfs_h_close(root);
+ if (parent)
+ glfs_h_close(parent);
+ if (leaf)
+ glfs_h_close(leaf);
+
+ return ret;
+}
+
+int
+test_write_apis(glfs_t *fs)
+{
+ /* Add more content here */
+ /* Some apis we can get are */
+ /*
+ 0. glfs_set_xlator_option()
+
+ Read/Write combinations:
+ . glfs_{p,}readv/{p,}writev
+ . glfs_pread/pwrite
+
+ tests/basic/gfapi/gfapi-async-calls-test.c
+ . glfs_read_async/write_async
+ . glfs_pread_async/pwrite_async
+ . glfs_readv_async/writev_async
+ . glfs_preadv_async/pwritev_async
+
+ . ftruncate/ftruncate_async
+ . fsync/fsync_async
+ . fdatasync/fdatasync_async
+
+ */
+
+ glfs_fd_t *fd = NULL;
+ char *filename = "/filename2";
+ int flags = O_RDWR;
+ char *buf = "some bytes!";
+ char writestr[TEST_STR_LEN];
+ struct iovec iov = {&writestr, TEST_STR_LEN};
+ int ret, i;
+
+ for (i = 0; i < TEST_STR_LEN; i++)
+ writestr[i] = 0x11;
+
+ fd = glfs_open(fs, filename, flags);
+ if (!fd)
+ fprintf(stderr, "open(%s): (%p) %s\n", filename, fd, strerror(errno));
+
+ ret = glfs_writev(fd, &iov, 1, flags);
+ if (ret < 0) {
+ fprintf(stderr, "writev(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ ret = glfs_pwrite(fd, buf, 10, 4, flags, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "pwrite(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ ret = glfs_pwritev(fd, &iov, 1, 4, flags);
+ if (ret < 0) {
+ fprintf(stderr, "pwritev(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ ret = glfs_fsync(fd, NULL, NULL);
+ if (ret < 0) {
+ fprintf(stderr, "fsync(%s): %d (%s)\n", filename, ret, strerror(errno));
+ }
+
+ glfs_close(fd);
+
+ return 0;
+}
+
+int
+test_metadata_ops(glfs_t *fs, glfs_t *fs2)
+{
+ glfs_fd_t *fd = NULL;
+ glfs_fd_t *fd2 = NULL;
+ struct stat sb = {
+ 0,
+ };
+ struct glfs_stat gsb = {
+ 0,
+ };
+ struct statvfs sfs;
+ char readbuf[32];
+ char writebuf[11] = "helloworld";
+
+ char *filename = "/filename2";
+ int ret;
+
+ ret = glfs_lstat(fs, filename, &sb);
+ fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+ fd = glfs_creat(fs, filename, O_RDWR, 0644);
+ if (!fd)
+ fprintf(stderr, "creat(%s): (%p) %s\n", filename, fd, strerror(errno));
+
+ fd2 = glfs_open(fs2, filename, O_RDWR);
+ if (!fd2)
+ fprintf(stderr, "open(%s): (%p) %s\n", filename, fd, strerror(errno));
+
+ ret = glfs_lstat(fs, filename, &sb);
+ if (ret)
+ fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_write(fd, writebuf, 11, 0);
+ if (ret < 0) {
+ fprintf(stderr, "writev(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+ }
+
+ glfs_fsync(fd, NULL, NULL);
+
+ glfs_lseek(fd2, 5, SEEK_SET);
+
+ ret = glfs_read(fd2, readbuf, 32, 0);
+
+ printf("read %d, %s", ret, readbuf);
+
+ /* get stat */
+ ret = glfs_fstat(fd2, &sb);
+ if (ret)
+ fprintf(stderr, "fstat(%s): %d (%s)\n", filename, ret, strerror(errno));
+
+ ret = glfs_access(fs, filename, R_OK);
+ if (ret)
+ fprintf(stderr, "access(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_fallocate(fd2, 1024, 1024, 1024);
+ if (ret)
+ fprintf(stderr, "fallocate(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_discard(fd2, 1024, 512);
+ if (ret)
+ fprintf(stderr, "discard(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_zerofill(fd2, 2048, 1024);
+ if (ret)
+ fprintf(stderr, "zerofill(%s): %d (%s)\n", filename, ret,
+ strerror(errno));
+
+ /* set stat */
+ /* TODO: got some errors, need to fix */
+ ret = glfs_fsetattr(fd2, &gsb);
+
+ glfs_close(fd);
+ glfs_close(fd2);
+
+ filename = "/filename3";
+ ret = glfs_mknod(fs, filename, S_IFIFO, 0);
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_lstat(fs, filename, &sb);
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_rename(fs, filename, "/filename4");
+ if (ret)
+ fprintf(stderr, "rename(%s): (%d) %s\n", filename, ret,
+ strerror(errno));
+
+ ret = glfs_unlink(fs, "/filename4");
+ if (ret)
+ fprintf(stderr, "unlink(%s): (%d) %s\n", "/filename4", ret,
+ strerror(errno));
+
+ filename = "/dirname2";
+ ret = glfs_mkdir(fs, filename, 0);
+ if (ret)
+ fprintf(stderr, "%s: (%d) %s\n", filename, ret, strerror(errno));
+
+ ret = glfs_lstat(fs, filename, &sb);
+ if (ret)
+ fprintf(stderr, "lstat(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+    ret = glfs_rmdir(fs, filename);
+    if (ret)
+        fprintf(stderr, "rmdir(%s): (%d) %s\n", filename, ret, strerror(errno));
+
+    return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs2 = NULL;
+ int ret = 0;
+ glfs_fd_t *fd = NULL;
+ glfs_fd_t *fd2 = NULL;
+ struct stat sb = {
+ 0,
+ };
+ struct glfs_stat gsb = {
+ 0,
+ };
+ struct statvfs sfs;
+ char readbuf[32];
+ char writebuf[32];
+ char volumeid[64];
+
+ char *filename = "/filename2";
+
+ if ((argc < 2) || (argc > 3)) {
+ printf("Usage:\n\t%s <volname> <hostname>\n\t%s <volfile-path>",
+ argv[0], argv[0]);
+ return -1;
+ }
+
+ if (argc == 2) {
+ /* Generally glfs_new() requires volume name as an argument */
+ fs = glfs_new("test-only");
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile(fs, argv[1]);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile failed\n");
+ } else {
+ fs = glfs_new(argv[1]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+ // ret = glfs_set_volfile_server (fs, "unix", "/tmp/gluster.sock", 0);
+ ret = glfs_set_volfile_server(fs, "tcp", argv[2], 24007);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile_server failed\n");
+ }
+
+ /* Change this to relevant file when running locally */
+ ret = glfs_set_logging(fs, "/dev/stderr", 5);
+ if (ret)
+ fprintf(stderr, "glfs_set_logging failed\n");
+
+ ret = glfs_init(fs);
+ if (ret)
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+
+ if (ret)
+ goto out;
+
+ /* no major use for getting the volume id in this test, done for coverage */
+ ret = glfs_get_volumeid(fs, volumeid, 64);
+ if (ret) {
+ fprintf(stderr, "glfs_get_volumeid: returned %d\n", ret);
+ }
+
+ sleep(2);
+
+ if (argc == 2) {
+ /* Generally glfs_new() requires volume name as an argument */
+ fs2 = glfs_new("test_only_volume");
+ if (!fs2) {
+ fprintf(stderr, "glfs_new(fs2): returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile(fs2, argv[1]);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile failed(fs2)\n");
+ } else {
+ fs2 = glfs_new(argv[1]);
+ if (!fs2) {
+ fprintf(stderr, "glfs_new(fs2): returned NULL\n");
+ return 1;
+ }
+ ret = glfs_set_volfile_server(fs2, "tcp", argv[2], 24007);
+ if (ret)
+ fprintf(stderr, "glfs_set_volfile_server failed(fs2)\n");
+ }
+
+ ret = glfs_set_statedump_path(fs2, "/tmp");
+ if (ret) {
+ fprintf(stderr, "glfs_set_statedump_path: %s\n", strerror(errno));
+ }
+
+ ret = glfs_init(fs2);
+ if (ret)
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+
+ test_metadata_ops(fs, fs2);
+
+ test_dirops(fs);
+
+ test_xattr(fs);
+
+ test_chdir(fs);
+
+ test_handleops(argc, argv);
+ // done
+
+ /* Test some extra apis */
+ test_write_apis(fs);
+
+ glfs_statvfs(fs, "/", &sfs);
+
+ glfs_unset_volfile_server(fs, "tcp", argv[2], 24007);
+
+ glfs_fini(fs);
+ glfs_fini(fs2);
+
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/tests/basic/gfapi/glfsxmp.t b/tests/basic/gfapi/glfsxmp.t
new file mode 100644
index 00000000000..b3e6645c0f5
--- /dev/null
+++ b/tests/basic/gfapi/glfsxmp.t
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
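+# Fetch the client volfile so glfsxmp can also be exercised in volfile-path mode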
+$CLI system getspec $V0 > fubar.vol
+
+TEST cp $(dirname $0)/glfsxmp-coverage.c ./glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+
+TEST ./glfsxmp fubar.vol
+
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/basic/gfapi/protocol-client-ssl.vol.in b/tests/basic/gfapi/protocol-client-ssl.vol.in
new file mode 100644
index 00000000000..cdc0c9d0671
--- /dev/null
+++ b/tests/basic/gfapi/protocol-client-ssl.vol.in
@@ -0,0 +1,15 @@
+#
+# This .vol file expects that there is
+#
+# 1. GlusterD listening on @@HOSTNAME@@
+# 2. a volume that provides a brick on @@BRICKPATH@@
+# 3. the volume with the brick has been started
+#
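+# The @@...@@ tokens are substituted by the test scripts (for example via sed
+# in the gfapi SSL test); @@SSL@@ toggles transport.socket.ssl-enabled below.
+#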
+volume test
+ type protocol/client
+ option remote-host @@HOSTNAME@@
+ option remote-subvolume @@BRICKPATH@@
+ option transport-type socket
+ option transport.socket.ssl-enabled @@SSL@@
+end-volume
+
diff --git a/tests/basic/global-threading.t b/tests/basic/global-threading.t
new file mode 100644
index 00000000000..f7d34044b09
--- /dev/null
+++ b/tests/basic/global-threading.t
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# Test if the given process has a number of threads of a given type between
+# min and max.
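+# Thread name patterns used below: glfs_iotwr* are io-threads workers and
+# glfs_tpw* are global thread-pool workers.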
+function check_threads() {
+ local pid="${1}"
+ local pattern="${2}"
+ local min="${3}"
+ local max="${4-}"
+ local count
+
+ count="$(ps hH -o comm ${pid} | grep "${pattern}" | wc -l)"
+ if [[ ${min} -gt ${count} ]]; then
+ return 1
+ fi
+ if [[ ! -z "${max}" && ${max} -lt ${count} ]]; then
+ return 1
+ fi
+
+ return 0
+}
+
+cleanup
+
+TEST glusterd
+
+# Glusterd shouldn't use any thread
+TEST check_threads $(get_glusterd_pid) glfs_tpw 0 0
+TEST check_threads $(get_glusterd_pid) glfs_iotwr 0 0
+
+TEST pkill -9 glusterd
+
+TEST glusterd --global-threading
+
+# Glusterd shouldn't use global threads, even if enabled
+TEST check_threads $(get_glusterd_pid) glfs_tpw 0 0
+TEST check_threads $(get_glusterd_pid) glfs_iotwr 0 0
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/b{0,1}
+
+# Normal configuration using io-threads on bricks
+TEST $CLI volume set $V0 config.global-threading off
+TEST $CLI volume set $V0 performance.iot-pass-through off
+TEST $CLI volume set $V0 performance.client-io-threads off
+TEST $CLI volume start $V0
+
+# There shouldn't be global threads
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b0) glfs_tpw 0 0
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b1) glfs_tpw 0 0
+
+# There should be at least 1 io-thread
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b0) glfs_iotwr 1
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b1) glfs_iotwr 1
+
+# Self-heal should be using global threads
+TEST check_threads $(get_shd_process_pid) glfs_tpw 1
+TEST check_threads $(get_shd_process_pid) glfs_iotwr 0 0
+
+TEST $CLI volume stop $V0
+
+# Configuration with global threads on bricks
+TEST $CLI volume set $V0 config.global-threading on
+TEST $CLI volume set $V0 performance.iot-pass-through on
+TEST $CLI volume start $V0
+
+# There should be at least 1 global thread
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b0) glfs_tpw 1
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b1) glfs_tpw 1
+
+# There shouldn't be any io-thread worker threads
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b0) glfs_iotwr 0 0
+TEST check_threads $(get_brick_pid $V0 $H0 $B0/b1) glfs_iotwr 0 0
+
+# Normal configuration using io-threads on clients
+TEST $CLI volume set $V0 performance.iot-pass-through off
+TEST $CLI volume set $V0 performance.client-io-threads on
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+# There shouldn't be global threads
+TEST check_threads $(get_mount_process_pid $V0 $M0) glfs_tpw 0 0
+
+# There should be at least 1 io-thread
+TEST check_threads $(get_mount_process_pid $V0 $M0) glfs_iotwr 1
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Configuration with global threads on clients
+TEST $CLI volume set $V0 performance.client-io-threads off
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --global-threading $M0
+
+# There should be at least 1 global thread
+TEST check_threads $(get_mount_process_pid $V0 $M0) glfs_tpw 1
+
+# There shouldn't be io-threads
+TEST check_threads $(get_mount_process_pid $V0 $M0) glfs_iotwr 0 0
+
+# Some basic volume access checks with global-threading enabled everywhere
+TEST mkdir ${M0}/dir
+TEST dd if=/dev/zero of=${M0}/dir/file bs=128k count=8
+
+cleanup
diff --git a/tests/basic/glusterd-restart-shd-mux.t b/tests/basic/glusterd-restart-shd-mux.t
new file mode 100644
index 00000000000..46d0dac2fce
--- /dev/null
+++ b/tests/basic/glusterd-restart-shd-mux.t
@@ -0,0 +1,96 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=20
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
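+# Create three more replica-3 and three disperse volumes so that shd
+# multiplexing is exercised across several volumes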
+for i in $(seq 1 3); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Stop glusterd
+TEST pkill glusterd
+#Only glusterd is stopped, so the self-heal daemon should still be running
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^1$" shd_count
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count becomes number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+ afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+ ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+#Reboot a node scenario
+TEST pkill gluster
+#All gluster processes were killed, so no shd should be running
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST glusterd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+#Check that the thread count becomes number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+shd_pid=$(get_shd_mux_pid $V0)
+for i in $(seq 1 3); do
+ afr_path="/var/run/gluster/shd/${V0}_afr$i/${V0}_afr$i-shd.pid"
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $afr_path
+ ec_path="/var/run/gluster/shd/${V0}_ec$i/${V0}_ec${i}-shd.pid"
+ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" cat $ec_path
+done
+
+for i in $(seq 1 3); do
+ TEST $CLI volume stop ${V0}_afr$i
+ TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}3
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/glusterd/arbiter-volume.t b/tests/basic/glusterd/arbiter-volume.t
deleted file mode 100644
index e9edf046905..00000000000
--- a/tests/basic/glusterd/arbiter-volume.t
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-# This command tests the volume create command validation for arbiter volumes.
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-
-TEST $CLI volume delete $V0
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b{4..9}
-EXPECT "2 x \(2 \+ 1\) = 6" volinfo_field $V0 "Number of Bricks"
-
-TEST $CLI volume delete $V0
-
-TEST rm -rf $B0/b{1..3}
-TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-TEST killall -15 glusterd
-TEST glusterd
-TEST pidof glusterd
-EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
-
-#cleanup
diff --git a/tests/basic/glusterd/check-cloudsync-ancestry.t b/tests/basic/glusterd/check-cloudsync-ancestry.t
new file mode 100644
index 00000000000..ff6ffee8db7
--- /dev/null
+++ b/tests/basic/glusterd/check-cloudsync-ancestry.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# When the shard and cloudsync xlators are enabled on a volume, the shard
+# xlator should be an ancestor of cloudsync. This test case checks that condition.
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+
+volfile=$(gluster system:: getwd)"/vols/$V0/trusted-$V0.tcp-fuse.vol"
+
+#Test that both shard and cloudsync are not loaded
+EXPECT "N" volgen_volume_exists $volfile $V0-shard features shard
+EXPECT "N" volgen_volume_exists $volfile $V0-cloudsync features cloudsync
+
+#Enable shard and cloudsync in that order and check if volfile is correct
+TEST $CLI volume set $V0 shard on
+TEST $CLI volume set $V0 cloudsync on
+
+#Test that both shard and cloudsync are loaded
+EXPECT "Y" volgen_volume_exists $volfile $V0-shard features shard
+EXPECT "Y" volgen_volume_exists $volfile $V0-cloudsync features cloudsync
+
+EXPECT "Y" volgen_check_ancestry $volfile features shard features cloudsync
+
+#Disable shard and cloudsync
+TEST $CLI volume set $V0 shard off
+TEST $CLI volume set $V0 cloudsync off
+
+#Test that both shard and cloudsync are not loaded
+EXPECT "N" volgen_volume_exists $volfile $V0-shard features shard
+EXPECT "N" volgen_volume_exists $volfile $V0-cloudsync features cloudsync
+
+#Enable cloudsync and shard in that order and check if volfile is correct
+TEST $CLI volume set $V0 cloudsync on
+TEST $CLI volume set $V0 shard on
+
+#Test that both shard and cloudsync are loaded
+EXPECT "Y" volgen_volume_exists $volfile $V0-shard features shard
+EXPECT "Y" volgen_volume_exists $volfile $V0-cloudsync features cloudsync
+
+EXPECT "Y" volgen_check_ancestry $volfile features shard features cloudsync
+
+cleanup;
diff --git a/tests/basic/glusterd/disperse-create.t b/tests/basic/glusterd/disperse-create.t
index 384c675c882..db8a621d48e 100644
--- a/tests/basic/glusterd/disperse-create.t
+++ b/tests/basic/glusterd/disperse-create.t
@@ -20,6 +20,10 @@ TEST $CLI volume create $V0 disperse 3 redundancy 1 $H0:$B0/b7 $H0:$B0/b8 $H0:$B
EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
TEST $CLI volume delete $V0
+TEST $CLI volume create $V0 disperse-data 2 $H0:$B0/b10 $H0:$B0/b11 $H0:$B0/b12
+EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
+
+TEST $CLI volume delete $V0
TEST $CLI volume create $V0 redundancy 1 $H0:$B0/b10 $H0:$B0/b11 $H0:$B0/b12
EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
diff --git a/tests/basic/glusterd/heald.t b/tests/basic/glusterd/heald.t
index ca112ad0b75..7dae3c3f0fb 100644
--- a/tests/basic/glusterd/heald.t
+++ b/tests/basic/glusterd/heald.t
@@ -7,11 +7,16 @@
# Covers enable/disable at the moment. Will be enhanced later to include
# the other commands as well.
+function is_pid_running {
+ local pid=$1
+ num=`ps auxww | grep glustershd | grep $pid | grep -v grep | wc -l`
+ echo $num
+}
+
cleanup;
TEST glusterd
TEST pidof glusterd
-volfile=$(gluster system:: getwd)"/glustershd/glustershd-server.vol"
#Commands should fail when volume doesn't exist
TEST ! $CLI volume heal non-existent-volume enable
TEST ! $CLI volume heal non-existent-volume disable
@@ -20,51 +25,55 @@ TEST ! $CLI volume heal non-existent-volume disable
# volumes
TEST $CLI volume create dist $H0:$B0/dist
TEST $CLI volume start dist
-TEST "[ -z $(get_shd_process_pid)]"
+TEST "[ -z $(get_shd_process_pid dist)]"
TEST ! $CLI volume heal dist enable
TEST ! $CLI volume heal dist disable
# Commands should work on replicate/disperse volume.
TEST $CLI volume create r2 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
-TEST "[ -z $(get_shd_process_pid)]"
+TEST "[ -z $(get_shd_process_pid r2)]"
TEST $CLI volume start r2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid r2
TEST $CLI volume heal r2 enable
EXPECT "enable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "enable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+volfiler2=$(gluster system:: getwd)"/vols/r2/r2-shd.vol"
+EXPECT "enable" volgen_volume_option $volfiler2 r2-replicate-0 cluster replicate self-heal-daemon
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid r2
+pid=$( get_shd_process_pid r2 )
TEST $CLI volume heal r2 disable
EXPECT "disable" volume_option r2 "cluster.self-heal-daemon"
-EXPECT "disable" volgen_volume_option $volfile r2-replicate-0 cluster replicate self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+EXPECT "disable" volgen_volume_option $volfiler2 r2-replicate-0 cluster replicate self-heal-daemon
+EXPECT "1" is_pid_running $pid
# Commands should work on disperse volume.
TEST $CLI volume create ec2 disperse 3 redundancy 1 $H0:$B0/ec2_0 $H0:$B0/ec2_1 $H0:$B0/ec2_2
TEST $CLI volume start ec2
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid ec2
TEST $CLI volume heal ec2 enable
EXPECT "enable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "enable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+volfileec2=$(gluster system:: getwd)"/vols/ec2/ec2-shd.vol"
+EXPECT "enable" volgen_volume_option $volfileec2 ec2-disperse-0 cluster disperse self-heal-daemon
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid ec2
+pid=$(get_shd_process_pid ec2)
TEST $CLI volume heal ec2 disable
EXPECT "disable" volume_option ec2 "cluster.disperse-self-heal-daemon"
-EXPECT "disable" volgen_volume_option $volfile ec2-disperse-0 cluster disperse self-heal-daemon
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "[0-9][0-9]*" get_shd_process_pid
+EXPECT "disable" volgen_volume_option $volfileec2 ec2-disperse-0 cluster disperse self-heal-daemon
+EXPECT "1" is_pid_running $pid
#Check that shd graph is rewritten correctly on volume stop/start
-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfileec2 ec2-disperse-0 cluster disperse
+
+EXPECT "Y" volgen_volume_exists $volfiler2 r2-replicate-0 cluster replicate
TEST $CLI volume stop r2
-EXPECT "Y" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "N" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfileec2 ec2-disperse-0 cluster disperse
TEST $CLI volume stop ec2
# When both the volumes are stopped glustershd volfile is not modified just the
# process is stopped
-TEST "[ -z $(get_shd_process_pid) ]"
+TEST "[ -z $(get_shd_process_pid dist) ]"
+TEST "[ -z $(get_shd_process_pid ec2) ]"
TEST $CLI volume start r2
-EXPECT "N" volgen_volume_exists $volfile ec2-disperse-0 cluster disperse
-EXPECT "Y" volgen_volume_exists $volfile r2-replicate-0 cluster replicate
+EXPECT "Y" volgen_volume_exists $volfiler2 r2-replicate-0 cluster replicate
TEST $CLI volume set r2 self-heal-daemon on
TEST $CLI volume set r2 cluster.self-heal-daemon off
diff --git a/tests/basic/glusterd/thin-arbiter-volume-probe.t b/tests/basic/glusterd/thin-arbiter-volume-probe.t
new file mode 100644
index 00000000000..acc6943806d
--- /dev/null
+++ b/tests/basic/glusterd/thin-arbiter-volume-probe.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+#This tests if the thin-arbiter-count is transferred to the other peer.
+function check_peers {
+ $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+
+kill_glusterd 2
+$CLI_1 volume create $V0 replica 2 thin-arbiter 1 $H0:$B0/b{1..3}
+TEST $glusterd_2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
+EXPECT "1 x 2 = 2" volinfo_field_1 $V0 "Number of Bricks"
+EXPECT "1 x 2 = 2" volinfo_field_2 $V0 "Number of Bricks"
+
+cleanup;
diff --git a/tests/basic/glusterd/thin-arbiter-volume.t b/tests/basic/glusterd/thin-arbiter-volume.t
new file mode 100644
index 00000000000..4e813890a45
--- /dev/null
+++ b/tests/basic/glusterd/thin-arbiter-volume.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../thin-arbiter.rc
+
+#This test validates the volume create command for thin-arbiter volumes.
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 thin-arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+EXPECT "1 x 2 = 2" volinfo_field $V0 "Number of Bricks"
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST touch $M0/a.txt
+TEST ls $B0/b1/a.txt
+TEST ls $B0/b2/a.txt
+TEST ! ls $B0/b3/a.txt
+
+TEST umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+TEST $CLI volume create $V0 replica 2 thin-arbiter 1 $H0:$B0/b{4..8}
+EXPECT "2 x 2 = 4" volinfo_field $V0 "Number of Bricks"
+
+TEST $CLI volume delete $V0
+
+TEST rm -rf $B0/b{1..3}
+
+TEST $CLI volume create $V0 replica 2 thin-arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+EXPECT "1 x 2 = 2" volinfo_field $V0 "Number of Bricks"
+
+TEST killall -15 glusterd
+TEST glusterd
+TEST pidof glusterd
+EXPECT "1 x 2 = 2" volinfo_field $V0 "Number of Bricks"
+
+cleanup
+
diff --git a/tests/basic/glusterd/volfile_server_switch.t b/tests/basic/glusterd/volfile_server_switch.t
index 309060919b7..e11cfed509a 100644
--- a/tests/basic/glusterd/volfile_server_switch.t
+++ b/tests/basic/glusterd/volfile_server_switch.t
@@ -34,7 +34,7 @@ TEST glusterfs --volfile-id=/$V0 --volfile-server=$H1 --volfile-server=$H2 --vol
TEST kill_glusterd 1
-TEST $CLI_2 volume set $V0 performance.io-cache off
+TEST $CLI_2 volume set $V0 performance.write-behind off
# make sure by this time directory will be created
# TODO: suggest ideal time to wait
diff --git a/tests/basic/glusterd/volume-brick-count.t b/tests/basic/glusterd/volume-brick-count.t
new file mode 100644
index 00000000000..dc1a5278f4f
--- /dev/null
+++ b/tests/basic/glusterd/volume-brick-count.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function test_volume_config()
+{
+ volname=$1
+ type_string=$2
+ brickCount=$3
+ distCount=$4
+ replicaCount=$5
+ arbiterCount=$6
+ disperseCount=$7
+ redundancyCount=$8
+
+ EXPECT "$type_string" volinfo_field $volname "Number of Bricks"
+ EXPECT "$brickCount" get-xml "volume info $volname" "brickCount"
+ EXPECT "$distCount" get-xml "volume info $volname" "distCount"
+ EXPECT "$replicaCount" get-xml "volume info $volname" "replicaCount"
+ EXPECT "$arbiterCount" get-xml "volume info $volname" "arbiterCount"
+ EXPECT "$disperseCount" get-xml "volume info $volname" "disperseCount"
+ EXPECT "$redundancyCount" get-xml "volume info $volname" "redundancyCount"
+}
+
+# This test validates the volume create command and the reported brick counts for different volume types.
+cleanup;
+TESTS_EXPECTED_IN_LOOP=56
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create ${V0}_1 replica 3 arbiter 1 $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+test_volume_config "${V0}_1" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+
+TEST $CLI volume create ${V0}_2 replica 3 arbiter 1 $H0:$B0/b{4..9}
+test_volume_config "${V0}_2" "2 x \(2 \+ 1\) = 6" "6" "2" "3" "1" "0" "0"
+
+
+TEST $CLI volume create ${V0}_3 replica 3 arbiter 1 $H0:$B0/b{10..12}
+test_volume_config "${V0}_3" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+TEST killall -15 glusterd
+TEST glusterd
+TEST pidof glusterd
+test_volume_config "${V0}_3" "1 x \(2 \+ 1\) = 3" "3" "1" "3" "1" "0" "0"
+
+TEST $CLI volume create ${V0}_4 replica 3 $H0:$B0/b{13..15}
+test_volume_config "${V0}_4" "1 x 3 = 3" "3" "1" "3" "0" "0" "0"
+
+TEST $CLI volume create ${V0}_5 replica 3 $H0:$B0/b{16..21}
+test_volume_config "${V0}_5" "2 x 3 = 6" "6" "2" "3" "0" "0" "0"
+
+TEST $CLI volume create ${V0}_6 disperse 3 redundancy 1 $H0:$B0/b{22..24}
+test_volume_config "${V0}_6" "1 x \(2 \+ 1\) = 3" "3" "1" "1" "0" "3" "1"
+
+TEST $CLI volume create ${V0}_7 disperse 3 redundancy 1 $H0:$B0/b{25..30}
+test_volume_config "${V0}_7" "2 x \(2 \+ 1\) = 6" "6" "2" "1" "0" "3" "1"
+
+TEST $CLI volume create ${V0}_8 $H0:$B0/b{31..33}
+test_volume_config "${V0}_8" "3" "3" "3" "1" "0" "0" "0"
+
+cleanup
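test_volume_config above reads per-volume counters from the CLI's XML output through the get-xml helper shipped with the test framework. As a rough sketch of the idea only (an assumption, not the actual helper), such a field can be pulled out of `gluster --xml` output like this:

# Illustrative sketch only: print one XML tag from the CLI's --xml output.
get_xml_field() {
    local command="$1"   # e.g. "volume info ${V0}_1"
    local field="$2"     # e.g. "brickCount"
    $CLI --xml ${command} | sed -n "s|.*<${field}>\([^<]*\)</${field}>.*|\1|p" | head -1
}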
diff --git a/tests/basic/graph-cleanup-brick-down-shd-mux.t b/tests/basic/graph-cleanup-brick-down-shd-mux.t
new file mode 100644
index 00000000000..3c621cdcc26
--- /dev/null
+++ b/tests/basic/graph-cleanup-brick-down-shd-mux.t
@@ -0,0 +1,64 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=4
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count becomes number of volumes * number of ec subvolumes (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#kill one brick and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+#kill an entire subvol and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+#wait for some time to create a race scenario
+sleep 1
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+#kill all bricks and test cleanup
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}3
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick $V0 $H0 $B0/${V0}5
+#wait for some time to create a race scenario
+sleep 2
+
+TEST $CLI volume stop $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd ${V0}_afr1 "afr_shd_index_healer"
+
+cleanup
diff --git a/tests/basic/metadisp/fsyncdir.c b/tests/basic/metadisp/fsyncdir.c
new file mode 100644
index 00000000000..62b532b9ce4
--- /dev/null
+++ b/tests/basic/metadisp/fsyncdir.c
@@ -0,0 +1,29 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+
+int
+main(int argc, char **argv)
+{
+ int pfd;
+
+ pfd = open(argv[1], O_RDONLY | O_DIRECTORY);
+ if (pfd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (rename(argv[2], argv[3]) == (-1)) {
+ perror("rename");
+ return EXIT_FAILURE;
+ }
+
+ if (fsync(pfd) == (-1)) {
+ perror("fsync");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/ftruncate.c b/tests/basic/metadisp/ftruncate.c
new file mode 100644
index 00000000000..c9185212c31
--- /dev/null
+++ b/tests/basic/metadisp/ftruncate.c
@@ -0,0 +1,34 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+
+int
+main(int argc, char **argv)
+{
+ int pfd;
+
+ pfd = open(argv[1], O_RDWR);
+ if (pfd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (ftruncate(pfd, 0) == (-1)) {
+ perror("ftruncate");
+ return EXIT_FAILURE;
+ }
+
+ if (write(pfd, "hello", 5) == (-1)) {
+ perror("write");
+ return EXIT_FAILURE;
+ }
+
+ if (fsync(pfd) == (-1)) {
+ perror("fsync");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/fxattr.c b/tests/basic/metadisp/fxattr.c
new file mode 100644
index 00000000000..e552057778a
--- /dev/null
+++ b/tests/basic/metadisp/fxattr.c
@@ -0,0 +1,107 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <fcntl.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/xattr.h>
+
+static char MY_XATTR[] = "user.fxtest";
+static char *PROGRAM;
+#define CONSUME(v) \
+ do { \
+ if (!argc) { \
+ fprintf(stderr, "missing argument\n"); \
+ return EXIT_FAILURE; \
+ } \
+ v = argv[0]; \
+ ++argv; \
+ --argc; \
+ } while (0)
+
+static int
+do_get(int argc, char **argv, int fd)
+{
+ char *value;
+ int ret;
+ char buf[1024];
+
+ CONSUME(value);
+
+ ret = fgetxattr(fd, MY_XATTR, buf, sizeof(buf));
+ if (ret == (-1)) {
+ perror("fgetxattr");
+ return EXIT_FAILURE;
+ }
+
+ if (strncmp(buf, value, ret) != 0) {
+ fprintf(stderr, "data mismatch\n");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+static int
+do_set(int argc, char **argv, int fd)
+{
+ char *value;
+ int ret;
+
+ CONSUME(value);
+
+ ret = fsetxattr(fd, MY_XATTR, value, strlen(value), 0);
+ if (ret == (-1)) {
+ perror("fsetxattr");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+static int
+do_remove(int argc, char **argv, int fd)
+{
+ int ret;
+
+ ret = fremovexattr(fd, MY_XATTR);
+ if (ret == (-1)) {
+ perror("femovexattr");
+ return EXIT_FAILURE;
+ }
+
+ return EXIT_SUCCESS;
+}
+
+int
+main(int argc, char **argv)
+{
+ int fd;
+ char *path;
+ char *cmd;
+
+ CONSUME(PROGRAM);
+ CONSUME(path);
+ CONSUME(cmd);
+
+ fd = open(path, O_RDWR);
+ if (fd == (-1)) {
+ perror("open");
+ return EXIT_FAILURE;
+ }
+
+ if (strcmp(cmd, "get") == 0) {
+ return do_get(argc, argv, fd);
+ }
+
+ if (strcmp(cmd, "set") == 0) {
+ return do_set(argc, argv, fd);
+ }
+
+ if (strcmp(cmd, "remove") == 0) {
+ return do_remove(argc, argv, fd);
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/tests/basic/metadisp/gfs-fsetxattr.c b/tests/basic/metadisp/gfs-fsetxattr.c
new file mode 100644
index 00000000000..63578bc528f
--- /dev/null
+++ b/tests/basic/metadisp/gfs-fsetxattr.c
@@ -0,0 +1,141 @@
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int gfapi = 1;
+
+int
+main(int argc, char *argv[])
+{
+ glfs_t *fs = NULL;
+ int ret = 0;
+ int i = 0;
+ glfs_fd_t *fd = NULL;
+ char *topdir = "topdir", *filename = "file1";
+ char *buf = NULL;
+ char *logfile = NULL;
+ char *hostname = NULL;
+ char *basename = NULL;
+ char *dir1 = NULL, *dir2 = NULL, *filename1 = NULL, *filename2 = NULL;
+ struct stat sb = {
+ 0,
+ };
+
+ if (argc != 5) {
+ fprintf(
+ stderr,
+ "Expect following args %s <hostname> <Vol> <log file> <basename>\n",
+ argv[0]);
+ return -1;
+ }
+
+ hostname = argv[1];
+ logfile = argv[3];
+ basename = argv[4];
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL (%s)\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", hostname, 24007);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_volfile_server failed ret:%d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_set_logging(fs, logfile, 7);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_set_logging failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_init(fs);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_init failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = asprintf(&dir1, "%s-dir", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ ret = glfs_mkdir(fs, dir1, 0755);
+ if (ret < 0) {
+ fprintf(stderr, "mkdir(%s): %s\n", dir1, strerror(errno));
+ return -1;
+ }
+
+ fd = glfs_opendir(fs, dir1);
+ if (!fd) {
+ fprintf(stderr, "/: %s\n", strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fsetxattr(fd, "user.dirfattr", "fsetxattr", 9, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr(%s): %d (%s)\n", dir1, ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_closedir(fd);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_closedir failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = asprintf(&filename1, "%s-file", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ ret = asprintf(&filename2, "%s-file-renamed", basename);
+ if (ret < 0) {
+ fprintf(stderr, "cannot construct filename (%s)", strerror(errno));
+ return ret;
+ }
+
+ fd = glfs_creat(fs, filename1, O_RDWR, 0644);
+ if (!fd) {
+ fprintf(stderr, "%s: (%p) %s\n", filename1, fd, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_rename(fs, filename1, filename2);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_rename failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_lstat(fs, filename2, &sb);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_lstat failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_fsetxattr(fd, "user.filefattr", "fsetxattr", 9, 0);
+ if (ret < 0) {
+ fprintf(stderr, "fsetxattr(%s): %d (%s)\n", dir1, ret, strerror(errno));
+ return -1;
+ }
+
+ ret = glfs_close(fd);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_close failed with ret: %d (%s)\n", ret,
+ strerror(errno));
+ return -1;
+ }
+}
diff --git a/tests/basic/metadisp/metadisp.t b/tests/basic/metadisp/metadisp.t
new file mode 100644
index 00000000000..894ffe07226
--- /dev/null
+++ b/tests/basic/metadisp/metadisp.t
@@ -0,0 +1,316 @@
+#!/usr/bin/env bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+
+# Because `--enable-metadisp` is a `./configure` option that is disabled by
+# default, this test will never pass in regression runs. To exercise it,
+# configure with that option and then run:
+# `prove -vmfe '/bin/bash' tests/basic/metadisp/metadisp.t`
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST
+
+cleanup;
+
+TEST mkdir -p $B0/b0/{0,1}
+
+TEST setfattr -n trusted.glusterfs.volume-id -v 0xddab9eece7b64a95b07351a1f748f56f ${B0}/b0/0
+TEST setfattr -n trusted.glusterfs.volume-id -v 0xddab9eece7b64a95b07351a1f748f56f ${B0}/b0/1
+
+TEST $GFS --volfile=$(dirname $0)/metadisp.vol --volfile-id=$V0 $M0;
+
+NUM_FILES=40
+TEST touch $M0/{1..${NUM_FILES}}
+
+# each drive should get 40 files
+TEST [ $(dir -1 $B0/b0/0/ | wc -l) -eq $NUM_FILES ]
+TEST [ $(dir -1 $B0/b0/1/ | wc -l) -eq $NUM_FILES ]
+
+# now write some data to a file
+echo "hello" > $M0/3
+filename=$$
+echo "hello" > /tmp/metadisp-write-${filename}
+checksum=$(md5sum /tmp/metadisp-write-${filename} | awk '{print $1}')
+TEST [ "$(md5sum $M0/3 | awk '{print $1}')" == "$checksum" ]
+
+# check that the backend file exists on b1
+gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/b0/*/3))
+TEST [ $(dir -1 $B0/b0/1/$gfid | wc -l) -eq 1 ]
+
+# check that the backend file matches the frontend
+TEST [ "$(md5sum $B0/b0/1/$gfid | awk '{print $1}')" == "$checksum" ]
+
+# delete the file
+TEST rm $M0/3
+
+# ensure the frontend and backend files are cleaned up
+TEST ! stat $M0/3
+TEST ! stat $B0/b*/*/$gfid
+
+# Test TRUNCATE + WRITE flow
+echo "hello" | tee $M0/4
+echo "goo" | tee $M0/4
+filename=$$
+echo "goo" | tee /tmp/metadisp-truncate-${filename}
+checksum=$(md5sum /tmp/metadisp-truncate-${filename} | awk '{print $1}')
+TEST [ "$(md5sum $M0/4 | awk '{print $1}')" == "$checksum" ]
+
+# Test mkdir + rmdir.
+TEST mkdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+TEST rmdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+
+# Test rename.
+TEST touch $M0/rename_me
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+TEST mv $M0/rename_me $M0/such_rename
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+
+# Test rename of a file that doesn't exist.
+TEST ! mv $M0/does-not-exist $M0/neither-does-this
+
+
+# cleanup all the other files.
+TEST rm -v $M0/1 $M0/2 $M0/{4..${NUM_FILES}}
+TEST rm $M0/such_rename
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq 0 ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq 0 ]
+
+# Test CREATE flow
+NUM_FILES=40
+TEST touch $M0/{1..${NUM_FILES}}
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq $NUM_FILES ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq $NUM_FILES ]
+
+# Test UNLINK flow
+# No drives should have any files
+TEST rm -v $M0/{1..${NUM_FILES}}
+TEST [ $(ls /d/backends/b0/0/ | wc -l) -eq 0 ]
+TEST [ $(ls /d/backends/b0/1/ | wc -l) -eq 0 ]
+
+# Test CREATE + WRITE + READ flow
+filename=$$
+dd if=/dev/urandom of=/tmp/${filename} bs=1M count=10
+checksum=$(md5sum /tmp/${filename} | awk '{print $1}')
+TEST cp -v /tmp/${filename} $M0/1
+TEST cp -v /tmp/${filename} $M0/2
+TEST cp -v /tmp/${filename} $M0/3
+TEST cp -v /tmp/${filename} $M0/4
+TEST [ "$(md5sum $M0/1 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/2 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/3 | awk '{print $1}')" == "$checksum" ]
+TEST [ "$(md5sum $M0/4 | awk '{print $1}')" == "$checksum" ]
+
+# Test TRUNCATE + WRITE flow
+TEST dd if=/dev/zero of=$M0/1 bs=1M count=20
+
+# Check that readdir stats the files properly and we get the correct sizes
+TEST [ $(find $M0 -size +9M | wc -l) -eq 4 ];
+
+# Test mkdir + rmdir.
+TEST mkdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+TEST rmdir $M0/rmdir_me
+nfiles=$(ls -d $B0/b*/*/rmdir_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+
+# Test rename.
+# This has been flaky in the past; watch for intermittent failures here.
+TEST touch $M0/rename_me
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+TEST mv $M0/rename_me $M0/such_rename
+nfiles=$(ls $B0/b*/*/rename_me 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "0" ]
+nfiles=$(ls $B0/b*/*/such_rename 2> /dev/null | wc -l)
+TEST [ "$nfiles" = "1" ]
+
+# Test rename of a file that doesn't exist.
+TEST ! mv $M0/does-not-exist $M0/neither-does-this
+
+# Test rename over an existing file.
+ok=yes
+for i in $(seq 0 9); do
+ echo foo > $M0/src$i
+ echo bar > $M0/dst$i
+done
+for i in $(seq 0 9); do
+ mv $M0/src$i $M0/dst$i
+done
+for i in $(seq 0 9); do
+ nfiles=$(cat $B0/b0/*/dst$i | wc -l)
+ if [ "$nfiles" = "2" ]; then
+ echo "COLLISION on dst$i"
+ (ls -l $B0/b0/*/dst$i; cat $B0/b0/*/dst$i) | sed "/^/s// /"
+ ok=no
+ fi
+done
+EXPECT "yes" echo $ok
+
+# Test rename of a directory.
+count_copies () {
+ ls -d $B0/b?/?/$1 2> /dev/null | wc -l
+}
+TEST mkdir $M0/foo_dir
+EXPECT 1 count_copies foo_dir
+EXPECT 0 count_copies bar_dir
+TEST mv $M0/foo_dir $M0/bar_dir
+EXPECT 0 count_copies foo_dir
+EXPECT 1 count_copies bar_dir
+
+for x in $(seq 0 99); do
+ touch $M0/target$x
+ ln -s $M0/target$x $M0/link$x
+done
+on_0=$(ls $B0/b*/0/link* | wc -l)
+on_1=$(ls $B0/b*/1/link* | wc -l)
+TEST [ "$on_0" -eq 100 ]
+TEST [ "$on_1" -eq 0 ]
+TEST [ "$(ls -l $M0/link* | wc -l)" = 100 ]
+
+# Test (hard) link.
+_test_hardlink () {
+ local b
+ local has_src
+ local has_dst
+ local src_inum
+ local dst_inum
+ touch $M0/hardsrc$1
+ ln $M0/hardsrc$1 $M0/harddst$1
+    for b in $B0/b0/{0,1}; do
+ [ -f $b/hardsrc$1 ]; has_src=$?
+ [ -f $b/harddst$1 ]; has_dst=$?
+ if [ "$has_src" != "$has_dst" ]; then
+ echo "MISSING $b/hardxxx$1 $has_src $has_dst"
+ return
+ fi
+ if [ "$has_src$has_dst" = "00" ]; then
+ src_inum=$(stat -c '%i' $b/hardsrc$1)
+ dst_inum=$(stat -c '%i' $b/harddst$1)
+ if [ "$dst_inum" != "$src_inum" ]; then
+ echo "MISMATCH $b/hardxx$i $src_inum $dst_inum"
+ return
+ fi
+ fi
+ done
+ echo "OK"
+}
+
+test_hardlink () {
+ local result=$(_test_hardlink $*)
+ # [ "$result" = "OK" ] || echo $result > /dev/tty
+ echo $result
+}
+
+# Do this multiple times to make sure colocation isn't a fluke.
+EXPECT "OK" test_hardlink 0
+EXPECT "OK" test_hardlink 1
+EXPECT "OK" test_hardlink 2
+EXPECT "OK" test_hardlink 3
+EXPECT "OK" test_hardlink 4
+EXPECT "OK" test_hardlink 5
+EXPECT "OK" test_hardlink 6
+EXPECT "OK" test_hardlink 7
+EXPECT "OK" test_hardlink 8
+EXPECT "OK" test_hardlink 9
+
+# Test removing the hardlink source: ensure that deleting one name
+# doesn't delete the data unless the link count is 1
+TEST mkdir $M0/hardlink
+TEST touch $M0/hardlink/fileA
+echo "data" >> $M0/hardlink/fileA
+checksum=$(md5sum $M0/hardlink/fileA | awk '{print $1}')
+TEST ln $M0/hardlink/fileA $M0/hardlink/fileB
+TEST [ $(dir -1 $M0/hardlink/ | wc -l) -eq 2 ]
+TEST rm $M0/hardlink/fileA
+TEST [ $(dir -1 $M0/hardlink/ | wc -l) -eq 1 ]
+TEST [ "$(md5sum $M0/hardlink/fileB | awk '{print $1}')" == "$checksum" ]
+
+#
+# FIXME: statfs values look ok but the test is bad
+#
+# Test statfs. If we're doing it right, the numbers for the mountpoint should be
+# double those for the brick filesystem times the number of bricks,
+# but unless we're on a completely idle
+# system (which never happens) the numbers can change even while this function
+# runs and that would trip us up. Do a sloppy comparison to deal with that.
+#compare_fields () {
+# val1=$(df $1 | grep / | awk "{print \$$3}")
+# val2=$(df $2 | grep / | awk "{print \$$3}")
+# [ "$val2" -gt "$(((val1/(29/10))*19/10))" -a "$val2" -lt "$(((val1/(31/10))*21/10))" ]
+#}
+
+#brick_df=$(df $B0 | grep /)
+#mount_df=$(df $M0 | grep /)
+#TEST compare_fields $B0 $M0 2 # Total blocks
+#TEST compare_fields $B0 $M0 3 # Used
+#TEST compare_fields $B0 $M0 4 # Available
+
+# Test removexattr.
+#RXATTR_FILE=$(get_file_not_on_disk0 rxtest)
+#TEST setfattr -n user.foo -v bar $M0/$RXATTR_FILE
+#TEST getfattr -n user.foo $B0/b0/1/$RXATTR_FILE
+#TEST setfattr -x user.foo $M0/$RXATTR_FILE
+#TEST ! getfattr -n user.foo $B0/b0/1/$RXATTR_FILE
+
+# Test fsyncdir. We can't really test whether it's doing the right thing,
+# but we can test that it doesn't fail and we can hand-check that it's calling
+# down to all of the disks instead of just one.
+#
+# P.S. There's no fsyncdir test in the rest of Gluster, so who even knows if
+# other translators are handling it correctly?
+
+#FSYNCDIR_EXE=$(dirname $0)/fsyncdir
+#build_tester ${FSYNCDIR_EXE}.c
+#TEST touch $M0/fsyncdir_src
+#TEST $FSYNCDIR_EXE $M0 $M0/fsyncdir_src $M0/fsyncdir_dst
+#TEST rm -f $FSYNCDIR_EXE
+
+# Test fsetxattr, fgetxattr, fremovexattr (in that order).
+FXATTR_FILE=$M0/fxfile1
+TEST touch $FXATTR_FILE
+FXATTR_EXE=$(dirname $0)/fxattr
+build_tester ${FXATTR_EXE}.c
+TEST ! getfattr -n user.fxtest $FXATTR_FILE
+TEST $FXATTR_EXE $FXATTR_FILE set value1
+TEST getfattr -n user.fxtest $FXATTR_FILE
+TEST setfattr -n user.fxtest -v value2 $FXATTR_FILE
+TEST $FXATTR_EXE $FXATTR_FILE get value2
+TEST $FXATTR_EXE $FXATTR_FILE remove
+TEST ! getfattr -n user.fxtest $FXATTR_FILE
+TEST rm -f $FXATTR_EXE
+
+# Test ftruncate
+FTRUNCATE_EXE=$(dirname $0)/ftruncate
+build_tester ${FTRUNCATE_EXE}.c
+FTRUNCATE_FILE=$M0/ftfile1
+TEST dd if=/dev/urandom of=$FTRUNCATE_FILE count=1 bs=1MB
+TEST $FTRUNCATE_EXE $FTRUNCATE_FILE
+#gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/b0/*/ftfile1))
+
+# Test fallocate, discard, zerofill. Actually we don't so much check that these
+# *work* as that they don't throw any errors (especially ENOENT because the
+# file's not on disk zero).
+FALLOC_FILE=fatest1
+TEST touch $M0/$FALLOC_FILE
+TEST fallocate -l $((4096*5)) $M0/$FALLOC_FILE
+TEST fallocate -p -o 4096 -l 4096 $M0/$FALLOC_FILE
+# This actually fails with "operation not supported" on most filesystems, so
+# don't leave it enabled except to test changes.
+#TEST fallocate -z -o $((4096*3)) -l 4096 $M0/$FALLOC_FILE
+
+#cleanup;
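The backend checks in this test resolve a file's gfid with the gf_get_gfid_xattr and gf_gfid_xattr_to_str helpers. Purely for orientation (an assumed sketch, not how those helpers are implemented), the same value can be read by hand from the trusted.gfid xattr of a brick file:

# Illustrative sketch only: print the gfid of a backend file as a bare hex string.
brick_gfid() {
    getfattr -n trusted.gfid -e hex "$1" 2>/dev/null | sed -n 's/^trusted.gfid=0x//p'
}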
diff --git a/tests/basic/metadisp/metadisp.vol b/tests/basic/metadisp/metadisp.vol
new file mode 100644
index 00000000000..58ae2f6f2a8
--- /dev/null
+++ b/tests/basic/metadisp/metadisp.vol
@@ -0,0 +1,14 @@
+volume posix-0
+ type storage/posix
+ option directory /d/backends/b0/0
+end-volume
+
+volume posix-1
+ type storage/posix
+ option directory /d/backends/b0/1
+end-volume
+
+volume metadisp-0
+ type features/metadisp
+ subvolumes posix-0 posix-1
+end-volume
diff --git a/tests/basic/mount.t b/tests/basic/mount.t
index f4c2df31135..3a3d7cc9d8d 100755
--- a/tests/basic/mount.t
+++ b/tests/basic/mount.t
@@ -69,6 +69,9 @@ TEST rm -f $N0/newfile;
TEST ! stat $M0/newfile;
TEST ! stat $M1/newfile;
+# No need to check for status here right now
+$(dirname $0)/rpc-coverage.sh $N0 >/dev/null
+
## Before killing daemon to avoid deadlocks
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" umount_nfs $N0
diff --git a/tests/basic/multiple-volume-shd-mux.t b/tests/basic/multiple-volume-shd-mux.t
new file mode 100644
index 00000000000..d7cfbaec85f
--- /dev/null
+++ b/tests/basic/multiple-volume-shd-mux.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=16
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume start $V0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+for i in $(seq 1 3); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+#Check that the thread count becomes number of volumes * number of ec subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count becomes number of volumes * number of afr subvolumes (4*6=24)
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^24$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+#Delete the volumes
+for i in $(seq 1 3); do
+ TEST $CLI volume stop ${V0}_afr$i
+ TEST $CLI volume stop ${V0}_ec$i
+ TEST $CLI volume delete ${V0}_afr$i
+ TEST $CLI volume delete ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/nl-cache.t b/tests/basic/nl-cache.t
index 141fb75d5c8..90c778c8a88 100755
--- a/tests/basic/nl-cache.t
+++ b/tests/basic/nl-cache.t
@@ -64,5 +64,35 @@ TEST rm $M0/dir1/file_link
TEST rmdir $M0/dir1/dir2
TEST rmdir $M0/dir1
+#Check mknod
+TEST ! ls -l $M0/dir
+TEST mkdir $M0/dir
+TEST mknod -m 0666 $M0/dir/block b 4 5
+TEST mknod -m 0666 $M0/dir/char c 1 5
+TEST mknod -m 0666 $M0/dir/fifo p
+TEST rm $M0/dir/block
+TEST rm $M0/dir/char
+TEST rm $M0/dir/fifo
+
+#Check getxattr
+TEST touch $M0/file1
+TEST getfattr -d -m. -e hex $M0/file1
+TEST getfattr -n "glusterfs.get_real_filename:file1" $M0;
+TEST getfattr -n "glusterfs.get_real_filename:FILE1" $M0;
+TEST ! getfattr -n "glusterfs.get_real_filename:FILE2" $M0;
+
+#Check statedump
+TEST generate_mount_statedump $V0 $M0
+TEST cleanup_mount_statedump $V0
+
+#Check reconfigure
+TEST $CLI volume reset $V0 nl-cache-timeout
+TEST $CLI volume reset $V0 nl-cache-positive-entry
+TEST $CLI volume reset $V0 nl-cache-limit
+TEST $CLI volume reset $V0 nl-cache-pass-through
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
cleanup;
#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
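The statedump checks added above use the generate_mount_statedump and cleanup_mount_statedump helpers from the test framework. As a rough illustration only (an assumption, not those helpers), a client statedump can be triggered by hand by signalling the mount process and looking in the default dump directory:

# Illustrative sketch only: trigger and locate a statedump of the fuse client.
client_pid=$(get_mount_process_pid $V0 $M0)   # helper used elsewhere in these tests
kill -USR1 ${client_pid}                      # glusterfs processes dump state on SIGUSR1
ls /var/run/gluster/*.dump.* 2>/dev/null      # default statedump location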
diff --git a/tests/basic/open-behind/open-behind.t b/tests/basic/open-behind/open-behind.t
new file mode 100644
index 00000000000..5e865d602e2
--- /dev/null
+++ b/tests/basic/open-behind/open-behind.t
@@ -0,0 +1,183 @@
+#!/bin/bash
+
+WD="$(dirname "${0}")"
+
+. ${WD}/../../include.rc
+. ${WD}/../../volume.rc
+
+function assign() {
+ local _assign_var="${1}"
+ local _assign_value="${2}"
+
+ printf -v "${_assign_var}" "%s" "${_assign_value}"
+}
+
+function pipe_create() {
+ local _pipe_create_var="${1}"
+ local _pipe_create_name
+ local _pipe_create_fd
+
+ _pipe_create_name="$(mktemp -u)"
+ mkfifo "${_pipe_create_name}"
+ exec {_pipe_create_fd}<>"${_pipe_create_name}"
+ rm "${_pipe_create_name}"
+
+ assign "${_pipe_create_var}" "${_pipe_create_fd}"
+}
+
+function pipe_close() {
+ local _pipe_close_fd="${!1}"
+
+ exec {_pipe_close_fd}>&-
+}
+
+function tester_start() {
+ declare -ag tester
+ local tester_in
+ local tester_out
+
+ pipe_create tester_in
+ pipe_create tester_out
+
+ ${WD}/tester <&${tester_in} >&${tester_out} &
+
+ tester=("$!" "${tester_in}" "${tester_out}")
+}
+
+function tester_send() {
+ declare -ag tester
+ local tester_res
+ local tester_extra
+
+ echo "${*}" >&${tester[1]}
+
+ read -t 3 -u ${tester[2]} tester_res tester_extra
+ echo "${tester_res} ${tester_extra}"
+ if [[ "${tester_res}" == "OK" ]]; then
+ return 0
+ fi
+
+ return 1
+}
+
+function tester_stop() {
+ declare -ag tester
+ local tester_res
+
+ tester_send "quit"
+
+ tester_res=0
+ if ! wait ${tester[0]}; then
+ tester_res=$?
+ fi
+
+ unset tester
+
+ return ${tester_res}
+}
+
+function count_open() {
+ local file="$(realpath "${B0}/${V0}/${1}")"
+ local count="0"
+ local inode
+ local ref
+
+ inode="$(stat -c %i "${file}")"
+
+ for fd in /proc/${BRICK_PID}/fd/*; do
+ ref="$(readlink "${fd}")"
+ if [[ "${ref}" == "${B0}/${V0}/"* ]]; then
+ if [[ "$(stat -c %i "${ref}")" == "${inode}" ]]; then
+ count="$((${count} + 1))"
+ fi
+ fi
+ done
+
+ echo "${count}"
+}
+
+cleanup
+
+TEST build_tester ${WD}/tester.c ${WD}/tester-fd.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST ${CLI} volume create ${V0} ${H0}:${B0}/${V0}
+TEST ${CLI} volume set ${V0} flush-behind off
+TEST ${CLI} volume set ${V0} write-behind off
+TEST ${CLI} volume set ${V0} quick-read off
+TEST ${CLI} volume set ${V0} stat-prefetch on
+TEST ${CLI} volume set ${V0} io-cache off
+TEST ${CLI} volume set ${V0} open-behind on
+TEST ${CLI} volume set ${V0} lazy-open off
+TEST ${CLI} volume set ${V0} read-after-open off
+TEST ${CLI} volume start ${V0}
+
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+BRICK_PID="$(get_brick_pid ${V0} ${H0} ${B0}/${V0})"
+
+TEST touch "${M0}/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_start
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT_WITHIN 5 "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${CLI} volume set ${V0} lazy-open on
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+sleep 2
+EXPECT "0" count_open "/test"
+TEST tester_send fd write 0 "test"
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+EXPECT "test" tester_send fd read 0 64
+# Even though read-after-open is disabled, use-anonymous-fd is also disabled,
+# so reads need to open the file first.
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+TEST tester_send fd open 1 "${M0}/test"
+EXPECT "2" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "1" count_open "/test"
+TEST tester_send fd close 1
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST ${CLI} volume set ${V0} read-after-open on
+TEST ${GFS} --volfile-id=/${V0} --volfile-server=${H0} ${M0};
+
+TEST tester_send fd open 0 "${M0}/test"
+EXPECT "0" count_open "/test"
+EXPECT "test" tester_send fd read 0 64
+EXPECT "1" count_open "/test"
+TEST tester_send fd close 0
+EXPECT_WITHIN 5 "0" count_open "/test"
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST tester_stop
+
+cleanup
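For orientation: the tester binary built below from tester.c and tester-fd.c reads one command per line on stdin and answers each with "OK ..." or "ERR <errno>"; the test drives it through a pair of FIFOs via tester_send. Run by hand (illustration only; fd numbers vary), a session could look like:

# Illustration only, outside the FIFO plumbing used by the test above.
touch /tmp/ob-demo
printf 'fd open 0 /tmp/ob-demo\nfd write 0 hello\nfd close 0\nfd open 0 /tmp/ob-demo\nfd read 0 64\nfd close 0\nquit\n' | ./tester
# Expected replies, one per command plus a final OK on exit (fd numbers vary):
#   OK <fd>   OK 5   OK   OK <fd>   OK 5 hello   OK   OK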
diff --git a/tests/basic/open-behind/tester-fd.c b/tests/basic/open-behind/tester-fd.c
new file mode 100644
index 00000000000..00f02bc5b0a
--- /dev/null
+++ b/tests/basic/open-behind/tester-fd.c
@@ -0,0 +1,99 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "tester.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+static int32_t
+fd_open(context_t *ctx, command_t *cmd)
+{
+ obj_t *obj;
+ int32_t fd;
+
+ obj = cmd->args[0].obj.ref;
+
+ fd = open(cmd->args[1].str.data, O_RDWR);
+ if (fd < 0) {
+ return error(errno, "open() failed");
+ }
+
+ obj->type = OBJ_TYPE_FD;
+ obj->fd = fd;
+
+ out_ok("%d", fd);
+
+ return 0;
+}
+
+static int32_t
+fd_close(context_t *ctx, command_t *cmd)
+{
+ obj_t *obj;
+
+ obj = cmd->args[0].obj.ref;
+ obj->type = OBJ_TYPE_NONE;
+
+ if (close(obj->fd) != 0) {
+ return error(errno, "close() failed");
+ }
+
+ out_ok();
+
+ return 0;
+}
+
+static int32_t
+fd_write(context_t *ctx, command_t *cmd)
+{
+ ssize_t len, ret;
+
+ len = strlen(cmd->args[1].str.data);
+ ret = write(cmd->args[0].obj.ref->fd, cmd->args[1].str.data, len);
+ if (ret < 0) {
+ return error(errno, "write() failed");
+ }
+
+ out_ok("%zd", ret);
+
+ return 0;
+}
+
+static int32_t
+fd_read(context_t *ctx, command_t *cmd)
+{
+ char data[cmd->args[1].num.value + 1];
+ ssize_t ret;
+
+ ret = read(cmd->args[0].obj.ref->fd, data, cmd->args[1].num.value);
+ if (ret < 0) {
+ return error(errno, "read() failed");
+ }
+
+ data[ret] = 0;
+
+ out_ok("%zd %s", ret, data);
+
+ return 0;
+}
+
+command_t fd_commands[] = {
+ {"open", fd_open, CMD_ARGS(ARG_VAL(OBJ_TYPE_NONE), ARG_STR(1024))},
+ {"close", fd_close, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD))},
+ {"write", fd_write, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_STR(1024))},
+ {"read", fd_read, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_NUM(0, 1024))},
+ CMD_END};
diff --git a/tests/basic/open-behind/tester.c b/tests/basic/open-behind/tester.c
new file mode 100644
index 00000000000..b2da71c8385
--- /dev/null
+++ b/tests/basic/open-behind/tester.c
@@ -0,0 +1,444 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include "tester.h"
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <errno.h>
+
+static void *
+mem_alloc(size_t size)
+{
+ void *ptr;
+
+ ptr = malloc(size);
+ if (ptr == NULL) {
+ error(ENOMEM, "Failed to allocate memory (%zu bytes)", size);
+ }
+
+ return ptr;
+}
+
+static void
+mem_free(void *ptr)
+{
+ free(ptr);
+}
+
+static bool
+buffer_create(context_t *ctx, size_t size)
+{
+ ctx->buffer.base = mem_alloc(size);
+ if (ctx->buffer.base == NULL) {
+ return false;
+ }
+
+ ctx->buffer.size = size;
+ ctx->buffer.len = 0;
+ ctx->buffer.pos = 0;
+
+ return true;
+}
+
+static void
+buffer_destroy(context_t *ctx)
+{
+ mem_free(ctx->buffer.base);
+ ctx->buffer.size = 0;
+ ctx->buffer.len = 0;
+}
+
+static int32_t
+buffer_get(context_t *ctx)
+{
+ ssize_t len;
+
+ if (ctx->buffer.pos >= ctx->buffer.len) {
+ len = read(0, ctx->buffer.base, ctx->buffer.size);
+ if (len < 0) {
+ return error(errno, "read() failed");
+ }
+ if (len == 0) {
+ return 0;
+ }
+
+ ctx->buffer.len = len;
+ ctx->buffer.pos = 0;
+ }
+
+ return ctx->buffer.base[ctx->buffer.pos++];
+}
+
+static int32_t
+str_skip_spaces(context_t *ctx, int32_t current)
+{
+ while ((current > 0) && (current != '\n') && isspace(current)) {
+ current = buffer_get(ctx);
+ }
+
+ return current;
+}
+
+static int32_t
+str_token(context_t *ctx, char *buffer, uint32_t size, int32_t current)
+{
+ uint32_t len;
+
+ current = str_skip_spaces(ctx, current);
+
+ len = 0;
+ while ((size > 0) && (current > 0) && (current != '\n') &&
+ !isspace(current)) {
+ len++;
+ *buffer++ = current;
+ size--;
+ current = buffer_get(ctx);
+ }
+
+ if (len == 0) {
+ return error(ENODATA, "Expecting a token");
+ }
+
+ if (size == 0) {
+ return error(ENOBUFS, "Token too long");
+ }
+
+ *buffer = 0;
+
+ return current;
+}
+
+static int32_t
+str_number(context_t *ctx, uint64_t min, uint64_t max, uint64_t *value,
+ int32_t current)
+{
+ char text[32], *ptr;
+ uint64_t num;
+
+ current = str_token(ctx, text, sizeof(text), current);
+ if (current > 0) {
+ num = strtoul(text, &ptr, 0);
+ if ((*ptr != 0) || (num < min) || (num > max)) {
+ return error(ERANGE, "Invalid number");
+ }
+ *value = num;
+ }
+
+ return current;
+}
+
+static int32_t
+str_eol(context_t *ctx, int32_t current)
+{
+ current = str_skip_spaces(ctx, current);
+ if (current != '\n') {
+ return error(EINVAL, "Expecting end of command");
+ }
+
+ return current;
+}
+
+static void
+str_skip(context_t *ctx, int32_t current)
+{
+ while ((current > 0) && (current != '\n')) {
+ current = buffer_get(ctx);
+ }
+}
+
+static int32_t
+cmd_parse_obj(context_t *ctx, arg_t *arg, int32_t current)
+{
+ obj_t *obj;
+ uint64_t id;
+
+ current = str_number(ctx, 0, ctx->obj_count, &id, current);
+ if (current <= 0) {
+ return current;
+ }
+
+ obj = &ctx->objs[id];
+ if (obj->type != arg->obj.type) {
+ if (obj->type != OBJ_TYPE_NONE) {
+ return error(EBUSY, "Object is in use");
+ }
+ return error(ENOENT, "Object is not defined");
+ }
+
+ arg->obj.ref = obj;
+
+ return current;
+}
+
+static int32_t
+cmd_parse_num(context_t *ctx, arg_t *arg, int32_t current)
+{
+ return str_number(ctx, arg->num.min, arg->num.max, &arg->num.value,
+ current);
+}
+
+static int32_t
+cmd_parse_str(context_t *ctx, arg_t *arg, int32_t current)
+{
+ return str_token(ctx, arg->str.data, arg->str.size, current);
+}
+
+static int32_t
+cmd_parse_args(context_t *ctx, command_t *cmd, int32_t current)
+{
+ arg_t *arg;
+
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_OBJ:
+ current = cmd_parse_obj(ctx, arg, current);
+ break;
+ case ARG_TYPE_NUM:
+ current = cmd_parse_num(ctx, arg, current);
+ break;
+ case ARG_TYPE_STR:
+ current = cmd_parse_str(ctx, arg, current);
+ break;
+ default:
+ return error(EINVAL, "Unknown argument type");
+ }
+ }
+
+ if (current < 0) {
+ return current;
+ }
+
+ current = str_eol(ctx, current);
+ if (current <= 0) {
+ return error(EINVAL, "Syntax error");
+ }
+
+ return cmd->handler(ctx, cmd);
+}
+
+static int32_t
+cmd_parse(context_t *ctx, command_t *cmds)
+{
+ char text[32];
+ command_t *cmd;
+ int32_t current;
+
+ cmd = cmds;
+ do {
+ current = str_token(ctx, text, sizeof(text), buffer_get(ctx));
+ if (current <= 0) {
+ return current;
+ }
+
+ while (cmd->name != NULL) {
+ if (strcmp(cmd->name, text) == 0) {
+ if (cmd->handler != NULL) {
+ return cmd_parse_args(ctx, cmd, current);
+ }
+ cmd = cmd->cmds;
+ break;
+ }
+ cmd++;
+ }
+ } while (cmd->name != NULL);
+
+ str_skip(ctx, current);
+
+ return error(ENOTSUP, "Unknown command");
+}
+
+static void
+cmd_fini(context_t *ctx, command_t *cmds)
+{
+ command_t *cmd;
+ arg_t *arg;
+
+ for (cmd = cmds; cmd->name != NULL; cmd++) {
+ if (cmd->handler == NULL) {
+ cmd_fini(ctx, cmd->cmds);
+ } else {
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_STR:
+ mem_free(arg->str.data);
+ arg->str.data = NULL;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+}
+
+static bool
+cmd_init(context_t *ctx, command_t *cmds)
+{
+ command_t *cmd;
+ arg_t *arg;
+
+ for (cmd = cmds; cmd->name != NULL; cmd++) {
+ if (cmd->handler == NULL) {
+ if (!cmd_init(ctx, cmd->cmds)) {
+ return false;
+ }
+ } else {
+ for (arg = cmd->args; arg->type != ARG_TYPE_NONE; arg++) {
+ switch (arg->type) {
+ case ARG_TYPE_STR:
+ arg->str.data = mem_alloc(arg->str.size);
+ if (arg->str.data == NULL) {
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool
+objs_create(context_t *ctx, uint32_t count)
+{
+ uint32_t i;
+
+ ctx->objs = mem_alloc(sizeof(obj_t) * count);
+ if (ctx->objs == NULL) {
+ return false;
+ }
+ ctx->obj_count = count;
+
+ for (i = 0; i < count; i++) {
+ ctx->objs[i].type = OBJ_TYPE_NONE;
+ }
+
+ return true;
+}
+
+static int32_t
+objs_destroy(context_t *ctx)
+{
+ uint32_t i;
+ int32_t err;
+
+ err = 0;
+ for (i = 0; i < ctx->obj_count; i++) {
+ if (ctx->objs[i].type != OBJ_TYPE_NONE) {
+ err = error(ENOTEMPTY, "Objects not destroyed");
+ break;
+ }
+ }
+
+ mem_free(ctx->objs);
+ ctx->objs = NULL;
+ ctx->obj_count = 0;
+
+ return err;
+}
+
+static context_t *
+init(size_t size, uint32_t objs, command_t *cmds)
+{
+ context_t *ctx;
+
+ ctx = mem_alloc(sizeof(context_t));
+ if (ctx == NULL) {
+ goto failed;
+ }
+
+ if (!buffer_create(ctx, size)) {
+ goto failed_ctx;
+ }
+
+ if (!objs_create(ctx, objs)) {
+ goto failed_buffer;
+ }
+
+ if (!cmd_init(ctx, cmds)) {
+ goto failed_objs;
+ }
+
+ ctx->active = true;
+
+ return ctx;
+
+failed_objs:
+ cmd_fini(ctx, cmds);
+ objs_destroy(ctx);
+failed_buffer:
+ buffer_destroy(ctx);
+failed_ctx:
+ mem_free(ctx);
+failed:
+ return NULL;
+}
+
+static int32_t
+fini(context_t *ctx, command_t *cmds)
+{
+ int32_t ret;
+
+ cmd_fini(ctx, cmds);
+ buffer_destroy(ctx);
+
+ ret = objs_destroy(ctx);
+
+ ctx->active = false;
+
+ return ret;
+}
+
+static int32_t
+exec_quit(context_t *ctx, command_t *cmd)
+{
+ ctx->active = false;
+
+ return 0;
+}
+
+static command_t commands[] = {{"fd", NULL, CMD_SUB(fd_commands)},
+ {"quit", exec_quit, CMD_ARGS()},
+ CMD_END};
+
+int32_t
+main(int32_t argc, char *argv[])
+{
+ context_t *ctx;
+ int32_t res;
+
+ ctx = init(1024, 16, commands);
+ if (ctx == NULL) {
+ return 1;
+ }
+
+ do {
+ res = cmd_parse(ctx, commands);
+ if (res < 0) {
+ out_err(-res);
+ }
+ } while (ctx->active);
+
+ res = fini(ctx, commands);
+ if (res >= 0) {
+ out_ok();
+ return 0;
+ }
+
+ out_err(-res);
+
+ return 1;
+}
diff --git a/tests/basic/open-behind/tester.h b/tests/basic/open-behind/tester.h
new file mode 100644
index 00000000000..64e940c78fc
--- /dev/null
+++ b/tests/basic/open-behind/tester.h
@@ -0,0 +1,145 @@
+/*
+ Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#ifndef __TESTER_H__
+#define __TESTER_H__
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdbool.h>
+
+enum _obj_type;
+typedef enum _obj_type obj_type_t;
+
+enum _arg_type;
+typedef enum _arg_type arg_type_t;
+
+struct _buffer;
+typedef struct _buffer buffer_t;
+
+struct _obj;
+typedef struct _obj obj_t;
+
+struct _context;
+typedef struct _context context_t;
+
+struct _arg;
+typedef struct _arg arg_t;
+
+struct _command;
+typedef struct _command command_t;
+
+enum _obj_type { OBJ_TYPE_NONE, OBJ_TYPE_FD };
+
+enum _arg_type { ARG_TYPE_NONE, ARG_TYPE_OBJ, ARG_TYPE_NUM, ARG_TYPE_STR };
+
+struct _buffer {
+ char *base;
+ uint32_t size;
+ uint32_t len;
+ uint32_t pos;
+};
+
+struct _obj {
+ obj_type_t type;
+ union {
+ int32_t fd;
+ };
+};
+
+struct _context {
+ obj_t *objs;
+ buffer_t buffer;
+ uint32_t obj_count;
+ bool active;
+};
+
+struct _arg {
+ arg_type_t type;
+ union {
+ struct {
+ obj_type_t type;
+ obj_t *ref;
+ } obj;
+ struct {
+ uint64_t value;
+ uint64_t min;
+ uint64_t max;
+ } num;
+ struct {
+ uint32_t size;
+ char *data;
+ } str;
+ };
+};
+
+struct _command {
+ const char *name;
+ int32_t (*handler)(context_t *ctx, command_t *cmd);
+ union {
+ arg_t *args;
+ command_t *cmds;
+ };
+};
+
+#define msg(_stream, _fmt, _args...) \
+ do { \
+ fprintf(_stream, _fmt "\n", ##_args); \
+ fflush(_stream); \
+ } while (0)
+
+#define msg_out(_fmt, _args...) msg(stdout, _fmt, ##_args)
+#define msg_err(_err, _fmt, _args...) \
+ ({ \
+ int32_t __msg_err = (_err); \
+ msg(stderr, "[%4u:%-15s] " _fmt, __LINE__, __FUNCTION__, __msg_err, \
+ ##_args); \
+ -__msg_err; \
+ })
+
+#define error(_err, _fmt, _args...) msg_err(_err, "E(%4d) " _fmt, ##_args)
+#define warn(_err, _fmt, _args...) msg_err(_err, "W(%4d) " _fmt, ##_args)
+#define info(_err, _fmt, _args...) msg_err(_err, "I(%4d) " _fmt, ##_args)
+
+#define out_ok(_args...) msg_out("OK " _args)
+#define out_err(_err) msg_out("ERR %d", _err)
+
+#define ARG_END \
+ { \
+ ARG_TYPE_NONE \
+ }
+
+#define CMD_ARGS1(_x, _args...) \
+ .args = (arg_t[]) { _args }
+#define CMD_ARGS(_args...) CMD_ARGS1(, ##_args, ARG_END)
+
+#define CMD_SUB(_cmds) .cmds = _cmds
+
+#define CMD_END \
+ { \
+ NULL, NULL, CMD_SUB(NULL) \
+ }
+
+#define ARG_VAL(_type) \
+ { \
+ ARG_TYPE_OBJ, .obj = {.type = _type } \
+ }
+#define ARG_NUM(_min, _max) \
+ { \
+ ARG_TYPE_NUM, .num = {.min = _min, .max = _max } \
+ }
+#define ARG_STR(_size) \
+ { \
+ ARG_TYPE_STR, .str = {.size = _size } \
+ }
+
+extern command_t fd_commands[];
+
+#endif /* __TESTER_H__ */
\ No newline at end of file
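For context, a minimal sketch of how a sub-command table could be declared with the CMD_* and ARG_* macros above. This is illustrative only and not part of the patch; the real fd_commands table referenced by tester.c is defined in a separate file of this change, so the command name, handler and argument layout below are assumptions.

/* Hypothetical example, not part of this patch. */
#include "tester.h"

static int32_t
exec_fd_dummy(context_t *ctx, command_t *cmd)
{
    /* cmd->args[0] would hold an fd object slot, cmd->args[1] a number in [0, 10]. */
    (void)ctx;
    (void)cmd;
    return 0;
}

command_t example_commands[] = {
    {"dummy", exec_fd_dummy, CMD_ARGS(ARG_VAL(OBJ_TYPE_FD), ARG_NUM(0, 10))},
    CMD_END};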
diff --git a/tests/basic/playground/template-xlator-sanity.t b/tests/basic/playground/template-xlator-sanity.t
index c3090dae5a8..1c665502bfe 100755
--- a/tests/basic/playground/template-xlator-sanity.t
+++ b/tests/basic/playground/template-xlator-sanity.t
@@ -15,6 +15,7 @@ end-volume
volume template
type playground/template
subvolumes posix
+ option dummy 13
end-volume
EOF
@@ -22,6 +23,21 @@ TEST glusterfs -f $B0/template.vol $M0
TEST $(dirname $0)/../rpc-coverage.sh --no-locks $M0
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+# Take statedump to get maximum code coverage
+pid=$(ps auxww | grep glusterfs | grep -E "template.vol" | awk '{print $2}' | head -1)
+
+TEST generate_statedump $pid
+
+# For monitor output
+kill -USR2 $pid
+
+# Handle SIGHUP and reconfigure
+sed -i -e 's/dummy 13/dummy 42/g' $B0/template.vol
+kill -HUP $pid
+
+# for calling 'fini()'
+kill -TERM $pid
+
+force_umount $M0
cleanup;
diff --git a/tests/basic/posix/shared-statfs.t b/tests/basic/posix/shared-statfs.t
index 33439562ec9..0e4a1bb409f 100644
--- a/tests/basic/posix/shared-statfs.t
+++ b/tests/basic/posix/shared-statfs.t
@@ -20,15 +20,18 @@ TEST mkdir -p $B0/${V0}1 $B0/${V0}2
TEST MOUNT_LOOP $LO1 $B0/${V0}1
TEST MOUNT_LOOP $LO2 $B0/${V0}2
+total_brick_blocks=$(df -P $B0/${V0}1 $B0/${V0}2 | tail -2 | awk '{sum = sum+$2}END{print sum}')
+#Account for rounding error
+brick_blocks_two_percent_less=$((total_brick_blocks*98/100))
# Create a subdir in mountpoint and use that for volume.
TEST $CLI volume create $V0 $H0:$B0/${V0}1/1 $H0:$B0/${V0}2/1;
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" online_brick_count
TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
-total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+total_mount_blocks=$(df -P $M0 | tail -1 | awk '{ print $2}')
# Keeping the size less than 200M mainly because XFS will use
# some storage in brick to keep its own metadata.
-TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
+TEST [ $total_mount_blocks -gt $brick_blocks_two_percent_less -a $total_mount_blocks -lt 200000 ]
TEST force_umount $M0
@@ -41,8 +44,8 @@ TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1/2 $H0:$B0/${V0}2/2 $H0:$B0/${V0}1/
TEST $CLI volume start $V0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" online_brick_count
TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
-total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
-TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
+total_mount_blocks=$(df -P $M0 | tail -1 | awk '{ print $2}')
+TEST [ $total_mount_blocks -gt $brick_blocks_two_percent_less -a $total_mount_blocks -lt 200000 ]
TEST force_umount $M0
TEST $CLI volume stop $V0
diff --git a/tests/basic/posix/zero-fill-enospace.c b/tests/basic/posix/zero-fill-enospace.c
index 1371ff59a5f..b1f142c6be9 100644
--- a/tests/basic/posix/zero-fill-enospace.c
+++ b/tests/basic/posix/zero-fill-enospace.c
@@ -1,4 +1,5 @@
#include <stdio.h>
+#include <stdlib.h>
#include <glusterfs/api/glfs.h>
#include <glusterfs/api/glfs-handles.h>
@@ -8,7 +9,7 @@ main(int argc, char *argv[])
glfs_t *fs = NULL;
glfs_fd_t *fd = NULL;
int ret = 1;
- int size = 0;
+ off_t size = 0;
if (argc != 6) {
fprintf(stderr,
@@ -45,12 +46,12 @@ main(int argc, char *argv[])
goto out;
}
- size = atoi(argv[5]);
+ size = strtol(argv[5], NULL, 10);
if (size < 0) {
fprintf(stderr, "Wrong size %s", argv[5]);
goto out;
}
- ret = glfs_zerofill(fd, 0, atoi(argv[5]));
+ ret = glfs_zerofill(fd, 0, size);
if (ret <= 0) {
fprintf(stderr, "glfs_zerofill: returned %d\n", ret);
goto out;
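The hunk above replaces atoi() with strtol() and widens size to off_t so that, on 64-bit platforms, sizes above INT_MAX are not silently truncated before being passed to glfs_zerofill(). A stricter parser (a sketch only, not part of the patch; parse_size is a hypothetical helper) would also reject overflow and trailing garbage:

#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>

/* Sketch of a stricter size parser: fails on overflow, junk or negative values. */
static int
parse_size(const char *str, off_t *size)
{
    char *end = NULL;
    long long val;

    errno = 0;
    val = strtoll(str, &end, 10);
    if (errno != 0 || end == str || *end != '\0' || val < 0)
        return -1;

    *size = (off_t)val;
    return 0;
}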
diff --git a/tests/basic/quick-read-with-upcall.t b/tests/basic/quick-read-with-upcall.t
index 318e93a1bf0..dfb751dfcdb 100644
--- a/tests/basic/quick-read-with-upcall.t
+++ b/tests/basic/quick-read-with-upcall.t
@@ -15,8 +15,8 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1..2};
TEST $CLI volume start $V0
# Mount FUSE without selinux:
-TEST glusterfs -s $H0 --volfile-id $V0 $M0;
-TEST glusterfs -s $H0 --volfile-id $V0 $M1;
+TEST glusterfs -s $H0 --volfile-id $V0 --direct-io-mode=enable $M0;
+TEST glusterfs -s $H0 --volfile-id $V0 --direct-io-mode=enable $M1;
D0="test-message0";
D1="test-message1";
@@ -38,11 +38,13 @@ EXPECT "$D1" cat $M0/test.txt
EXPECT "$D0" cat $M1/test.txt
sleep 1
+
+# TODO: This line normally fails
EXPECT "$D1" cat $M1/test.txt
TEST $CLI volume set $V0 features.cache-invalidation on
-TEST $CLI volume set $V0 performance.qr-cache-timeout 60
-TEST $CLI volume set $V0 performance.md-cache-timeout 60
+TEST $CLI volume set $V0 performance.quick-read-cache-timeout 15
+TEST $CLI volume set $V0 performance.md-cache-timeout 15
TEST write_to "$M0/test1.txt" "$D0"
EXPECT "$D0" cat $M0/test1.txt
@@ -55,9 +57,10 @@ EXPECT "$D0" cat $M1/test1.txt
sleep 1
EXPECT "$D0" cat $M1/test1.txt
-sleep 60
+sleep 30
EXPECT "$D1" cat $M1/test1.txt
+TEST $CLI volume set $V0 performance.quick-read-cache-invalidation on
TEST $CLI volume set $V0 performance.cache-invalidation on
TEST write_to "$M0/test2.txt" "$D0"
diff --git a/tests/basic/rpc-coverage.sh b/tests/basic/rpc-coverage.sh
index 4b6759a59eb..6203f0ac7cb 100755
--- a/tests/basic/rpc-coverage.sh
+++ b/tests/basic/rpc-coverage.sh
@@ -419,9 +419,15 @@ function test_rmdir()
rm -rf $PFX || fail "rm -rf"
}
+function test_statvfs()
+{
+ df $DIR 2>&1 || fail "df"
+}
+
function run_tests()
{
+ test_statvfs;
test_mkdir;
test_create;
test_statfs;
@@ -436,15 +442,15 @@ function run_tests()
test_rename;
test_chmod;
test_chown;
- test_utimes;
- if [ "$run_lock_tests" = "1" ]; then
- test_locks;
- fi
test_readdir;
test_setxattr;
test_listxattr;
test_getxattr;
test_removexattr;
+ if [ "$run_lock_tests" = "1" ]; then
+ test_locks;
+ fi
+ test_utimes;
test_unlink;
test_rmdir;
}
diff --git a/tests/basic/rpc-coverage.t b/tests/basic/rpc-coverage.t
index a76ba7084eb..2c1bcd5a63a 100755
--- a/tests/basic/rpc-coverage.t
+++ b/tests/basic/rpc-coverage.t
@@ -9,11 +9,11 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
-TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6,7,8};
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6,7,8,9};
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
-EXPECT '8' brick_count $V0
+EXPECT '9' brick_count $V0
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
diff --git a/tests/basic/sdfs-sanity.t b/tests/basic/sdfs-sanity.t
index f25376c3cad..16d0bed866f 100644
--- a/tests/basic/sdfs-sanity.t
+++ b/tests/basic/sdfs-sanity.t
@@ -19,4 +19,10 @@ TEST $GFS -s $H0 --volfile-id $V0 $M1;
# create operations
TEST $(dirname $0)/rpc-coverage.sh $M1
+TEST cp $(dirname ${0})/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
cleanup;
diff --git a/tests/basic/ec/seek.c b/tests/basic/seek.c
index 54fa6f463af..54fa6f463af 100644
--- a/tests/basic/ec/seek.c
+++ b/tests/basic/seek.c
diff --git a/tests/basic/shd-mux-afr.t b/tests/basic/shd-mux-afr.t
new file mode 100644
index 00000000000..cf300c148bb
--- /dev/null
+++ b/tests/basic/shd-mux-afr.t
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Create one more volume
+TEST $CLI volume create ${V0}_1 replica 3 $H0:$B0/${V0}_1{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_1
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_1 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_1 cluster.eager-lock off
+TEST $CLI volume set ${V0}_1 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_1 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_10
+TEST kill_brick ${V0}_1 $H0 $B0/${V0}_14
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count ${V0}_1
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_1 force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_1
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_1
+TEST $CLI volume delete ${V0}_1
+
+#Check that the stop succeeded and detached the volume without restarting it
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+cleanup
diff --git a/tests/basic/shd-mux-ec.t b/tests/basic/shd-mux-ec.t
new file mode 100644
index 00000000000..ef4d65018d3
--- /dev/null
+++ b/tests/basic/shd-mux-ec.t
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+shd_pid=$(get_shd_mux_pid $V0)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Now create an ec volume and check that mux works
+TEST $CLI volume create ${V0}_2 disperse 6 redundancy 2 $H0:$B0/${V0}_2{0,1,2,3,4,5}
+TEST $CLI volume start ${V0}_2
+
+#Check whether the shd has multiplexed or not
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}_2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid ${V0}
+
+TEST $CLI volume set ${V0}_2 cluster.background-self-heal-count 0
+TEST $CLI volume set ${V0}_2 cluster.eager-lock off
+TEST $CLI volume set ${V0}_2 performance.flush-behind off
+TEST $GFS --volfile-id=/${V0}_2 --volfile-server=$H0 $M1
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_20
+TEST kill_brick ${V0}_2 $H0 $B0/${V0}_22
+
+TEST touch $M0/foo{1..100}
+TEST touch $M1/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^404$" get_pending_heal_count ${V0}_2
+
+TEST $CLI volume start ${V0} force
+TEST $CLI volume start ${V0}_2 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count ${V0}_2
+
+TEST rm -rf $M0/*
+TEST rm -rf $M1/*
+
+
+#Stop the volume
+TEST $CLI volume stop ${V0}_2
+TEST $CLI volume delete ${V0}_2
+
+#Check that the stop succeeded and detached the volume without restarting it
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^${shd_pid}$" get_shd_mux_pid $V0
+
+#Check that the thread count drops to zero for ec-related threads
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count goes back to the earlier number after stopping
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
diff --git a/tests/basic/trace.t b/tests/basic/trace.t
new file mode 100755
index 00000000000..01e7c9e0a25
--- /dev/null
+++ b/tests/basic/trace.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST mkdir -p $B0/single-brick
+cat > $B0/template.vol <<EOF
+volume posix
+ type storage/posix
+ option directory $B0/single-brick
+end-volume
+
+volume trace
+ type debug/trace
+ option log-file yes
+ option log-history yes
+ subvolumes posix
+end-volume
+EOF
+
+TEST glusterfs -f $B0/template.vol $M0
+
+TEST $(dirname $0)/rpc-coverage.sh --no-locks $M0
+
+# Take statedump to get maximum code coverage
+pid=$(ps auxww | grep glusterfs | grep -E "template.vol" | awk '{print $2}' | head -1)
+
+TEST generate_statedump $pid
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Now, use the glusterd way of enabling trace
+TEST glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+TEST $CLI volume set $V0 debug.trace marker
+TEST $CLI volume set $V0 debug.log-file yes
+#TEST $CLI volume set $V0 debug.log-history yes
+
+TEST $CLI volume start $V0;
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+TEST $(dirname $0)/rpc-coverage.sh --no-locks $M1
+cp $(dirname ${0})/gfapi/glfsxmp-coverage.c ./glfsxmp.c
+build_tester ./glfsxmp.c -lgfapi
+./glfsxmp $V0 $H0 > /dev/null
+cleanup_tester ./glfsxmp
+rm ./glfsxmp.c
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1
+
+cleanup;
diff --git a/tests/basic/uss.t b/tests/basic/uss.t
index 47deef66e24..09dd00ef995 100644
--- a/tests/basic/uss.t
+++ b/tests/basic/uss.t
@@ -36,6 +36,7 @@ TEST glusterd;
TEST pidof glusterd;
TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
+
TEST $CLI volume set $V0 nfs.disable false
@@ -374,6 +375,15 @@ TEST rm -f $M0/aaa;
TEST $CLI snapshot delete snap6;
+# drop the caches so that the dentry for "snap6"
+# is forgotten from the client cache.
+drop_cache $M0
+
+EXPECT_WITHIN 30 "5" count_snaps $M0;
+
+# This should fail, as snap6 just got deleted.
+TEST ! stat $M0/.history/snap6
+
TEST $CLI snapshot create snap6 $V0 no-timestamp
TEST ls $M0/.history;
@@ -384,4 +394,28 @@ TEST ls $M0/.history/snap6/;
TEST ! stat $M0/.history/snap6/aaa;
+TEST stat $M0
+
+# done with the tests; start cleaning things up
+TEST $CLI volume set $V0 features.uss disable
+
+TEST $CLI snapshot delete snap6;
+
+TEST $CLI snapshot delete snap5;
+
+TEST $CLI snapshot delete snap4;
+
+TEST $CLI snapshot delete snap3;
+
+TEST $CLI snapshot delete snap2;
+
+TEST $CLI snapshot delete snap1;
+
+# nfs client has already been unmounted at line 333
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+TEST $CLI volume stop $V0
+
+TEST $CLI volume delete $V0
+
cleanup;
diff --git a/tests/basic/volume-scale-shd-mux.t b/tests/basic/volume-scale-shd-mux.t
new file mode 100644
index 00000000000..102de22468e
--- /dev/null
+++ b/tests/basic/volume-scale-shd-mux.t
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TESTS_EXPECTED_IN_LOOP=6
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5}
+TEST $CLI volume set $V0 cluster.background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 performance.flush-behind off
+TEST $CLI volume start $V0
+
+for i in $(seq 1 2); do
+ TEST $CLI volume create ${V0}_afr$i replica 3 $H0:$B0/${V0}_afr${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_afr$i
+ TEST $CLI volume create ${V0}_ec$i disperse 6 redundancy 2 $H0:$B0/${V0}_ec${i}{0,1,2,3,4,5}
+ TEST $CLI volume start ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+#Check that the thread count equals number of volumes * number of ec subvolumes (2*6=12)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+#Check that the thread count equals number of volumes * number of afr subvolumes (3*6=18)
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{6,7,8};
+#Check that the thread count equals number of volumes * number of afr subvolumes plus 3 additional threads from the newly added bricks (3*6+3=21)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^21$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Remove the brick and check the detach is successful
+$CLI volume remove-brick $V0 $H0:$B0/${V0}{6,7,8} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "glusterfs_graph_cleanup"
+TEST $CLI volume add-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5};
+#Check that the thread count equals number of volumes * number of ec subvolumes plus 6 additional threads from the newly added bricks (2*6+6=18)
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^18$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+
+#Remove the brick and check the detach is successful
+$CLI volume remove-brick ${V0}_ec1 $H0:$B0/${V0}_ec1_add{0,1,2,3,4,5} force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^12$" number_healer_threads_shd $V0 "ec_shd_index_healer"
+
+
+for i in $(seq 1 2); do
+ TEST $CLI volume stop ${V0}_afr$i
+ TEST $CLI volume stop ${V0}_ec$i
+done
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}4
+
+TEST touch $M0/foo{1..100}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^204$" get_pending_heal_count $V0
+
+TEST $CLI volume start ${V0} force
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST rm -rf $M0/*
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+shd_pid=$(get_shd_mux_pid $V0)
+TEST $CLI volume create ${V0}_distribute1 $H0:$B0/${V0}_distribute10
+TEST $CLI volume start ${V0}_distribute1
+
+#Creating a non-replicate/non-ec volume should not have any effect on shd
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+EXPECT "^${shd_pid}$" get_shd_mux_pid $V0
+
+TEST mkdir $B0/add/
+#Now convert the distributed volume to replicate
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^9$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#scale down the volume
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^6$" number_healer_threads_shd $V0 "afr_shd_index_healer"
+
+#Before stopping the process, make sure there are no pending cleanup threads hanging
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" number_healer_threads_shd $V0 "glusterfs_graph_cleanup"
+
+TEST $CLI volume stop ${V0}
+TEST $CLI volume delete ${V0}
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+TEST rm -rf $B0/add/2 $B0/add/3
+
+#Now convert the distributed volume back to replicate and make sure that a new shd is spawned
+TEST $CLI volume add-brick ${V0}_distribute1 replica 3 $H0:$B0/add/{2..3};
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" shd_count
+EXPECT_WITHIN $HEAL_TIMEOUT "^3$" number_healer_threads_shd ${V0}_distribute1 "afr_shd_index_healer"
+
+#Now convert the replica volume to distribute again and make sure the shd is now stopped
+TEST $CLI volume remove-brick ${V0}_distribute1 replica 1 $H0:$B0/add/{2..3} force
+TEST rm -rf $B0/add/
+
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^0$" shd_count
+
+cleanup
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1708929
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=1708929
diff --git a/tests/basic/volume-snap-scheduler.t b/tests/basic/volume-snap-scheduler.t
new file mode 100644
index 00000000000..a638c5cc46a
--- /dev/null
+++ b/tests/basic/volume-snap-scheduler.t
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+TEST $CLI volume start $V0
+
+## Create, start and mount meta_volume as
+## snap_scheduler expects shared storage to be enabled.
+## This test is very basic in nature, not creating any snapshot,
+## and its purpose is to validate the snap scheduling commands.
+
+TEST $CLI volume create $META_VOL replica 3 $H0:$B0/${META_VOL}{1,2,3};
+TEST $CLI volume start $META_VOL
+TEST mkdir -p $META_MNT
+TEST glusterfs -s $H0 --volfile-id $META_VOL $META_MNT
+
+##function to check status
+function check_status_scheduler()
+{
+ local key=$1
+ snap_scheduler.py status | grep -F "$key" | wc -l
+}
+
+##Basic snap_scheduler command test init/enable/disable/list
+
+TEST snap_scheduler.py init
+
+TEST snap_scheduler.py enable
+
+EXPECT 1 check_status_scheduler "Enabled"
+
+TEST snap_scheduler.py disable
+
+EXPECT 1 check_status_scheduler "Disabled"
+
+TEST snap_scheduler.py list
+
+TEST $CLI volume stop $V0;
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/basic/volume-snapshot-xml.t b/tests/basic/volume-snapshot-xml.t
index 3ba25f4ddbb..ff63b54538d 100755
--- a/tests/basic/volume-snapshot-xml.t
+++ b/tests/basic/volume-snapshot-xml.t
@@ -1,13 +1,9 @@
#!/bin/bash
. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
. $(dirname $0)/../snapshot.rc
-function get-xml()
-{
- $CLI $1 --xml | xmllint --format - | grep $2 | sed 's/\(<"$2">\|<\/"$2">\)//g'
-}
-
cleanup;
TEST verify_lvm_version;
TEST glusterd;
diff --git a/tests/basic/volume-status.t b/tests/basic/volume-status.t
index a79e202d4ab..01d7ebf6c07 100644
--- a/tests/basic/volume-status.t
+++ b/tests/basic/volume-status.t
@@ -34,6 +34,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" nfs_up_status
## Mount FUSE
TEST $GFS -s $H0 --volfile-id $V0 $M0;
+TEST touch $M0/file{1..20}
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" gluster_fd_status
@@ -57,6 +58,8 @@ function test_nfs_cmds () {
for cmd in ${nfs_cmds[@]}; do
$CLI volume status $V0 nfs $cmd
(( ret += $? ))
+ $CLI volume status $V0 nfs $cmd --xml
+ (( ret += $? ))
done
return $ret
}
@@ -67,6 +70,8 @@ function test_shd_cmds () {
for cmd in ${shd_cmds[@]}; do
$CLI volume status $V0 shd $cmd
(( ret += $? ))
+ $CLI volume status $V0 shd $cmd --xml
+ (( ret += $? ))
done
return $ret
}
@@ -78,14 +83,29 @@ function test_brick_cmds () {
for i in {1..2}; do
$CLI volume status $V0 $H0:$B0/${V0}$i $cmd
(( ret += $? ))
+ $CLI volume status $V0 $H0:$B0/${V0}$i $cmd --xml
+ (( ret += $? ))
done
done
return $ret
}
+function test_status_cmds () {
+ local ret=0
+ declare -a cmds=("detail" "clients" "mem" "inode" "fd" "callpool" "tasks" "client-list")
+ for cmd in ${cmds[@]}; do
+ $CLI volume status $V0 $cmd
+ (( ret += $? ))
+ $CLI volume status $V0 $cmd --xml
+ (( ret += $? ))
+ done
+ return $ret
+}
+
TEST test_shd_cmds;
TEST test_nfs_cmds;
TEST test_brick_cmds;
+TEST test_status_cmds;
## Before killing daemon to avoid deadlocks
diff --git a/tests/basic/volume.t b/tests/basic/volume.t
index 35ad55c3c5c..27fe093d07d 100755..100644
--- a/tests/basic/volume.t
+++ b/tests/basic/volume.t
@@ -11,7 +11,6 @@ TEST $CLI volume info;
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
-
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
EXPECT '6' brick_count $V0
@@ -25,10 +24,37 @@ EXPECT '9' brick_count $V0
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}{1,2,3} force;
EXPECT '6' brick_count $V0
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume top $V0 read-perf bs 4096 count 1000
+TEST $CLI volume top $V0 write-perf bs 1048576 count 2
+
+TEST touch $M0/foo
+
+# statedump path should be a directory, setting it to a file path should fail
+
+TEST ! $CLI v set $V0 server.statedump-path $M0/foo;
+EXPECT '/var/run/gluster' $CLI v get $V0 server.statedump-path
+
+#set the statedump path to an existing directory which should succeed
+TEST mkdir $D0/level;
+TEST $CLI v set $V0 server.statedump-path $D0/level
+EXPECT '/level' volinfo_field $V0 'server.statedump-path'
+
+ret=$(ls $D0/level | wc -l);
+TEST [ $ret == 0 ]
+TEST $CLI v statedump $V0;
+ret=$(ls $D0/level | wc -l);
+TEST ! [ $ret == 0 ]
+
+#set the statedump path to a non-existing directory which should fail
+TEST ! $CLI v set $V0 server.statedump-path /root/test
+EXPECT '/level' volinfo_field $V0 'server.statedump-path'
+
+TEST rm -rf $D0/level
+
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status'
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
+TEST $CLI volume delete $V0
+TEST ! $CLI volume info $V0
cleanup;
diff --git a/tests/bitrot/br-signer-threads-config-1797869.t b/tests/bitrot/br-signer-threads-config-1797869.t
new file mode 100644
index 00000000000..657ef3eedaf
--- /dev/null
+++ b/tests/bitrot/br-signer-threads-config-1797869.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../cluster.rc
+
+function get_bitd_count_1 {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H1 | wc -l
+}
+
+function get_bitd_count_2 {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H2 | wc -l
+}
+
+function get_bitd_pid_1 {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H1 | awk '{print $2}'
+}
+
+function get_bitd_pid_2 {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | grep $H2 | awk '{print $2}'
+}
+
+function get_signer_th_count_1 {
+ ps -eL | grep $(get_bitd_pid_1) | grep glfs_brpobj | wc -l
+}
+
+function get_signer_th_count_2 {
+ ps -eL | grep $(get_bitd_pid_2) | grep glfs_brpobj | wc -l
+}
+
+cleanup;
+
+TEST launch_cluster 2
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count;
+
+TEST $CLI_1 volume create $V0 $H1:$B1
+TEST $CLI_1 volume create $V1 $H2:$B2
+EXPECT 'Created' volinfo_field_1 $V0 'Status';
+EXPECT 'Created' volinfo_field_1 $V1 'Status';
+
+TEST $CLI_1 volume start $V0
+TEST $CLI_1 volume start $V1
+EXPECT 'Started' volinfo_field_1 $V0 'Status';
+EXPECT 'Started' volinfo_field_1 $V1 'Status';
+
+#Enable bitrot
+TEST $CLI_1 volume bitrot $V0 enable
+TEST $CLI_1 volume bitrot $V1 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count_2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_2
+
+old_bitd_pid_1=$(get_bitd_pid_1)
+old_bitd_pid_2=$(get_bitd_pid_2)
+TEST $CLI_1 volume bitrot $V0 signer-threads 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_signer_th_count_1
+EXPECT_NOT "$old_bitd_pid_1" get_bitd_pid_1;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "4" get_signer_th_count_2
+EXPECT "$old_bitd_pid_2" get_bitd_pid_2;
+
+old_bitd_pid_1=$(get_bitd_pid_1)
+old_bitd_pid_2=$(get_bitd_pid_2)
+TEST $CLI_1 volume bitrot $V1 signer-threads 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" get_signer_th_count_2
+EXPECT_NOT "$old_bitd_pid_2" get_bitd_pid_2;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_signer_th_count_1
+EXPECT "$old_bitd_pid_1" get_bitd_pid_1;
+
+cleanup;
diff --git a/tests/bitrot/bug-1373520.t b/tests/bitrot/bug-1373520.t
index 5c53ebb82e2..6af5124e86e 100644
--- a/tests/bitrot/bug-1373520.t
+++ b/tests/bitrot/bug-1373520.t
@@ -59,6 +59,9 @@ TEST rm -rf $(find $B0/${V0}5 -inum $(stat -c %i $B0/${V0}5/FILE1))
#New mount for recovery
TEST $GFS -s $H0 --use-readdirp=no --attribute-timeout=0 --entry-timeout=0 --volfile-id $V0 $M1
+$CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume heal $V0
+
#Access files
TEST cat $M1/FILE1
EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" path_size $B0/${V0}5/FILE1
diff --git a/tests/bitrot/bug-1700078.t b/tests/bitrot/bug-1700078.t
new file mode 100644
index 00000000000..f27374211fe
--- /dev/null
+++ b/tests/bitrot/bug-1700078.t
@@ -0,0 +1,87 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+
+## Lets create and start the volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}1
+TEST $CLI volume start $V0
+
+## Enable bitrot for volume $V0
+TEST $CLI volume bitrot $V0 enable
+
+## Turn off quick-read so that it won't cache the contents
+# of the file in lookup. For corrupted files, reads might
+# otherwise end up being served from the cache instead of
+# returning an error.
+TEST $CLI volume set $V0 performance.quick-read off
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Active' scrub_status $V0 'State of scrub'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '/var/log/glusterfs/bitd.log' scrub_status $V0 'Bitrot error log location'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '/var/log/glusterfs/scrub.log' scrub_status $V0 'Scrubber error log location'
+
+## Set expiry-timeout to 1 sec
+TEST $CLI volume set $V0 features.expiry-time 1
+
+##Mount $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+## Turn off the quick-read xlator so that the contents are not served from the
+# quick-read cache.
+TEST $CLI volume set $V0 performance.quick-read off
+
+#Create sample file
+TEST `echo "1234" > $M0/FILE1`
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' "/$B0/${V0}1/FILE1"
+
+##disable bitrot
+TEST $CLI volume bitrot $V0 disable
+
+## modify the file
+TEST `echo "write" >> $M0/FILE1`
+
+# unmount and remount when the file has to be accessed.
+# This is to ensure that, when the remount happens,
+# and the file is read, its contents are served from the
+# brick instead of cache.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+##enable bitrot
+TEST $CLI volume bitrot $V0 enable
+
+# expiry time is set to 1 second. Hence sleep for 2 seconds for the
+# oneshot crawler to finish its crawling and sign the file properly.
+sleep 2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Active' scrub_status $V0 'State of scrub'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '/var/log/glusterfs/bitd.log' scrub_status $V0 'Bitrot error log location'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT '/var/log/glusterfs/scrub.log' scrub_status $V0 'Scrubber error log location'
+
+## Ondemand scrub
+TEST $CLI volume bitrot $V0 scrub ondemand
+
+# The scrub ondemand CLI command just ensures that
+# the scrubber has received the ondemand scrub directive
+# and started. Sleep for 2 seconds for the scrubber to finish
+# crawling and marking file(s) as bad (if it finds that
+# corruption has happened), which are filesystem operations.
+sleep 2
+
+TEST ! getfattr -n 'trusted.bit-rot.bad-file' $B0/${V0}1/FILE1
+
+##Mount $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+TEST cat $M0/FILE1
+
+cleanup;
diff --git a/tests/bugs/bitrot/bug-1227996.t b/tests/bugs/bitrot/bug-1227996.t
index 47ebc4235cf..121c7b5f279 100644
--- a/tests/bugs/bitrot/bug-1227996.t
+++ b/tests/bugs/bitrot/bug-1227996.t
@@ -17,7 +17,6 @@ TEST pidof glusterd;
## Lets create and start the volume
TEST $CLI volume create $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}1
TEST $CLI volume start $V0
-
## Enable bitrot on volume $V0
TEST $CLI volume bitrot $V0 enable
diff --git a/tests/bugs/bitrot/bug-1245981.t b/tests/bugs/bitrot/bug-1245981.t
index 2bed4d980fa..f3955256b01 100644
--- a/tests/bugs/bitrot/bug-1245981.t
+++ b/tests/bugs/bitrot/bug-1245981.t
@@ -47,9 +47,9 @@ touch $M0/5
sleep `expr $SLEEP_TIME \* 2`
backpath=$(get_backend_paths $fname)
-TEST getfattr -m . -n trusted.bit-rot.signature $backpath
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' $backpath
backpath=$(get_backend_paths $M0/new_file)
-TEST getfattr -m . -n trusted.bit-rot.signature $backpath
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'trusted.bit-rot.signature' check_for_xattr 'trusted.bit-rot.signature' $backpath
cleanup;
diff --git a/tests/bugs/bug-1064147.t b/tests/bugs/bug-1064147.t
new file mode 100755
index 00000000000..27ffde4eb44
--- /dev/null
+++ b/tests/bugs/bug-1064147.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+#------------------------------------------------------------
+
+# Test case 1 - Subvolume down + Healing
+#------------------------------------------------------------
+# Kill 2nd brick process
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "1" online_brick_count
+
+# Change root permissions
+TEST chmod 444 $M0
+
+# Store permission for comparison
+TEST permission_new=`stat -c "%A" $M0`
+
+# Bring up the killed brick process
+TEST $CLI volume start $V0 force
+
+# Perform lookup
+sleep 5
+TEST ls $M0
+
+# Check brick permissions
+TEST brick_perm=`stat -c "%A" $B0/${V0}2`
+TEST [ ${brick_perm} = ${permission_new} ]
+#------------------------------------------------------------
+
+# Test case 2 - Add-brick + Healing
+#------------------------------------------------------------
+# Change root permissions
+TEST chmod 777 $M0
+
+# Store permission for comparison
+TEST permission_new_2=`stat -c "%A" $M0`
+
+# Add a 3rd brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
+
+# Perform lookup
+sleep 5
+TEST ls $M0
+
+# Check permissions on the new brick
+TEST brick_perm2=`stat -c "%A" $B0/${V0}3`
+
+TEST [ ${brick_perm2} = ${permission_new_2} ]
+
+cleanup;
diff --git a/tests/bugs/bug-1371806.t b/tests/bugs/bug-1371806.t
index 7dc1613a4f2..08180525650 100644
--- a/tests/bugs/bug-1371806.t
+++ b/tests/bugs/bug-1371806.t
@@ -28,6 +28,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3,4,5}
TEST $CLI volume start $V0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
diff --git a/tests/bugs/bug-1371806_acl.t b/tests/bugs/bug-1371806_acl.t
index aa41e04b96f..c39165628cc 100644
--- a/tests/bugs/bug-1371806_acl.t
+++ b/tests/bugs/bug-1371806_acl.t
@@ -39,6 +39,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3,4,5}
TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
TEST $CLI volume start $V0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "6" online_brick_count
TEST glusterfs --volfile-id=$V0 --acl --volfile-server=$H0 --entry-timeout=0 $M0;
@@ -77,6 +78,10 @@ cd $M0
## was up at the time of updated xattrs
TEST stat ./tmp{1..10}
+# Make sure to send a write and read on the file inside mount
+echo "helloworld" > ./tmp1/file
+TEST cat ./tmp1/file
+
## Compare succ value with updated acl attributes
count=`getfacl -p $B0/${V0}5/tmp{1..10} | grep -c "user:tmpuser:rw-"`
EXPECT "$succ" echo $count
@@ -87,4 +92,5 @@ EXPECT "$succ1" echo $count
cd -
userdel --force tmpuser
+
cleanup
diff --git a/tests/bugs/bug-1620580.t b/tests/bugs/bug-1620580.t
new file mode 100644
index 00000000000..0c74d4a6089
--- /dev/null
+++ b/tests/bugs/bug-1620580.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+## Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+## Lets create volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+
+## Verify volume is created
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs -s $H0 --volfile-id=$V0 $M0
+
+#do some operation on mount, so that kill_brick is guaranteed to be
+#done _after_ first lookup on root
+
+TEST ls $M0
+TEST touch $M0/file
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+# Case of Same volume name, but different bricks
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{3,4};
+TEST $CLI volume start $V0;
+
+# Give time for 'reconnect' to happen
+sleep 4
+
+TEST ! ls $M0
+TEST ! touch $M0/file1
+
+# Case of Same brick, but different volume (ie, recreated).
+TEST $CLI volume create $V1 $H0:$B0/${V0}{1,2};
+TEST $CLI volume start $V1;
+
+# Give time for 'reconnect' to happen
+sleep 4
+TEST ! ls $M0
+TEST ! touch $M0/file2
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+TEST $CLI volume stop $V1
+TEST $CLI volume delete $V1
+
+# Case of Same brick, but different volume (but same volume name)
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+TEST $CLI volume start $V0;
+
+# Give time for 'reconnect' to happen
+sleep 4
+TEST ! ls $M0
+TEST ! touch $M0/file3
+
+
+cleanup
diff --git a/tests/bugs/bug-1694920.t b/tests/bugs/bug-1694920.t
new file mode 100644
index 00000000000..5bf93c92f94
--- /dev/null
+++ b/tests/bugs/bug-1694920.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=300
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../fileio.rc
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0};
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume start $V0
+TEST $GFS -s $H0 --volfile-id=$V0 $M0;
+
+TEST touch $M0/a
+
+#When all bricks are up, lock and unlock should succeed
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST fd_close $fd1
+
+#When all bricks are down, lock/unlock should fail
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST $CLI volume stop $V0
+TEST ! flock -x $fd1
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST fd_close $fd1
+
+#When a brick goes down and comes back up operations on fd which had locks on it should succeed by default
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST $CLI volume stop $V0
+sleep 2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST fd_write $fd1 "data"
+TEST fd_close $fd1
+
+#When a brick goes down and comes back up operations on fd which had locks on it should fail when client.strict-locks is on
+TEST $CLI volume set $V0 client.strict-locks on
+TEST fd1=`fd_available`
+TEST fd_open $fd1 'w' $M0/a
+TEST flock -x $fd1
+TEST $CLI volume stop $V0
+sleep 2
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 $V0-client-0
+TEST ! fd_write $fd1 "data"
+TEST fd_close $fd1
+
+cleanup
diff --git a/tests/bugs/bug-1702299.t b/tests/bugs/bug-1702299.t
new file mode 100644
index 00000000000..1cff2ed5d3d
--- /dev/null
+++ b/tests/bugs/bug-1702299.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../dht.rc
+cleanup;
+
+function get_getfattr {
+ local path=$1
+ echo `getfattr -n user.foo $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
+function set_fattr {
+ for i in `seq 1 10`
+ do
+ setfattr -n user.foo -v "newabc" ./tmp${i}
+ if [ "$?" = "0" ]
+ then
+ succ=$((succ+1))
+ else
+ fail=$((fail+1))
+ fi
+ done
+}
+
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 --attribute-timeout=0 $M0;
+
+cd $M0
+TEST mkdir tmp{1..10}
+
+succ=fail=0
+## set user.foo xattr with value newabc after kill one brick
+set_fattr
+count=10
+EXPECT "$succ" echo $count
+count=0
+EXPECT "$fail" echo $count
+
+cd -
+
+# Add-brick
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{4,5}
+
+cd $M0
+## At this point dht code will heal the xattr on the down brick only for those
+## dirs whose hashed subvol was up at the time of the xattr update
+TEST stat ./tmp{1..10}
+
+
+## Count the user.foo xattrs with value newabc on the brick and compare with the succ value
+count=`getfattr -n user.foo $B0/${V0}4/tmp{1..10} | grep "user.foo" | grep -iw "newabc" | wc -l`
+EXPECT "$succ" echo $count
+
+## Count the user.foo xattrs with value newabc on the brick and compare with the succ value
+count=`getfattr -n user.foo $B0/${V0}5/tmp{1..10} | grep "user.foo" | grep -iw "newabc" | wc -l`
+EXPECT "$succ" echo $count
+
+
+cd -
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/cli/bug-1320388.t b/tests/bugs/cli/bug-1320388.t
index f5ffcbe082a..e719fc59033 100755
--- a/tests/bugs/cli/bug-1320388.t
+++ b/tests/bugs/cli/bug-1320388.t
@@ -21,7 +21,7 @@ cleanup;
rm -f $SSL_BASE/glusterfs.*
touch "$GLUSTERD_WORKDIR"/secure-access
-TEST openssl genrsa -out $SSL_KEY 1024
+TEST openssl genrsa -out $SSL_KEY 2048
TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
ln $SSL_CERT $SSL_CA
diff --git a/tests/bugs/cli/bug-1378842-volume-get-all.t b/tests/bugs/cli/bug-1378842-volume-get-all.t
index c798ce5ceff..be41f25b000 100644
--- a/tests/bugs/cli/bug-1378842-volume-get-all.t
+++ b/tests/bugs/cli/bug-1378842-volume-get-all.t
@@ -12,9 +12,6 @@ TEST $CLI volume set all server-quorum-ratio 80
# Execute volume get without having an explicit option, this should fail
TEST ! $CLI volume get all
-# Also volume get on an option not applicable for all volumes should fail
-TEST ! $CLI volume get all cluster.tier-mode
-
# Execute volume get with an explicit global option
TEST $CLI volume get all server-quorum-ratio
EXPECT '80' volume_get_field all 'cluster.server-quorum-ratio'
diff --git a/tests/bugs/cli/bug-983317-volume-get.t b/tests/bugs/cli/bug-983317-volume-get.t
index 8f09d588565..c793bbc9f0c 100644
--- a/tests/bugs/cli/bug-983317-volume-get.t
+++ b/tests/bugs/cli/bug-983317-volume-get.t
@@ -7,7 +7,8 @@ cleanup;
TEST glusterd
TEST pidof glusterd
-TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
+EXPECT 'Created' volinfo_field $V0 'Status';
# Set a volume option
TEST $CLI volume set $V0 open-behind on
@@ -32,3 +33,13 @@ EXPECT '80' volume_get_field $V0 'server-quorum-ratio'
# Check user.* options can also be retrived using volume get
EXPECT 'dummy' volume_get_field $V0 'user.metadata'
+
+TEST $CLI volume set all brick-multiplex enable
+EXPECT 'enable' volume_get_field $V0 'brick-multiplex'
+
+TEST $CLI volume set all brick-multiplex disable
+EXPECT 'disable' volume_get_field $V0 'brick-multiplex'
+
+#setting a cluster-level option for a single volume should fail
+TEST ! $CLI volume set $V0 brick-multiplex enable
+
diff --git a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
index 6351ba22511..a1b9a851bf7 100755
--- a/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
+++ b/tests/bugs/core/bug-1402841.t-mt-dir-scan-race.t
@@ -3,6 +3,8 @@
. $(dirname $0)/../../volume.rc
cleanup;
+FILE_COUNT=500
+
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
@@ -11,15 +13,14 @@ TEST $CLI volume set $V0 cluster.shd-wait-qlength 100
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
-touch $M0/file{1..200}
-
+for i in `seq 1 $FILE_COUNT`; do touch $M0/file$i; done
TEST kill_brick $V0 $H0 $B0/${V0}1
-for i in {1..200}; do echo hello>$M0/file$i; done
+for i in `seq 1 $FILE_COUNT`; do echo hello>$M0/file$i; chmod -x $M0/file$i; done
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-EXPECT "200" get_pending_heal_count $V0
+EXPECT "$FILE_COUNT" get_pending_heal_count $V0
TEST $CLI volume set $V0 self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
diff --git a/tests/bugs/core/bug-1650403.t b/tests/bugs/core/bug-1650403.t
index dadf9ddcc4c..43d09bc8bd9 100644
--- a/tests/bugs/core/bug-1650403.t
+++ b/tests/bugs/core/bug-1650403.t
@@ -88,7 +88,8 @@ for i in $(seq 1 $NUM_VOLS); do
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $mnt_point
done
-glustershd_pid=$(cat $GLUSTERD_PIDFILEDIR/glustershd/glustershd.pid || echo -1);
+glustershd_pid=`ps auxwww | grep glustershd | grep -v grep | awk -F " " '{print $2}'`
+TEST [ $glustershd_pid != 0 ]
start=`pmap -x $glustershd_pid | grep total | awk -F " " '{print $4}'`
echo "Memory consumption for glustershd process"
for i in $(seq 1 50); do
diff --git a/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t b/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t
new file mode 100644
index 00000000000..1acbaa8dc0b
--- /dev/null
+++ b/tests/bugs/core/bug-1699025-brick-mux-detach-brick-fd-issue.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+cleanup
+
+#bug-1444596 - validating brick mux
+
+TEST glusterd
+TEST $CLI volume create $V0 $H0:$B0/brick{0,1}
+TEST $CLI volume create $V1 $H0:$B0/brick{2,3}
+
+TEST $CLI volume set all cluster.brick-multiplex on
+
+TEST $CLI volume start $V0
+TEST $CLI volume start $V1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
+EXPECT 1 count_brick_processes
+
+TEST $CLI volume stop $V1
+# At the time the brick daemon is initialized it always keeps the
+# standard fd's (0, 1, 2) open, so after stopping 1 volume those fd's
+# should still be open
+nofds=$(ls -lrth /proc/`pgrep glusterfsd`/fd | grep dev/null | wc -l)
+TEST [ $((nofds)) -eq 3 ]
+
+cleanup
diff --git a/tests/bugs/ctime/issue-832.t b/tests/bugs/ctime/issue-832.t
new file mode 100755
index 00000000000..740f731ab73
--- /dev/null
+++ b/tests/bugs/ctime/issue-832.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../traps.rc
+
+#Trigger trusted.glusterfs.mdata setting codepath and see things work as expected
+cleanup
+
+TEST_USER=test-ctime-user
+TEST_UID=27341
+
+TEST useradd -o -M -u ${TEST_UID} ${TEST_USER}
+push_trapfunc "userdel --force ${TEST_USER}"
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+TEST $CLI volume start $V0
+
+$GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+echo abc > $M0/test
+TEST chmod 755 $M0/
+TEST chmod 744 $M0/test
+TEST setfattr -x trusted.glusterfs.mdata $B0/$V0/test
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+$GFS --volfile-id=/$V0 --volfile-server=$H0 $M0;
+su ${TEST_USER} -c "cat $M0/test"
+TEST getfattr -n trusted.glusterfs.mdata $B0/$V0/test
+
+cleanup
diff --git a/tests/bugs/distribute/bug-1161311.t b/tests/bugs/distribute/bug-1161311.t
index d88642edc32..62796068928 100755
--- a/tests/bugs/distribute/bug-1161311.t
+++ b/tests/bugs/distribute/bug-1161311.t
@@ -1,5 +1,7 @@
#!/bin/bash
+SCRIPT_TIMEOUT=350
+
# This tests for hard link preservation for files that are linked, when the
# file is undergoing migration
@@ -74,14 +76,12 @@ TEST glusterfs -s $H0 --volfile-id $V0 $M0;
TEST mkdir $M0/dir1
TEST mkdir -p $M0/dir2/dir3
-# Create a large file (6.4 GB), so that rebalance takes time
-# Reading from /dev/urandom is slow, so we'll cat it together
-dd if=/dev/urandom of=/tmp/FILE2 bs=64k count=10240
-for i in {1..10}; do
- cat /tmp/FILE2 >> $M0/dir1/FILE2
-done
-
-#dd if=/dev/urandom of=$M0/dir1/FILE2 bs=64k count=10240
+# Create a large file (8 GB), so that rebalance takes time
+# Since we really don't care about the contents of the file, we use fallocate
+# to generate the file much faster. We could also use truncate, which is even
+# faster, but rebalance could take advantage of a sparse file and migrate it
+# in an optimized way, but we don't want a fast migration.
+TEST fallocate -l 8G $M0/dir1/FILE2
# Rename the file to create a linkto, for rebalance to
# act on the file
@@ -89,6 +89,8 @@ done
## into separate bricks when brick count is 3
TEST mv $M0/dir1/FILE2 $M0/dir1/FILE1
+brick_loc=$(get_backend_paths $M0/dir1/FILE1)
+
# unmount and remount the volume
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST glusterfs -s $H0 --volfile-id $V0 $M0;
@@ -98,7 +100,7 @@ TEST $CLI volume rebalance $V0 start force
# Wait for FILE to get the sticky bit on, so that file is under
# active rebalance, before creating the links
-TEST checksticky $B0/${V0}3/dir1/FILE1
+TEST checksticky $brick_loc
# Create the links
## FILE3 FILE5 FILE7 have hashes, c8c91469 566d26ce 22ce7eba
@@ -119,7 +121,7 @@ cd /
# Ideally for this test to have done its job, the file should still be
# under migration, so check the sticky bit again
-TEST checksticky $B0/${V0}3/dir1/FILE1
+TEST checksticky $brick_loc
# Wait for rebalance to complete
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field $V0
@@ -152,6 +154,11 @@ TEST ln ./dir1/FILE7 ./FILE7
cd /
linkcountsrc=$(stat -c %h $M0/dir1/FILE1)
TEST [[ $linkcountsrc == 14 ]]
+
+
+# Stop the volume
+TEST $CLI volume stop $V0;
+
UMOUNT_LOOP ${B0}/${V0}{1..3}
rm -f ${B0}/brick{1..3}
cleanup;
diff --git a/tests/bugs/distribute/bug-1193636.t b/tests/bugs/distribute/bug-1193636.t
index ccde02edc70..b377910336e 100644
--- a/tests/bugs/distribute/bug-1193636.t
+++ b/tests/bugs/distribute/bug-1193636.t
@@ -41,11 +41,13 @@ dd if=/dev/zero of=$M0/dir1/FILE2 bs=64k count=10240
# act on the file
TEST mv $M0/dir1/FILE2 $M0/dir1/FILE1
+brick_loc=$(get_backend_paths $M0/dir1/FILE1)
+
build_tester $(dirname $0)/bug-1193636.c
TEST $CLI volume rebalance $V0 start force
-TEST checksticky $B0/${V0}3/dir1/FILE1
+TEST checksticky $brick_loc
TEST setfattr -n "user.test1" -v "test1" $M0/dir1/FILE1
TEST setfattr -n "user.test2" -v "test1" $M0/dir1/FILE1
diff --git a/tests/bugs/distribute/bug-1600379.t b/tests/bugs/distribute/bug-1600379.t
new file mode 100644
index 00000000000..8d2f6154100
--- /dev/null
+++ b/tests/bugs/distribute/bug-1600379.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# Initialize
+#------------------------------------------------------------
+cleanup;
+
+# Start glusterd
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+# Create a volume
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
+
+# Verify volume creation
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+# Start volume and verify successful start
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+#------------------------------------------------------------
+
+# Test case - Remove xattr from killed brick on lookup
+#------------------------------------------------------------
+# Create a dir and set custom xattr
+TEST mkdir $M0/testdir
+TEST setfattr -n user.attr -v val $M0/testdir
+xattr_val=`getfattr -d $B0/${V0}2/testdir | awk '{print $1}'`;
+TEST ${xattr_val}='user.attr="val"';
+
+# Kill 2nd brick process
+TEST kill_brick $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "1" online_brick_count
+
+# Remove custom xattr
+TEST setfattr -x user.attr $M0/testdir
+
+# Bring up the killed brick process
+TEST $CLI volume start $V0 force
+
+# Perform lookup
+sleep 5
+TEST ls $M0/testdir
+
+# Check brick xattrs
+xattr_val_2=`getfattr -d $B0/${V0}2/testdir`;
+TEST [ ${xattr_val_2} = ''] ;
+
+cleanup;
diff --git a/tests/bugs/distribute/bug-1786679.t b/tests/bugs/distribute/bug-1786679.t
new file mode 100755
index 00000000000..219ce51c8a9
--- /dev/null
+++ b/tests/bugs/distribute/bug-1786679.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=250
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+
+# create 2 subvols
+# create a dir
+# create a file
+# change layout
+# remove the file
+# execute create from a different mount
+# Without the patch, the file will be present on both of the bricks
+
+cleanup
+
+function get_layout () {
+
+layout=`getfattr -n trusted.glusterfs.dht -e hex $1 2>&1 | grep dht | gawk -F"=" '{print $2}'`
+
+echo $layout
+
+}
+
+function set_layout()
+{
+ setfattr -n "trusted.glusterfs.dht" -v $1 $2
+}
+
+TEST glusterd
+TEST pidof glusterd
+
+BRICK1=$B0/${V0}-0
+BRICK2=$B0/${V0}-1
+
+TEST $CLI volume create $V0 $H0:$BRICK1 $H0:$BRICK2
+TEST $CLI volume start $V0
+
+# Mount FUSE and create symlink
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+TEST ! stat "$BRICK1/dir/file"
+TEST stat "$BRICK2/dir/file"
+
+layout1="$(get_layout "$BRICK1/dir")"
+layout2="$(get_layout "$BRICK2/dir")"
+
+TEST set_layout $layout1 "$BRICK2/dir"
+TEST set_layout $layout2 "$BRICK1/dir"
+
+TEST rm $M0/dir/file -f
+TEST gluster v set $V0 client-log-level DEBUG
+
+#Without the patch in place, this client will create the file in $BRICK2,
+#which will lead to the file being present on both bricks when a new client
+#creates the file with the same name
+TEST touch $M0/dir/file
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M1
+TEST touch $M1/dir/file
+
+TEST stat "$BRICK1/dir/file"
+TEST ! stat "$BRICK2/dir/file"
+
+cleanup
diff --git a/tests/bugs/distribute/issue-1327.t b/tests/bugs/distribute/issue-1327.t
new file mode 100755
index 00000000000..acd8c8c6614
--- /dev/null
+++ b/tests/bugs/distribute/issue-1327.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+SCRIPT_TIMEOUT=250
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../dht.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+BRICK1=$B0/${V0}-0
+BRICK2=$B0/${V0}-1
+
+TEST $CLI volume create $V0 $H0:$BRICK1 $H0:$BRICK2
+TEST $CLI volume start $V0
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST mkdir $M0/dir
+
+#remove the dir from one of the bricks
+TEST rmdir $BRICK2/dir
+
+#sleep past the cache timeout so that a fresh lookup is triggered
+sleep 2
+
+TEST ls $M0/dir
+
+TEST stat $BRICK2/dir
+
+cleanup
diff --git a/tests/bugs/distribute/overlap.py b/tests/bugs/distribute/overlap.py
index 0941d377624..2813979787b 100755
--- a/tests/bugs/distribute/overlap.py
+++ b/tests/bugs/distribute/overlap.py
@@ -17,7 +17,7 @@ def calculate_one (ov, nv):
def calculate_all (values):
total = 0
- nv_index = len(values) / 2
+ nv_index = len(values) // 2
for old_val in values[:nv_index]:
new_val = values[nv_index]
nv_index += 1
diff --git a/tests/bugs/ec/bug-1236065.t b/tests/bugs/ec/bug-1236065.t
index 76d25d739fa..9181e73ec19 100644
--- a/tests/bugs/ec/bug-1236065.t
+++ b/tests/bugs/ec/bug-1236065.t
@@ -85,7 +85,6 @@ TEST pidof glusterd
EXPECT "$V0" volinfo_field $V0 'Volume Name'
EXPECT 'Started' volinfo_field $V0 'Status'
EXPECT '7' online_brick_count
-
## cleanup
cd
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
diff --git a/tests/bugs/ec/bug-1699866-check-reopen-fd.t b/tests/bugs/ec/bug-1699866-check-reopen-fd.t
new file mode 100644
index 00000000000..4386d010318
--- /dev/null
+++ b/tests/bugs/ec/bug-1699866-check-reopen-fd.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume set $V0 disperse.background-heals 0
+TEST $CLI volume set $V0 write-behind off
+TEST $CLI volume set $V0 open-behind off
+TEST $CLI volume start $V0
+TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+
+TEST mkdir -p $M0/dir
+
+fd="$(fd_available)"
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "5" ec_child_up_count $V0 0
+
+TEST fd_open ${fd} rw $M0/dir/test
+TEST fd_write ${fd} "test1"
+TEST $CLI volume replace-brick ${V0} $H0:$B0/${V0}0 $H0:$B0/${V0}0_1 commit force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
+TEST fd_write ${fd} "test2"
+TEST fd_close ${fd}
+
+cleanup
diff --git a/tests/bugs/ec/bug-1708156-honor-inodelk-contention-notify-on-partial-locks.t b/tests/bugs/ec/bug-1708156-honor-inodelk-contention-notify-on-partial-locks.t
new file mode 100644
index 00000000000..67fdb184b46
--- /dev/null
+++ b/tests/bugs/ec/bug-1708156-honor-inodelk-contention-notify-on-partial-locks.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function do_ls() {
+ local dir="${1}"
+ local i
+
+ for i in {1..50}; do
+ ls -l $M0/${dir} >/dev/null &
+ ls -l $M1/${dir} >/dev/null &
+ ls -l $M2/${dir} >/dev/null &
+ ls -l $M3/${dir} >/dev/null &
+ done
+ wait
+}
+
+function measure_time() {
+ {
+ LC_ALL=C
+ time -p "${@}"
+ } 2>&1 | awk '/^real/ { print $2 * 1000 }'
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 disperse 6 redundancy 2 $H0:$B0/${V0}{0..5}
+
+TEST $CLI volume set $V0 disperse.eager-lock on
+TEST $CLI volume set $V0 disperse.other-eager-lock on
+TEST $CLI volume set $V0 features.locks-notify-contention on
+TEST $CLI volume set $V0 disperse.eager-lock-timeout 10
+TEST $CLI volume set $V0 disperse.other-eager-lock-timeout 10
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 $M1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 $M2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0 $M3
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file.{1..10}
+
+# Run multiple 'ls' concurrently from multiple clients so that they collide and
+# cause partial locks.
+TEST [[ $(measure_time do_ls dir) -lt 10000 ]]
+
+cleanup
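
A note on measure_time above: it uses bash's 'time -p' keyword, whose POSIX-format report goes to stderr, and converts the "real" seconds to milliseconds with awk. A self-contained sketch of the same helper timing an arbitrary command (the 10000 ms bound in the test is specific to that workload):

    #!/bin/bash
    # Print the wall-clock duration of a command in milliseconds.
    measure_time() {
        {
            LC_ALL=C          # keep 'time -p' output parseable regardless of locale
            time -p "$@"
        } 2>&1 | awk '/^real/ { print $2 * 1000 }'
    }

    # Example: should print a value close to 200.
    measure_time sleep 0.2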
diff --git a/tests/bugs/fuse/bug-858215.t b/tests/bugs/fuse/bug-858215.t
index d2719a6e1d4..95999f6ad24 100755
--- a/tests/bugs/fuse/bug-858215.t
+++ b/tests/bugs/fuse/bug-858215.t
@@ -41,8 +41,8 @@ TEST stat $M0/newfile;
TEST rm $M0/newfile;
nfs_pid=$(cat $GLUSTERD_PIDFILEDIR/nfs/nfs.pid || echo -1);
-glustershd_pid=$(cat $GLUSTERD_PIDFILEDIR/glustershd/glustershd.pid || echo -1);
-
+glustershd_pid=`ps auxwww | grep glustershd | grep -v grep | awk -F " " '{print $2}'`
+TEST [ $glustershd_pid != 0 ];
pids=$(pidof glusterfs);
for i in $pids
do
diff --git a/tests/bugs/fuse/bug-985074.t b/tests/bugs/fuse/bug-985074.t
index d10fd9f8b41..ffa6df54144 100644
--- a/tests/bugs/fuse/bug-985074.t
+++ b/tests/bugs/fuse/bug-985074.t
@@ -30,7 +30,7 @@ TEST glusterd
TEST $CLI volume create $V0 $H0:$B0/$V0
TEST $CLI volume start $V0
-TEST $CLI volume set $V0 md-cache-timeout 3
+TEST $CLI volume set $V0 performance.stat-prefetch off
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --entry-timeout=0 --attribute-timeout=0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1 --entry-timeout=0 --attribute-timeout=0
@@ -40,8 +40,7 @@ TEST ln $M0/file $M0/file.link
TEST ls -ali $M0 $M1
TEST rm -f $M1/file.link
TEST ls -ali $M0 $M1
-# expire the md-cache timeout
-sleep 3
+
TEST mv $M0/file $M0/file.link
TEST stat $M0/file.link
TEST ! stat $M0/file
diff --git a/tests/bugs/fuse/many-groups-for-acl.t b/tests/bugs/fuse/many-groups-for-acl.t
index d959f750ee0..a51b1bc7267 100755
--- a/tests/bugs/fuse/many-groups-for-acl.t
+++ b/tests/bugs/fuse/many-groups-for-acl.t
@@ -38,6 +38,13 @@ do
done
TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -G ${NEW_USER}-${NEW_GIDS} ${NEW_USER}
+# Linux < 3.8 exports only the first 32 gids of a pid to userspace
+kernel_exports_few_gids=0
+if [ "$OSTYPE" = Linux ] && \
+ su -m ${NEW_USER} -c "grep ^Groups: /proc/self/status | wc -w | xargs -I@ expr @ - 1 '<' $LAST_GID - $NEW_GID + 1" > /dev/null; then
+ kernel_exports_few_gids=1
+fi
+
# preparation done, start the tests
TEST glusterd
@@ -48,6 +55,8 @@ TEST $CLI volume set $V0 nfs.disable off
TEST $CLI volume set ${V0} server.manage-gids off
TEST $CLI volume start ${V0}
+# This is just a synchronization hack to make sure the bricks are
+# up before going on.
EXPECT_WITHIN ${NFS_EXPORT_TIMEOUT} "1" is_nfs_export_available
# mount the volume with POSIX ACL support, without --resolve-gids
@@ -69,8 +78,8 @@ TEST [ $? -eq 0 ]
su -m ${NEW_USER} -c "touch ${M0}/first-32-gids-2/success > /dev/null"
TEST [ $? -eq 0 ]
-su -m ${NEW_USER} -c "touch ${M0}/gid-64/failure > /dev/null"
-TEST [ $? -ne 0 ]
+su -m ${NEW_USER} -c "touch ${M0}/gid-64/success--if-all-gids-exported > /dev/null"
+TEST [ $? -eq $kernel_exports_few_gids ]
su -m ${NEW_USER} -c "touch ${M0}/gid-120/failure > /dev/null"
TEST [ $? -ne 0 ]
diff --git a/tests/bugs/gfapi/bug-1319374.c b/tests/bugs/gfapi/bug-1319374.c
index bd80462e3ba..ea0dfb6b0f2 100644
--- a/tests/bugs/gfapi/bug-1319374.c
+++ b/tests/bugs/gfapi/bug-1319374.c
@@ -3,6 +3,7 @@
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#define NO_INIT 1
diff --git a/tests/bugs/gfapi/bug-1447266/bug-1447266.t b/tests/bugs/gfapi/bug-1447266/bug-1447266.t
index 2bf72f8c6d7..45547f4f0e7 100644
--- a/tests/bugs/gfapi/bug-1447266/bug-1447266.t
+++ b/tests/bugs/gfapi/bug-1447266/bug-1447266.t
@@ -56,5 +56,5 @@ TEST ! $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/.././snap3"
TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/../."
TEST $(dirname $0)/bug-1447266 $V0 $H0 "/.snaps/./snap1/./../snap1/dir/."
-cleanup_tester $(dirname $0)/bug-1319374
+cleanup_tester $(dirname $0)/bug-1447266
cleanup;
diff --git a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
index 4e570381701..b6af487a791 100644
--- a/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
+++ b/tests/bugs/glusterd/brick-mux-validation-in-cluster.t
@@ -7,6 +7,20 @@ function count_brick_processes {
pgrep glusterfsd | wc -l
}
+function count_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -v "N/A" | sort | uniq | wc -l
+}
+
+function count_N/A_brick_pids {
+ $CLI_1 --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
+ | grep -- '\-1' | sort | uniq | wc -l
+}
+
+function check_peers {
+ $CLI_2 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
cleanup;
TEST launch_cluster 3
@@ -48,4 +62,47 @@ TEST $CLI_1 volume stop $V1
EXPECT 3 count_brick_processes
-cleanup
+TEST $CLI_1 volume stop $META_VOL
+
+TEST $CLI_1 volume delete $META_VOL
+TEST $CLI_1 volume delete $V0
+TEST $CLI_1 volume delete $V1
+
+#bug-1773856 - Brick process fails to come up with brickmux on
+
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1 $H3:$B3/${V0}1 force
+TEST $CLI_1 volume start $V0
+
+
+EXPECT 3 count_brick_processes
+
+#create and start a new volume
+TEST $CLI_1 volume create $V1 $H1:$B1/${V1}2 $H2:$B2/${V1}2 $H3:$B3/${V1}2 force
+TEST $CLI_1 volume start $V1
+
+EXPECT 3 count_brick_processes
+
+V2=patchy2
+TEST $CLI_1 volume create $V2 $H1:$B1/${V2}3 $H2:$B2/${V2}3 $H3:$B3/${V2}3 force
+TEST $CLI_1 volume start $V2
+
+EXPECT 3 count_brick_processes
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 count_brick_pids
+
+TEST kill_node 1
+
+sleep 10
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers;
+
+$CLI_2 volume set $V0 performance.readdir-ahead on
+$CLI_2 volume set $V1 performance.readdir-ahead on
+
+TEST $glusterd_1;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_N/A_brick_pids
+
+cleanup;
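
The count_brick_pids / count_N/A_brick_pids helpers above scrape the <pid> elements out of the XML form of 'volume status'. A minimal sketch of the same extraction run directly against the gluster CLI (assumes a reachable glusterd):

    #!/bin/bash
    # Count the distinct PIDs of running bricks across all volumes.
    gluster --xml volume status all 2>/dev/null |
        sed -n '/.*<pid>\([^<]*\).*/s//\1/p' |   # extract every <pid> value
        grep -v -e 'N/A' -e '^-1$' |             # ignore bricks reported as down
        sort -u |
        wc -l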
diff --git a/tests/bugs/glusterd/brick-mux-validation.t b/tests/bugs/glusterd/brick-mux-validation.t
index 03a476823ca..61b0455f9a8 100644
--- a/tests/bugs/glusterd/brick-mux-validation.t
+++ b/tests/bugs/glusterd/brick-mux-validation.t
@@ -24,7 +24,7 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
TEST $CLI volume start $V0
EXPECT 1 count_brick_processes
-EXPECT 1 count_brick_pids
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
pkill gluster
@@ -101,4 +101,4 @@ TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
EXPECT 1 count_brick_processes
-cleanup; \ No newline at end of file
+cleanup;
diff --git a/tests/bugs/glusterd/brick-mux.t b/tests/bugs/glusterd/brick-mux.t
index eeaa3ebfea8..927940534c1 100644
--- a/tests/bugs/glusterd/brick-mux.t
+++ b/tests/bugs/glusterd/brick-mux.t
@@ -39,7 +39,7 @@ TEST glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 online_brick_count
EXPECT 1 count_brick_processes
-TEST $CLI volume set $V1 performance.cache-size 32MB
+TEST $CLI volume set $V1 performance.io-cache-size 32MB
TEST $CLI volume stop $V1
TEST $CLI volume start $V1
diff --git a/tests/bugs/glusterd/brick-order-check-add-brick.t b/tests/bugs/glusterd/brick-order-check-add-brick.t
new file mode 100644
index 00000000000..0be31dac768
--- /dev/null
+++ b/tests/bugs/glusterd/brick-order-check-add-brick.t
@@ -0,0 +1,61 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+TEST verify_lvm_version;
+#Create cluster with 3 nodes
+TEST launch_cluster 3 -NO_DEBUG -NO_FORCE
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume create $V0 replica 3 $H1:$L1/$V0 $H2:$L2/$V0 $H3:$L3/$V0
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#add-brick with or without mentioning the replica count should not fail
+TEST $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}_1 $H2:$L2/${V0}_1 $H3:$L3/${V0}_1
+EXPECT '2 x 3 = 6' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_2 $H2:$L2/${V0}_2 $H3:$L3/${V0}_2
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5
+EXPECT '3 x 3 = 9' volinfo_field $V0 'Number of Bricks'
+
+#adding bricks from same host with force should succeed
+TEST $CLI_1 volume add-brick $V0 $H1:$L1/${V0}_3 $H1:$L1/${V0}_4 $H1:$L1/${V0}_5 force
+EXPECT '4 x 3 = 12' volinfo_field $V0 'Number of Bricks'
+
+TEST $CLI_1 volume stop $V0
+TEST $CLI_1 volume delete $V0
+
+TEST $CLI_1 volume create $V0 replica 2 $H1:$L1/${V0}1 $H2:$L2/${V0}1
+EXPECT '1 x 2 = 2' volinfo_field $V0 'Number of Bricks'
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+#Add-brick with Increasing replica count
+TEST $CLI_1 volume add-brick $V0 replica 3 $H3:$L3/${V0}1
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+#Add-brick with Increasing replica count from same host should fail
+TEST ! $CLI_1 volume add-brick $V0 replica 5 $H1:$L1/${V0}2 $H1:$L1/${V0}3
+
+#adding multiple bricks from same host should fail the brick order check
+TEST ! $CLI_1 volume add-brick $V0 replica 3 $H1:$L1/${V0}{4..6} $H2:$L2/${V0}{7..9}
+EXPECT '1 x 3 = 3' volinfo_field $V0 'Number of Bricks'
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1595320.t b/tests/bugs/glusterd/bug-1595320.t
index 3a289f386de..c10e11821a1 100644
--- a/tests/bugs/glusterd/bug-1595320.t
+++ b/tests/bugs/glusterd/bug-1595320.t
@@ -48,7 +48,7 @@ b2_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-2*.pid)
b3_pid_file=$(ls $GLUSTERD_PIDFILEDIR/vols/$V0/*d-backends-3*.pid)
kill -9 $brick_pid
-EXPECT 0 count_brick_processes
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 0 count_brick_processes
# Unmount 3rd brick root from node
brick_root=$L3
diff --git a/tests/bugs/glusterd/bug-1696046.t b/tests/bugs/glusterd/bug-1696046.t
new file mode 100644
index 00000000000..e1c1eb2ceb9
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1696046.t
@@ -0,0 +1,113 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function count_up_bricks {
+ $CLI --xml volume status $1 | grep '<status>1' | wc -l
+}
+
+function count_brick_processes {
+ pgrep glusterfsd | wc -l
+}
+
+logdir=`gluster --print-logdir`
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume set all cluster.brick-multiplex on
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{1,2,3};
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume start $V1;
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 4 count_up_bricks $V1
+
+EXPECT 1 count_brick_processes
+
+# Mount V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+function client-log-file-name()
+{
+ logfilename=$M0".log"
+ echo ${logfilename:1} | tr / -
+}
+
+function brick-log-file-name()
+{
+ logfilename=$B0"/"$V0"1.log"
+ echo ${logfilename:1} | tr / -
+}
+
+log_file=$logdir"/"`client-log-file-name`
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.brick-log-level DEBUG
+
+# Do some operation
+touch $M0/file1
+
+# Check that debug messages exist only for V0.
+# The server xlator is common in brick_mux, so after enabling DEBUG logs
+# some debug messages should be present for the other xlators, like posix
+
+brick_log_file=$logdir"/bricks/"`brick-log-file-name`
+nofdlog=$(cat $brick_log_file | grep file1 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+#Check if any debug log exist in client-log file
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+## Set brick-log-level to INFO
+TEST $CLI volume set $V0 diagnostics.brick-log-level INFO
+
+## Set client-log-level to DEBUG
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+
+# Do some operation
+touch $M0/file2
+
+nofdlog=$(cat $brick_log_file | grep " D " | grep file2 | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+nofdlog=$(cat $log_file | grep " D " | wc -l)
+TEST [ $((nofdlog)) -ne 0 ]
+
+# Unmount V0
+TEST umount $M0
+
+#Mount V1
+TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+#do some operation
+touch $M0/file3
+
+
+# DEBUG log level is enabled only for V0, so no debug messages should appear
+# in the log for the creation of file3, except from the server xlator, which
+# is a common xlator in brick multiplex
+nofdlog=$(cat $brick_log_file | grep file3 | grep -v server | wc -l)
+TEST [ $((nofdlog)) -eq 0 ]
+
+# Unmount V1
+TEST umount $M0
+
+cleanup;
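
The client-log-file-name / brick-log-file-name helpers above depend on glusterfs naming log files after the mount point or brick path, with the leading '/' dropped and every remaining '/' turned into '-'. A minimal sketch of that mapping (hypothetical paths; the log directory comes from 'gluster --print-logdir'):

    #!/bin/bash
    # Translate a mount point or brick path into its glusterfs log file name.
    path_to_logname() {
        local path="$1"
        echo "${path#/}" | tr / -     # strip the leading '/' and flatten the rest
    }

    logdir=$(gluster --print-logdir)
    # Hypothetical examples:
    echo "client log: ${logdir}/$(path_to_logname /mnt/glusterfs-0).log"
    echo "brick log:  ${logdir}/bricks/$(path_to_logname /d/backends/patchy1).log"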
diff --git a/tests/bugs/glusterd/bug-1699339.t b/tests/bugs/glusterd/bug-1699339.t
new file mode 100644
index 00000000000..bb8d4f46eb8
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1699339.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+cleanup;
+
+NUM_VOLS=15
+
+
+get_brick_base () {
+ printf "%s/vol%02d" $B0 $1
+}
+
+function count_up_bricks {
+ vol=$1;
+ $CLI_1 --xml volume status $vol | grep '<status>1' | wc -l
+}
+
+create_volume () {
+
+ local vol_name=$(printf "%s-vol%02d" $V0 $1)
+
+ TEST $CLI_1 volume create $vol_name replica 3 $H1:$B1/${vol_name} $H2:$B2/${vol_name} $H3:$B3/${vol_name}
+ TEST $CLI_1 volume start $vol_name
+}
+
+TEST launch_cluster 3
+TEST $CLI_1 volume set all cluster.brick-multiplex on
+
+# The option accepts the value in the range from 5 to 200
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 210
+TEST ! $CLI_1 volume set all glusterd.vol_count_per_thread 4
+
+TEST $CLI_1 volume set all glusterd.vol_count_per_thread 5
+
+TEST $CLI_1 peer probe $H2;
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+# Our infrastructure can't handle an arithmetic expression here. The formula
+# is (NUM_VOLS-1)*2 because it sees each TEST/EXPECT once but needs the other
+# NUM_VOLS-1, and there are 2 such statements in each iteration.
+TESTS_EXPECTED_IN_LOOP=28
+for i in $(seq 1 $NUM_VOLS); do
+ starttime="$(date +%s)";
+ create_volume $i
+done
+
+TEST kill_glusterd 1
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+TEST $CLI_2 volume set $vol1 performance.readdir-ahead on
+done
+
+# Bring back 1st glusterd
+TEST $glusterd_1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TESTS_EXPECTED_IN_LOOP=4
+for i in `seq 1 3 15`
+do
+vol1=$(printf "%s-vol%02d" $V0 $i)
+EXPECT_WITHIN $PROBE_TIMEOUT "on" volinfo_field_1 $vol1 performance.readdir-ahead
+done
+
+cleanup
diff --git a/tests/bugs/glusterd/bug-1720566.t b/tests/bugs/glusterd/bug-1720566.t
new file mode 100644
index 00000000000..99bcf6ff785
--- /dev/null
+++ b/tests/bugs/glusterd/bug-1720566.t
@@ -0,0 +1,50 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+
+cleanup;
+V0="TestLongVolnamec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9037fec363b7b536700ff06eedeae0dd9abcd"
+V1="TestLongVolname3102bd28a16c49440bd5210e4ec4d5d93102bd28a16c49440bd5210e4ec4d5d933102bd28a16c49440bd5210e4ebbcd"
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count
+
+$CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+$CLI_1 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1
+EXPECT 'Created' cluster_volinfo_field 1 $V1 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V1
+EXPECT 'Started' cluster_volinfo_field 1 $V1 'Status';
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V1 $M1;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST mkdir $M1/dir{1..4};
+TEST touch $M1/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}_1 $H2:$B2/${V0}_1
+TEST $CLI_1 volume add-brick $V1 $H1:$B1/${V1}_1 $H2:$B2/${V1}_1
+
+
+TEST $CLI_1 volume rebalance $V0 start
+TEST $CLI_1 volume rebalance $V1 start
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V1
+
+cleanup;
diff --git a/tests/bugs/glusterd/check_elastic_server.t b/tests/bugs/glusterd/check_elastic_server.t
new file mode 100644
index 00000000000..41d2140aa2b
--- /dev/null
+++ b/tests/bugs/glusterd/check_elastic_server.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+. $(dirname $0)/../../volume.rc
+
+function cluster_rebalance_status {
+ local vol=$1
+ $CLI_2 volume status | grep -iw "Rebalance" -A 5 | grep "Status" | sed 's/.*: //'
+}
+
+cleanup;
+TEST launch_cluster 4;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+TEST $CLI_1 peer probe $H4;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 3 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+EXPECT 'Created' cluster_volinfo_field 1 $V0 'Status';
+
+$CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+#Mount invalid volume
+TEST ! glusterfs -s $H1 --volfile-id=${V0}_NA $M0;
+
+#Mount FUSE
+TEST glusterfs -s $H1 --volfile-id=$V0 $M0;
+
+TEST mkdir $M0/dir{1..4};
+TEST touch $M0/dir{1..4}/files{1..4};
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_remove_brick_status_completed_field "$V0 $H1:$B1/$V0"
+
+TEST $CLI_1 volume remove-brick $V0 $H1:$B1/$V0 commit
+
+kill_glusterd 1
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H3:$B3/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+TEST $CLI_2 volume add-brick $V0 $H4:$B4/$V0
+
+TEST $CLI_2 volume rebalance $V0 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status $V0
+kill_glusterd 2
+
+total_files=`find $M0 -name "files*" | wc -l`
+TEST [ $total_files -eq 16 ];
+
+cleanup;
+
diff --git a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
index fdc0a73f60c..8001359e6b3 100644
--- a/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
+++ b/tests/bugs/glusterd/mgmt-handshake-and-volume-sync-post-glusterd-restart.t
@@ -4,7 +4,7 @@
. $(dirname $0)/../../cluster.rc
function check_peers {
-$CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
cleanup
@@ -36,23 +36,35 @@ TEST [[ $OP_VERS_ORIG == $OP_VERS_NEW ]]
#bug-948686 - volume sync after bringing up the killed node
TEST $CLI_1 peer probe $H3
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/$V0 $H1:$B1/${V0}_1 $H2:$B2/$V0 $H3:$B3/$V0
TEST $CLI_1 volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
TEST glusterfs --volfile-server=$H1 --volfile-id=$V0 $M0
#kill a node
TEST kill_node 3
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers 2
#modify volume config to see change in volume-sync
TEST $CLI_1 volume set $V0 write-behind off
#add some files to the volume to see effect of volume-heal cmd
TEST touch $M0/{1..100};
TEST $CLI_1 volume stop $V0;
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 'Stopped' volinfo_field_1 $V0 'Status'
+
TEST $glusterd_3;
-EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers 3
+
+sleep 5
TEST $CLI_3 volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field_1 $V0 'Status'
TEST $CLI_2 volume stop $V0;
TEST $CLI_2 volume delete $V0;
diff --git a/tests/bugs/glusterd/optimized-basic-testcases.t b/tests/bugs/glusterd/optimized-basic-testcases.t
index dd98a65fa9a..b89ca22415e 100644
--- a/tests/bugs/glusterd/optimized-basic-testcases.t
+++ b/tests/bugs/glusterd/optimized-basic-testcases.t
@@ -32,6 +32,16 @@ function get_brick_host_uuid()
echo $host_uuid_list | awk '{print $1}'
}
+function generate_statedump_and_check_for_glusterd_info {
+ pid=`pidof glusterd`
+ #remove old stale statedumps
+ cleanup_statedump $pid
+ kill -USR1 $pid
+ #Wait till the statedump is generated
+ sleep 1
+ fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
+ cat $statedumpdir/$fname | grep "xlator.glusterd.priv" | wc -l
+}
cleanup;
@@ -59,6 +69,11 @@ TEST pidof glusterd;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
EXPECT 'Created' volinfo_field $V0 'Status';
+#bug-1786478 - default volume option after volume reset
+addr_family=`volinfo_field $V0 'transport.address-family'`
+TEST $CLI volume reset $V0
+EXPECT $addr_family volinfo_field $V0 'transport.address-family'
+
#bug-955588 - uuid validation
uuid=`grep UUID $GLUSTERD_WORKDIR/glusterd.info | cut -f2 -d=`
@@ -114,7 +129,8 @@ TEST ! $CLI volume set all $V0 cluster.op-version $OP_VERS_NEW
#bug-1022055 - validate log rotate command
-TEST $CLI volume log rotate $V0;
+TEST ! $CLI volume log rotate $V0;
+TEST $CLI volume log $V0 rotate;
#bug-1092841 - validating barrier enable/disable
@@ -276,7 +292,14 @@ TEST ! $CLI volume create "test" $H0:/var/lib/glusterd force
TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc
TEST ! $CLI volume create "test" $H0:/var/lib/glusterd/abc force
mkdir -p /xyz/var/lib/glusterd/abc
-TEST $CLI volume create "test" $H0:/xyz/var/lib/glusterd/abc
+
+#bug 1716812 - volfile should be created with transport type both
+TEST $CLI volume create "test" transport tcp,rdma $H0:/xyz/var/lib/glusterd/abc
EXPECT 'Created' volinfo_field "test" 'Status';
+#While taking a statedump, there is a TRY_LOCK on call_frame, which may cause
+#a failure. So add an EXPECT_WITHIN
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" generate_statedump_and_check_for_glusterd_info
+
+cleanup_statedump `pidof glusterd`
cleanup
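
For context on generate_statedump_and_check_for_glusterd_info above: glusterd writes a statedump when it receives SIGUSR1, and the dump lands in the statedump directory as <name>.<pid>.dump.<timestamp>. A minimal sketch of triggering and inspecting one by hand, assuming the default statedump directory /var/run/gluster:

    #!/bin/bash
    # Trigger a glusterd statedump and check for its private-info section.
    statedumpdir=/var/run/gluster            # assumption: default statedump path
    pid=$(pidof glusterd)

    kill -USR1 "$pid"                        # ask glusterd to write a statedump
    sleep 1                                  # give it a moment to finish writing

    dump=$(ls -t "$statedumpdir"/*."$pid".dump.* 2>/dev/null | head -n 1)
    grep -c "xlator.glusterd.priv" "$dump"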
diff --git a/tests/bugs/glusterd/quorum-validation.t b/tests/bugs/glusterd/quorum-validation.t
index 05aef4edccb..3cc3351b43b 100644
--- a/tests/bugs/glusterd/quorum-validation.t
+++ b/tests/bugs/glusterd/quorum-validation.t
@@ -34,9 +34,13 @@ TEST ! $CLI_1 volume add-brick $V0 $H1:$B1/${V0}2
TEST ! $CLI_1 volume remove-brick $V0 $H1:$B1/${V0}0 start
TEST ! $CLI_1 volume set $V0 barrier enable
-# Now execute a command which goes through op state machine and it should fail
+#quorum is not met, rebalance/profile start should fail
+TEST ! $CLI_1 volume rebalance $V0 start
TEST ! $CLI_1 volume profile $V0 start
+#bug-1690753 - Volume stop when quorum not met is successful
+TEST ! $CLI_1 volume stop $V0
+
#Bring back the 2nd glusterd
TEST $glusterd_2
diff --git a/tests/bugs/glusterd/rebalance-in-cluster.t b/tests/bugs/glusterd/rebalance-in-cluster.t
index 9565faef01d..469ec6cd48e 100644
--- a/tests/bugs/glusterd/rebalance-in-cluster.t
+++ b/tests/bugs/glusterd/rebalance-in-cluster.t
@@ -4,6 +4,10 @@
. $(dirname $0)/../../cluster.rc
. $(dirname $0)/../../volume.rc
+function rebalance_status_field_1 {
+ $CLI_1 volume rebalance $1 status | awk '{print $7}' | sed -n 3p
+}
+
cleanup;
TEST launch_cluster 2;
TEST $CLI_1 peer probe $H2;
@@ -29,6 +33,11 @@ TEST $CLI_1 volume add-brick $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
TEST $CLI_1 volume rebalance $V0 start
EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" cluster_rebalance_status_field 1 $V0
+#bug-1764119 - rebalance status should display detailed info when any of the nodes is down
+TEST kill_glusterd 2
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" rebalance_status_field_1 $V0
+
+TEST start_glusterd 2
#bug-1245142
$CLI_1 volume rebalance $V0 start &
diff --git a/tests/bugs/glusterd/rebalance-operations-in-single-node.t b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
index 9144b4a5000..ef85887f440 100644
--- a/tests/bugs/glusterd/rebalance-operations-in-single-node.t
+++ b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
@@ -119,13 +119,13 @@ TEST touch $M0/dir{21..30}/files{1..10};
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{7,8}
TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 90 "completed" rebalance_status_field $V0
+EXPECT_WITHIN 180 "completed" rebalance_status_field $V0
TEST pkill gluster
TEST glusterd
TEST pidof glusterd
# status should be "completed" immediate after glusterd has respawned.
-EXPECT_WITHIN 5 "completed" rebalance_status_field $V0
+EXPECT_WITHIN 20 "completed" rebalance_status_field $V0
cleanup
diff --git a/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t b/tests/bugs/glusterd/remove-brick-validation.t
index 11ed0d94d79..a0ff4ff6a24 100644
--- a/tests/bugs/glusterd/enable-shared-storage-and-remove-brick-validation.t
+++ b/tests/bugs/glusterd/remove-brick-validation.t
@@ -18,20 +18,6 @@ TEST $CLI_1 peer probe $H2;
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
-#test case for bug 1266818 - disabling enable-shared-storage option
-##should not delete user created volume with name glusterd_shared_storage
-
-## creating a volume with name glusterd_shared_storage
-TEST $CLI_1 volume create glusterd_shared_storage $H1:$B1/${V0}0 $H2:$B2/${V0}1
-TEST $CLI_1 volume start glusterd_shared_storage
-
-## disabling enable-shared-storage should not succeed and should not delete the
-## user created volume with name "glusterd_shared_storage"
-TEST ! $CLI_1 volume all enable-shared-storage disable
-
-## volume with name should exist
-TEST $CLI_1 volume info glusterd_shared_storage
-
#testcase: bug-1245045-remove-brick-validation
TEST $CLI_1 peer probe $H3;
diff --git a/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
index 20c84d26b9c..00beab59137 100644
--- a/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
+++ b/tests/bugs/glusterd/removing-multiple-bricks-in-single-remove-brick-command.t
@@ -49,6 +49,7 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
#Create a 3x3 dist-rep volume
TEST $CLI volume create $V1 replica 3 $H0:$B0/${V1}{0,1,2,3,4,5,6,7,8};
TEST $CLI volume start $V1
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "9" brick_count ${V1}
# Mount FUSE and create file/directory
TEST glusterfs -s $H0 --volfile-id $V1 $M0
diff --git a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
index cdb1a3399c9..e6e65c48456 100644
--- a/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
+++ b/tests/bugs/glusterd/reset-brick-and-daemons-follow-quorum.t
@@ -55,9 +55,9 @@ TEST kill_glusterd 1
#Bring back 1st glusterd
TEST $glusterd_1
-# We need to wait till PROCESS_UP_TIMEOUT and then check shd service does not
-# come up on node 2
-sleep $PROCESS_UP_TIMEOUT
-EXPECT "N" shd_up_status_2
+# We need to wait till PROCESS_UP_TIMEOUT and then check that the shd service
+# has started on node 2, because once glusterd regains quorum, it will restart
+# all volume-level daemons
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" shd_up_status_2
cleanup;
diff --git a/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
new file mode 100644
index 00000000000..a871e112d87
--- /dev/null
+++ b/tests/bugs/glusterd/serialize-shd-manager-glusterd-restart.t
@@ -0,0 +1,54 @@
+#! /bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../cluster.rc
+
+function check_peers {
+count=`$CLI_3 peer status | grep 'Peer in Cluster (Connected)' | wc -l`
+echo $count
+}
+
+function check_shd {
+ps aux | grep $1 | grep glustershd | wc -l
+}
+
+cleanup
+
+
+TEST launch_cluster 6
+
+TESTS_EXPECTED_IN_LOOP=25
+for i in $(seq 2 6); do
+ hostname="H$i"
+ TEST $CLI_1 peer probe ${!hostname}
+done
+
+
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers;
+for i in $(seq 1 5); do
+
+ TEST $CLI_1 volume create ${V0}_$i replica 3 $H1:$B1/${V0}_$i $H2:$B2/${V0}_$i $H3:$B3/${V0}_$i $H4:$B4/${V0}_$i $H5:$B5/${V0}_$i $H6:$B6/${V0}_$i
+ TEST $CLI_1 volume start ${V0}_$i force
+
+done
+
+#kill a node
+TEST kill_node 3
+
+TEST $glusterd_3;
+EXPECT_WITHIN $PROBE_TIMEOUT 5 check_peers
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 check_shd $H3
+
+for i in $(seq 1 5); do
+
+ TEST $CLI_1 volume stop ${V0}_$i
+ TEST $CLI_1 volume delete ${V0}_$i
+
+done
+
+for i in $(seq 1 6); do
+ hostname="H$i"
+ EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 0 check_shd ${!hostname}
+done
+cleanup
diff --git a/tests/bugs/glusterd/validating-options-for-replicated-volume.t b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
index ee231338ff1..ddc80b17870 100644
--- a/tests/bugs/glusterd/validating-options-for-replicated-volume.t
+++ b/tests/bugs/glusterd/validating-options-for-replicated-volume.t
@@ -61,10 +61,15 @@ TEST ! $CLI volume set $V0 background-self-heal-count " "
TEST $CLI volume set $V0 background-self-heal-count 10
EXPECT "10" volume_option $V0 cluster.background-self-heal-count
-TEST ! $CLI volume set $V0 cache-size ""
-TEST ! $CLI volume set $V0 cache-size " "
-TEST $CLI volume set $V0 cache-size 512MB
-EXPECT "512MB" volume_option $V0 performance.cache-size
+TEST ! $CLI volume set $V0 io-cache-size ""
+TEST ! $CLI volume set $V0 io-cache-size " "
+TEST $CLI volume set $V0 io-cache-size 64MB
+EXPECT "64MB" volume_option $V0 performance.io-cache-size
+
+TEST ! $CLI volume set $V0 quick-read-cache-size ""
+TEST ! $CLI volume set $V0 quick-read-cache-size " "
+TEST $CLI volume set $V0 quick-read-cache-size 512MB
+EXPECT "512MB" volume_option $V0 performance.quick-read-cache-size
TEST ! $CLI volume set $V0 self-heal-daemon ""
TEST ! $CLI volume set $V0 self-heal-daemon " "
diff --git a/tests/bugs/glusterfs-server/bug-852147.t b/tests/bugs/glusterfs-server/bug-852147.t
index c644cfa62dc..75db2a26e05 100755
--- a/tests/bugs/glusterfs-server/bug-852147.t
+++ b/tests/bugs/glusterfs-server/bug-852147.t
@@ -66,7 +66,7 @@ ren_file=$log_file".*"
rm -rf $ren_file
#Initiating log rotate
-TEST $CLI volume log rotate $V0
+TEST $CLI volume log $V0 rotate
#Capturing new log file's size
new_file_size=`file-size $log_file`
diff --git a/tests/bugs/glusterfs-server/bug-873549.t b/tests/bugs/glusterfs-server/bug-873549.t
index a3b2f9c9bf7..8b5534728fd 100644
--- a/tests/bugs/glusterfs-server/bug-873549.t
+++ b/tests/bugs/glusterfs-server/bug-873549.t
@@ -10,7 +10,7 @@ TEST $CLI volume info;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume set $V0 performance.cache-size 512MB
+TEST $CLI volume set $V0 performance.quick-read-cache-size 512MB
TEST $CLI volume start $V0
TEST $CLI volume statedump $V0 all
diff --git a/tests/bugs/glusterfs-server/bug-877992.t b/tests/bugs/glusterfs-server/bug-877992.t
index aeb73ed94dd..300000bcf2c 100755
--- a/tests/bugs/glusterfs-server/bug-877992.t
+++ b/tests/bugs/glusterfs-server/bug-877992.t
@@ -46,7 +46,9 @@ TEST $CLI volume create $V0 $H0:$B0/${V0}1;
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
EXPECT 'createPre' cat /tmp/pre.out;
-EXPECT 'createPost' cat /tmp/post.out;
+# Spost.sh comes after S10selinux-label-brick.sh in the create post hook script
+# list, so allow for the delay in setting the SELinux context on the bricks
+EXPECT_WITHIN 5 'createPost' cat /tmp/post.out;
hooks_cleanup 'create'
diff --git a/tests/bugs/glusterfs-server/bug-887145.t b/tests/bugs/glusterfs-server/bug-887145.t
index 82f7ccacfc1..db2cf3c050b 100755
--- a/tests/bugs/glusterfs-server/bug-887145.t
+++ b/tests/bugs/glusterfs-server/bug-887145.t
@@ -29,7 +29,15 @@ chmod 600 $M0/file;
TEST mount_nfs $H0:/$V0 $N0 nolock;
-chown -R nfsnobody:nfsnobody $M0/dir;
+grep nfsnobody /etc/passwd > /dev/null
+if [ $? -eq 1 ]; then
+usr=nobody
+grp=nobody
+else
+usr=nfsnobody
+grp=nfsnobody
+fi
+chown -R $usr:$grp $M0/dir;
chown -R tmp_user:tmp_user $M0/other;
TEST $CLI volume set $V0 server.root-squash on;
@@ -38,7 +46,7 @@ EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
# create files and directories in the root of the glusterfs and nfs mount
# which is owned by root and hence the right behavior is getting EACCESS
-# as the fops are executed as nfsnobody.
+# as the fops are executed as nfsnobody/nobody.
touch $M0/foo 2>/dev/null;
TEST [ $? -ne 0 ]
touch $N0/foo 2>/dev/null;
@@ -61,7 +69,7 @@ cat $N0/passwd 1>/dev/null;
TEST [ $? -eq 0 ]
# create files and directories should succeed as the fops are being executed
-# inside the directory owned by nfsnobody
+# inside the directory owned by nfsnobody/nobody
TEST touch $M0/dir/file;
TEST touch $N0/dir/foo;
TEST mkdir $M0/dir/new;
diff --git a/tests/bugs/glusterfs/bug-844688.t b/tests/bugs/glusterfs/bug-844688.t
index 39f04092cf7..65f41b342a5 100755
--- a/tests/bugs/glusterfs/bug-844688.t
+++ b/tests/bugs/glusterfs/bug-844688.t
@@ -3,6 +3,17 @@
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
+function check_callstack_log {
+ local text=$1
+ statedump_file=$(generate_mount_statedump $V0);
+ grep $text $statedump_file 2>/dev/null 1>/dev/null;
+ if [ $? -eq 0 ]; then
+ echo "1";
+ else
+ echo "0";
+ fi;
+}
+
cleanup;
TEST glusterd
@@ -15,19 +26,29 @@ mount_pid=$(get_mount_process_pid $V0);
# enable dumping of call stack creation and frame creation times in statedump
# monitoring is enabled by default
-TEST touch $M0/touchfile;
-(dd if=/dev/urandom of=$M0/file bs=5k 2>/dev/null 1>/dev/null)&
-back_pid=$!;
-statedump_file=$(generate_mount_statedump $V0);
-grep "callstack-creation-time" $statedump_file 2>/dev/null 1>/dev/null;
-TEST [ $? -eq 0 ];
-grep "frame-creation-time" $statedump_file 2>/dev/null 1>/dev/null;
-TEST [ $? -eq 0 ];
+# We want to make sure that there is a pending frame in gluster stack.
+# For that we are creating a blocking lock scenario.
+
+TEST touch $M0/lockfile;
+# Open two fd's on the same file
+exec 8>$M0/lockfile;
+exec 9>$M0/lockfile;
+
+# First flock will succeed and the second one will block, hence the background run.
+flock -x 8 ;
+flock -x 9 &
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" check_callstack_log "callstack-creation-time";
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" check_callstack_log "frame-creation-time";
+
+flock -u 8
+flock -u 9;
-kill -SIGTERM $back_pid;
-wait >/dev/null 2>&1;
+# Closing the fd's
+exec 8>&-
+exec 9>&-
-TEST rm -f $M0/touchfile $M0/file;
+TEST rm -f $M0/lockfile;
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
rm -f $statedumpdir/glusterdump.$mount_pid.*;
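
The rewrite above keeps a frame pending in the gluster stack by creating a blocking lock: two descriptors are opened on the same file, the first flock succeeds and the second blocks until the first is released. A standalone sketch of that pattern on an ordinary local file (hypothetical path /tmp/lockfile):

    #!/bin/bash
    # Create a guaranteed-pending lock request using two fds on one file.
    lockfile=/tmp/lockfile        # hypothetical path
    touch "$lockfile"

    exec 8>"$lockfile"
    exec 9>"$lockfile"

    flock -x 8                    # first exclusive lock: granted immediately
    flock -x 9 &                  # second exclusive lock: blocks in the background
    blocked=$!

    sleep 1                       # the second request is now pending
    flock -u 8                    # release the first lock; the second is granted
    wait "$blocked"
    flock -u 9

    exec 8>&-                     # close both descriptors
    exec 9>&-
    rm -f "$lockfile"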
diff --git a/tests/bugs/glusterfs/bug-873962-spb.t b/tests/bugs/glusterfs/bug-873962-spb.t
index db84a223089..db71cc0f6fe 100644
--- a/tests/bugs/glusterfs/bug-873962-spb.t
+++ b/tests/bugs/glusterfs/bug-873962-spb.t
@@ -14,6 +14,7 @@ TEST $CLI volume set $V0 performance.io-cache off
TEST $CLI volume set $V0 performance.write-behind off
TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.open-behind off
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
TEST $CLI volume start $V0
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
diff --git a/tests/bugs/glusterfs/bug-902610.t b/tests/bugs/glusterfs/bug-902610.t
index b45e92b8a3b..112c947e116 100755
--- a/tests/bugs/glusterfs/bug-902610.t
+++ b/tests/bugs/glusterfs/bug-902610.t
@@ -28,7 +28,7 @@ function get_layout()
fi
# Figure out where the join point is.
- target=$( $PYTHON -c "print '%08x' % (0x$layout1_e + 1)")
+ target=$( $PYTHON -c "print('%08x' % (0x$layout1_e + 1))")
#echo "target for layout2 = $target" > /dev/tty
# The second layout should cover everything that the first doesn't.
diff --git a/tests/bugs/logging/bug-823081.t b/tests/bugs/logging/bug-823081.t
index 0ed8f4c26c1..bd1965d2d49 100755
--- a/tests/bugs/logging/bug-823081.t
+++ b/tests/bugs/logging/bug-823081.t
@@ -22,20 +22,20 @@ function set_tail ()
set_tail $V0;
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-`
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
TEST [[ \"$tail\" == \"$tail_success\" ]]
TEST ! $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-`
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
TEST [[ \"$tail\" == \"$tail_failure\" ]]
set_tail $V1;
TEST gluster volume create $V1 $H0:$B0/${V1}{1,2} force;
-tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-`
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
TEST [[ \"$tail\" == \"$tail_success_force\" ]]
TEST ! gluster volume create $V1 $H0:$B0/${V1}{1,2} force;
-tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 5-`
+tail=`tail -n 1 $logdir/$cmd_log_history | cut -d " " -f 6-`
TEST [[ \"$tail\" == \"$tail_failure_force\" ]]
cleanup;
diff --git a/tests/bugs/md-cache/bug-1726205.t b/tests/bugs/md-cache/bug-1726205.t
new file mode 100644
index 00000000000..795130e9bd8
--- /dev/null
+++ b/tests/bugs/md-cache/bug-1726205.t
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2,3};
+
+TEST $CLI volume start $V0
+
+TEST $CLI volume set $V0 group samba
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST touch $M0/file
+TEST "setfattr -n "user.DosStream.Zone.Identifier:\$DATA" -v '\0' $M0/file"
+TEST "getfattr -n "user.DosStream.Zone.Identifier:\$DATA" -e hex $M0/file | grep -q 0x00"
+
+cleanup;
diff --git a/tests/bugs/nfs/showmount-many-clients.t b/tests/bugs/nfs/showmount-many-clients.t
index f1b6859d528..c6c9c35d60a 100644
--- a/tests/bugs/nfs/showmount-many-clients.t
+++ b/tests/bugs/nfs/showmount-many-clients.t
@@ -12,6 +12,8 @@
# the groups into their own structures, this testcase passes.
#
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../nfs.rc
. $(dirname $0)/../../volume.rc
diff --git a/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t b/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t
index 016e637dd0d..3839c6e3380 100755
--- a/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t
+++ b/tests/bugs/posix/bug-1040275-brick-uid-reset-on-volume-restart.t
@@ -11,6 +11,10 @@ function get_gid() {
stat -c '%g' $1;
}
+function check_stat() {
+ stat $1
+ echo $?
+}
cleanup;
@@ -37,7 +41,10 @@ EXPECT 100 get_uid $M0;
EXPECT 101 get_gid $M0;
TEST $CLI volume stop $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" online_brick_count
+
TEST $CLI volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "6" online_brick_count
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
@@ -46,6 +53,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" check_stat $M0
EXPECT 100 get_uid $M0;
EXPECT 101 get_gid $M0;
diff --git a/tests/bugs/posix/bug-1651445.t b/tests/bugs/posix/bug-1651445.t
new file mode 100644
index 00000000000..4d08b69b9b0
--- /dev/null
+++ b/tests/bugs/posix/bug-1651445.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup
+
+TEST verify_lvm_version
+TEST glusterd
+TEST pidof glusterd
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+TEST $CLI volume create $V0 replica 3 $H0:$L{1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+#Setting the size in bytes
+TEST $CLI volume set $V0 storage.reserve 40MB
+
+#wait 5s to reset disk_space_full flag
+sleep 5
+
+TEST dd if=/dev/zero of=$M0/a bs=100M count=1
+TEST dd if=/dev/zero of=$M0/b bs=10M count=1
+
+# Wait 5s for the disk_space_full flag to be updated because the thread checks
+# disk space every 5s
+
+sleep 5
+# setup_lvm creates an lvm partition of 150M and 40M is reserved, so after
+# consuming more than 110M the next dd should fail
+TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
+TEST dd if=/dev/urandom of=$M0/a bs=1022 count=1 oflag=seek_bytes,sync seek=102 conv=notrunc
+
+rm -rf $M0/*
+
+#Setting the size in percent and repeating the above steps
+TEST $CLI volume set $V0 storage.reserve 40
+
+sleep 5
+
+TEST dd if=/dev/zero of=$M0/a bs=80M count=1
+TEST dd if=/dev/zero of=$M0/b bs=10M count=1
+
+sleep 5
+TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
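
The test above exercises posix's storage.reserve option: once free space on a brick falls below the reserve (given either as a size or as a percentage), new writes fail, and the flag is refreshed by a thread that re-checks free space every 5 seconds. A minimal sketch of probing that behaviour on an already-mounted volume, using a hypothetical volume name "patchy" and mount point /mnt/patchy:

    #!/bin/bash
    # Reserve 40MB on every brick of a hypothetical volume and show that
    # writes start failing once the reserve threshold is crossed.
    gluster volume set patchy storage.reserve 40MB
    sleep 5                                             # let the check thread pick it up

    dd if=/dev/zero of=/mnt/patchy/big bs=1M count=100  # consume most of the free space
    sleep 5                                             # flag is refreshed every 5 seconds

    # Expected to fail with ENOSPC once the reserve limit is reached.
    dd if=/dev/zero of=/mnt/patchy/more bs=1M count=50 || echo "reserve enforced"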
diff --git a/tests/bugs/protocol/bug-1321578.t b/tests/bugs/protocol/bug-1321578.t
index 160fc408fba..83904817467 100644
--- a/tests/bugs/protocol/bug-1321578.t
+++ b/tests/bugs/protocol/bug-1321578.t
@@ -6,6 +6,7 @@ check_mounted () {
df | grep $1 | wc -l
}
+CHECK_MOUNT_TIMEOUT=7
TEST glusterd
TEST $CLI volume create $V0 $H0:$B0/$V0
@@ -23,15 +24,59 @@ $CLI system getspec $V0 | sed -e /username/d -e /password/d > fubar.vol
# This mount should fail because auth.allow doesn't include us.
TEST $GFS -f fubar.vol $M0
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 0 check_mounted $M0
+
+# Add tests when only username is present, but not password
+# "System getspec" will include the username and password if the request comes
+# from a server (which we are). Unfortunately, this will cause authentication
+# to succeed in auth.login regardless of whether auth.addr is working properly
+# or not, which is useless to us. To get a proper test, strip out those lines.
+$CLI system getspec $V0 | sed -e /password/d > fubar.vol
+
+# This mount should fail because the volfile carries our username but not the password.
+TEST $GFS -f fubar.vol $M0
+
# If we had DONT_EXPECT_WITHIN we could use that, but we don't.
-sleep 10
-EXPECT 0 check_mounted $M0
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 0 check_mounted $M0
+
+# Now, add a test for login failure when the server doesn't have the password
+# entry. Here the client volfile keeps both username and password ("system
+# getspec" includes them because the request comes from a server), but the
+# password entry is stripped from the server-side volfiles below, so the
+# login must fail.
+$CLI system getspec $V0 > fubar.vol
+TEST $CLI volume stop $V0
+
+sed -i -e '/password /d' /var/lib/glusterd/vols/$V0/$V0.*$V0.vol
+
+TEST $CLI volume start $V0
+
+# This mount should fail because the server-side volfile no longer has our password entry.
+TEST $GFS -f fubar.vol $M0
+
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 0 check_mounted $M0
# Set auth.allow to include us. This mount should therefore succeed.
TEST $CLI volume set $V0 auth.allow $H0
+$CLI system getspec $V0 | sed -e /password/d > fubar.vol
+
+TEST $GFS -f fubar.vol $M0
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 1 check_mounted $M0
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+# Set auth.reject to include us. This mount should therefore fail.
+TEST $CLI volume stop $V0
+
+TEST $CLI volume set $V0 auth.allow "\*"
+TEST $CLI volume set $V0 auth.reject $H0
+TEST $CLI volume start $V0
+
+# Strip the password so that the login module is not in the picture
+$CLI system getspec $V0 | sed -e /password/d > fubar.vol
TEST $GFS -f fubar.vol $M0
-sleep 10
-EXPECT 1 check_mounted $M0
+EXPECT_WITHIN $CHECK_MOUNT_TIMEOUT 0 check_mounted $M0
cleanup
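
For reference, the new checks above revolve around the addr-based access options: auth.allow grants access by address or hostname pattern and auth.reject denies it, and as the last test exercises, a client matching auth.reject is refused even when auth.allow is '*'. A minimal sketch of toggling them on a hypothetical volume "patchy" (the test stops and starts the volume around the change so the bricks reload the options):

    #!/bin/bash
    # Allow only one host, then explicitly reject it again.
    gluster volume set patchy auth.allow 'server1.example.com'
    gluster volume set patchy auth.reject 'server1.example.com'

    # Restart the volume so the bricks pick up the new auth options,
    # mirroring what the test does.
    gluster volume stop patchy
    gluster volume start patchy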
diff --git a/tests/bugs/protocol/bug-1390914.t b/tests/bugs/protocol/bug-1390914.t
new file mode 100644
index 00000000000..e3dab92de5a
--- /dev/null
+++ b/tests/bugs/protocol/bug-1390914.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+cleanup;
+
+#test that fops are not wound on anon-fd when fd is not open on that brick
+TEST glusterd;
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3};
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume set $V0 diagnostics.client-log-level DEBUG
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $GFS -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0;
+
+TEST touch $M0/1
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST fd_open 200 'w' "$M0/1"
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#lk should only happen on 2 bricks. If there is a bug, it will plant a lock
+#with an anon-fd on the first brick, which will never be released because flush
+#won't be wound below the server xlator for an anon-fd
+TEST flock -x -n 200
+TEST fd_close 200
+
+TEST fd_open 200 'w' "$M0/1"
+#this lock will fail if there is a stale lock
+TEST flock -x -n 200
+TEST fd_close 200
+cleanup;
diff --git a/tests/bugs/protocol/bug-1433815-auth-allow.t b/tests/bugs/protocol/bug-1433815-auth-allow.t
index fa22ad8afd5..a78c0eb7111 100644
--- a/tests/bugs/protocol/bug-1433815-auth-allow.t
+++ b/tests/bugs/protocol/bug-1433815-auth-allow.t
@@ -17,6 +17,7 @@ TEST $CLI volume create $V0 $H0:$B0/$V0
# Set auth.allow so it *doesn't* include ourselves.
TEST $CLI volume set $V0 auth.allow 1.2.3.4
TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
# "System getspec" will include the username and password if the request comes
# from a server (which we are). Unfortunately, this will cause authentication
diff --git a/tests/bugs/readdir-ahead/bug-1436090.t b/tests/bugs/readdir-ahead/bug-1436090.t
index 58e9093f1c3..e0877f15684 100755
--- a/tests/bugs/readdir-ahead/bug-1436090.t
+++ b/tests/bugs/readdir-ahead/bug-1436090.t
@@ -19,12 +19,12 @@ EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
TEST glusterfs -s $H1 --volfile-id $V0 $M0;
TEST mkdir $M0/dir1
-# Create a large file (3.2 GB), so that rebalance takes time
-# Reading from /dev/urandom is slow, so we will cat it together
-dd if=/dev/urandom of=/tmp/FILE2 bs=64k count=10240
-for i in {1..5}; do
- cat /tmp/FILE2 >> $M0/dir1/foo
-done
+# Create a large file (4 GB), so that rebalance takes time
+# Since we really don't care about the contents of the file, we use fallocate
+# to generate the file much faster. We could also use truncate, which is even
+# faster, but rebalance could take advantage of a sparse file and migrate it
+# in an optimized way, and we don't want a fast migration.
+TEST fallocate -l 4G $M0/dir1/foo
TEST mv $M0/dir1/foo $M0/dir1/bar
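
The switch to fallocate above allocates the 4 GB file without writing any data, which is far faster than dd from /dev/urandom while still producing a fully allocated (non-sparse) file, so rebalance cannot short-cut the migration. A small comparison sketch (hypothetical target directory /mnt/test):

    #!/bin/bash
    dir=/mnt/test                          # hypothetical mount point

    # Slow: generates and writes 4 GB of random data.
    # dd if=/dev/urandom of="$dir/foo" bs=64k count=65536

    # Fast: allocates 4 GB of real blocks without writing them.
    fallocate -l 4G "$dir/foo"

    # Fastest, but the result is sparse, which rebalance can migrate in an
    # optimized way -- not what this test wants.
    # truncate -s 4G "$dir/foo"

    du -h --apparent-size "$dir/foo"       # logical size
    du -h "$dir/foo"                       # allocated size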
diff --git a/tests/bugs/replicate/bug-1046624.t b/tests/bugs/replicate/bug-1046624.t
index 9ae40879228..e2762ea6764 100755
--- a/tests/bugs/replicate/bug-1046624.t
+++ b/tests/bugs/replicate/bug-1046624.t
@@ -25,11 +25,12 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Mount native
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 --use-readdirp=no
+TEST ${GFS} --volfile-server=$H0 --volfile-id=$V0 --use-readdirp=no $M0
TEST `echo "TEST-FILE" > $M0/File`
TEST `mkdir $M0/Dir`
TEST kill_brick $V0 $H0 $B0/${V0}-0
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
TEST `ln -s $M0/File $M0/Link1`
TEST `ln -s $M0/Dir $M0/Link2`
diff --git a/tests/bugs/replicate/bug-1101647.t b/tests/bugs/replicate/bug-1101647.t
index 8f420eec012..708bc1a1e29 100644
--- a/tests/bugs/replicate/bug-1101647.t
+++ b/tests/bugs/replicate/bug-1101647.t
@@ -12,6 +12,8 @@ TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0;
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
#Create base entry in indices/xattrop
echo "Data">$M0/file
diff --git a/tests/bugs/replicate/bug-1130892.t b/tests/bugs/replicate/bug-1130892.t
index 0f57d669674..c7509f33cc2 100644
--- a/tests/bugs/replicate/bug-1130892.t
+++ b/tests/bugs/replicate/bug-1130892.t
@@ -17,9 +17,9 @@ EXPECT 'Created' volinfo_field $V0 'Status';
TEST gluster volume set $V0 self-heal-daemon off
# Enable Client side heal
-TEST $CLI volume set $V0 cluster.data-self-heal on
-TEST $CLI volume set $V0 cluster.metadata-self-heal on
-TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
# Disable all perf-xlators
TEST $CLI volume set $V0 performance.quick-read off
@@ -33,7 +33,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
# FUSE Mount
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST ${GFS} -s $H0 --volfile-id $V0 $M0
# Create files and dirs
TEST mkdir -p $M0/one/two/
@@ -41,6 +41,7 @@ TEST `echo "Carpe diem" > $M0/one/two/three`
# Simulate disk-replacement
TEST kill_brick $V0 $H0 $B0/${V0}-1
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
TEST rm -rf $B0/${V0}-1/one
TEST rm -rf $B0/${V0}-1/.glusterfs
@@ -55,10 +56,12 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST stat $M0/one
+sleep 1
+
# Check pending xattrs
EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
-EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
TEST gluster volume set $V0 self-heal-daemon on
diff --git a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
index 5fe8be07b50..b69a38ae788 100644
--- a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
+++ b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
@@ -25,9 +25,11 @@ iatt=$(stat -c "%g:%u:%A" file)
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT 2 get_pending_heal_count $V0
#Trigger metadataheal
TEST stat file
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#iattrs must be matching
iatt1=$(stat -c "%g:%u:%A" $B0/brick0/file)
diff --git a/tests/bugs/replicate/bug-1180545.t b/tests/bugs/replicate/bug-1180545.t
index e9531625ee2..5e40edd6c38 100644
--- a/tests/bugs/replicate/bug-1180545.t
+++ b/tests/bugs/replicate/bug-1180545.t
@@ -7,6 +7,31 @@
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../afr.rc
+function check_sh_entries() {
+ local expected="$1"
+ local count=
+ local good="0"
+ shift
+
+ for i in $*; do
+ count="$(count_sh_entries $i)"
+ if [[ "x${count}" == "x${expected}" ]]; then
+ good="$((good + 1))"
+ fi
+ done
+ if [[ "x${good}" != "x${last_good}" ]]; then
+ last_good="${good}"
+# This triggers a sweep of the heal index. However if more than one brick
+# tries to heal the same directory at the same time, one of them will take
+# the lock and the other will give up, waiting for the next heal cycle, which
+# is set to 60 seconds (the minimum valid value). So, each time we detect
+# that one brick has completed the heal, we trigger another heal.
+ $CLI volume heal $V0
+ fi
+
+ echo "${good}"
+}
+
cleanup;
TEST glusterd
@@ -15,6 +40,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
TEST $CLI volume set $V0 cluster.heal-timeout 60
TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
TEST $CLI volume start $V0
TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
@@ -35,13 +61,16 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
#Trigger heal and verify number of entries in backend
TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/brick0
-EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/brick1
+last_good=""
+
+EXPECT_WITHIN $HEAL_TIMEOUT "2" check_sh_entries 2 $B0/brick{0,1}
+
#Two entries for DIR and two for FILE
EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
TEST diff <(ls $B0/brick0/DIR) <(ls $B0/brick1/DIR)
diff --git a/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t b/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
index c4752c488f4..6ff471fbf15 100644
--- a/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
+++ b/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
@@ -11,19 +11,27 @@ TEST pidof glusterd;
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume start $V0;
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
TEST mkdir $M0/dir
TEST touch $M0/dir/file{1..5}
#Create entry split-brain
TEST kill_brick $V0 $H0 $B0/$V0"1"
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
TEST touch $M0/dir/FILE
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "^Y$" force_umount $M0
TEST $CLI volume start $V0 force
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
TEST kill_brick $V0 $H0 $B0/$V0"0"
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
TEST touch $M0/dir/FILE
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "^Y$" force_umount $M0
TEST $CLI volume start $V0 force
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
cd $M0/dir
EXPECT "6" echo $(ls | wc -l)
diff --git a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
index 0767f47fdda..10ce0131f4f 100644
--- a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
+++ b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
@@ -49,25 +49,15 @@ TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
-#Kill brick 0 and turn on the client side heal and do ls to trigger the heal.
-#The pending xattrs on bricks 1 & 2 should have pending entry on brick 0.
-TEST kill_brick $V0 $H0 $B0/${V0}0
+# Earlier, this test killed one brick and checked that entry heal did not
+# reset the pending xattrs for the down brick. Entry heal now requires all
+# bricks to be up, so that check has been removed from this .t.
+
TEST $CLI volume set $V0 cluster.data-self-heal on
TEST $CLI volume set $V0 cluster.metadata-self-heal on
TEST $CLI volume set $V0 cluster.entry-self-heal on
TEST ls $M0
-EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
-EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
-EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
-EXPECT_WITHIN $HEAL_TIMEOUT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
-
-#Bring back all the bricks and trigger the heal again by doing ls. Now the
-#pending xattrs on all the bricks should be 0.
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-TEST ls $M0
-
TEST cat $M0/f1
TEST cat $M0/f2
TEST cat $M0/f3
diff --git a/tests/bugs/replicate/bug-1468279-source-not-blaming-sinks.t b/tests/bugs/replicate/bug-1468279-source-not-blaming-sinks.t
deleted file mode 100644
index 054a4adb90d..00000000000
--- a/tests/bugs/replicate/bug-1468279-source-not-blaming-sinks.t
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
-TEST $CLI volume start $V0
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-TEST $CLI volume set $V0 cluster.metadata-self-heal off
-TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
-TEST touch $M0/file
-
-# Kill B1, create a pending metadata heal.
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST setfattr -n user.xattr -v value1 $M0/file
-EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/file
-EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2/file
-
-# Kill B2, heal from B3 to B1.
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-TEST kill_brick $V0 $H0 $B0/${V0}1
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-$CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-0 "metadata"
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-
-# Create another pending metadata heal.
-TEST setfattr -n user.xattr -v value2 $M0/file
-EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/file
-EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2/file
-
-# Kill B1, heal from B3 to B2
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
-TEST kill_brick $V0 $H0 $B0/${V0}0
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-$CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-1 "metadata"
-TEST $CLI volume set $V0 cluster.self-heal-daemon off
-
-# ALL bricks up again.
-TEST $CLI volume start $V0 force
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
-# B1 and B2 blame each other, B3 doesn't blame anyone.
-EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/file
-EXPECT "0000000000000010000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/file
-EXPECT "0000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2/file
-EXPECT "0000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2/file
-TEST $CLI volume set $V0 cluster.self-heal-daemon on
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
-TEST $CLI volume heal $V0
-EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
-
-cleanup;
diff --git a/tests/bugs/replicate/bug-1493415-gfid-heal.t b/tests/bugs/replicate/bug-1493415-gfid-heal.t
index 125c35a7a21..8a79febf4b4 100644
--- a/tests/bugs/replicate/bug-1493415-gfid-heal.t
+++ b/tests/bugs/replicate/bug-1493415-gfid-heal.t
@@ -27,6 +27,11 @@ gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
TEST setfattr -x trusted.gfid $B0/${V0}1/f1
TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+# storage/posix treats a file without a gfid that was modified less than a
+# second ago as non-existent, so wait a couple of seconds to force posix to
+# treat it as a valid file that merely lacks a gfid.
+sleep 2
+
# Assume there were no pending xattrs on parent dir due to 1st brick crashing
# too. Then name heal from client must heal the gfid.
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
@@ -52,6 +57,11 @@ TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir
create_brick_xattrop_entry $B0/${V0}0 dir
+# storage/posix treats a file without a gfid that was modified less than a
+# second ago as non-existent, so wait a couple of seconds to force posix to
+# treat it as a valid file that merely lacks a gfid.
+sleep 2
+
#Trigger entry-heal via shd
TEST $CLI volume set $V0 self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
diff --git a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
index 26f90497d6f..49c4dea4e9c 100644
--- a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
+++ b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
@@ -53,8 +53,6 @@ TEST ! ls $B0/${V0}1/file$i
TEST ls $B0/${V0}2/file$i
dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}2)
TEST [ "$dirty" != "000000000000000000000000" ]
-EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2/file$i
-EXPECT "000000010000000100000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2/file$i
TEST $CLI volume set $V0 self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
diff --git a/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
new file mode 100644
index 00000000000..78581e99614
--- /dev/null
+++ b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+CHANGELOG_PATH_0="$B0/${V0}2/.glusterfs/changelogs"
+ROLLOVER_TIME=100
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST dd if=/dev/zero of=$M0/file1 bs=128K count=5
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST $CLI volume profile $V0 info
+truncate_count=$($CLI volume profile $V0 info | grep TRUNCATE | awk '{count += $8} END {print count}')
+
+EXPECT "1" echo $truncate_count
+EXPECT "1" check_changelog_op ${CHANGELOG_PATH_0} "^ D "
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1696599-io-hang.t b/tests/bugs/replicate/bug-1696599-io-hang.t
new file mode 100755
index 00000000000..869cdb94bda
--- /dev/null
+++ b/tests/bugs/replicate/bug-1696599-io-hang.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+#Tests that local structures in afr are removed from granted/blocked list of
+#locks when inodelk fails on all bricks
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.client-io-threads off
+TEST $CLI volume set $V0 delay-gen locks
+TEST $CLI volume set $V0 delay-gen.delay-duration 5000000
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.enable finodelk
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file
+#Trigger a write and stop the bricks so that inodelks fail on all bricks,
+#leading to the lock-failure condition
+echo abc >> $M0/file &
+
+TEST $CLI volume stop $V0
+TEST $CLI volume reset $V0 delay-gen
+wait
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 2
+#Test that only one write succeeded; this verifies that delay-gen worked as
+#expected
+echo abc >> $M0/file
+EXPECT "abc" cat $M0/file
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
new file mode 100644
index 00000000000..76d1f2170f2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
@@ -0,0 +1,136 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume heal $V0 disable
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+###############################################################################
+# Case of 2 bricks blaming the third and the third blaming the other two.
+
+TEST mkdir $M0/dir
+
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST setfattr -n user.metadata -v 1 $M0/dir
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/dir trusted.afr.$V0-client-1 metadata
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/dir trusted.afr.$V0-client-1 metadata
+CLIENT_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $M0/dir)
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"1"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir)
+
+TEST [ "$CLIENT_XATTR" == "$B0_XATTR" ]
+TEST [ "$CLIENT_XATTR" == "$B1_XATTR" ]
+TEST [ "$CLIENT_XATTR" == "$B2_XATTR" ]
+TEST setfattr -x user.metadata $M0/dir
+
+###############################################################################
+# Case of each brick blaming the next one in a cyclic manner
+
+TEST $CLI volume heal $V0 disable
+TEST `echo "hello" >> $M0/dir/file`
+# Mark cyclic xattrs and modify metadata directly on the bricks.
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000100000000 $B0/$V0"0"/dir/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"2"/dir/file
+
+setfattr -n user.metadata -v 1 $B0/$V0"0"/dir/file
+setfattr -n user.metadata -v 2 $B0/$V0"1"/dir/file
+setfattr -n user.metadata -v 3 $B0/$V0"2"/dir/file
+
+# Add entry to xattrop dir to trigger index heal.
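+# The index xlator keeps a base entry in the xattrop directory; hard-linking
+# it under a file's gfid marks that gfid as needing heal.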
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/file))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir/file)
+
+TEST [ "$B0_XATTR" == "$B1_XATTR" ]
+TEST [ "$B0_XATTR" == "$B2_XATTR" ]
+TEST rm -f $M0/dir/file
+
+###############################################################################
+# Case of 2 bricks having quorum blaming and the other having only one blaming.
+
+TEST $CLI volume heal $V0 disable
+TEST `echo "hello" >> $M0/dir/file`
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST setfattr -n user.metadata -v 1 $M0/dir/file
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/dir/file trusted.afr.$V0-client-1 metadata
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/dir/file trusted.afr.$V0-client-1 metadata
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+
+# B0 must blame B2
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"0"/dir/file
+
+# Modify the metadata directly on the bricks B1 & B2.
+setfattr -n user.metadata -v 2 $B0/$V0"1"/dir/file
+setfattr -n user.metadata -v 3 $B0/$V0"2"/dir/file
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir/file)
+
+TEST [ "$B0_XATTR" == "$B1_XATTR" ]
+TEST [ "$B0_XATTR" == "$B2_XATTR" ]
+
+###############################################################################
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
new file mode 100644
index 00000000000..0aeaaafc84c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+
+##########################################################################################
+# The GFID link file and the gfid xattr are missing on one brick, and all the bricks are blamed.
+
+TEST touch $M0/dir/file
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+#B0 and B2 must blame B1
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on one brick.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 seconds so that posix considers this a valid file that merely
+# lacks a gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail: the file is only missing its gfid xattr and the
+# .glusterfs link file, which is not a real gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+rm -f $M0/dir/file
+
+
+###########################################################################################
+# The GFID link file and the gfid xattr are missing on two bricks, and all the bricks are blamed.
+
+TEST $CLI volume heal $V0 disable
+TEST touch $M0/dir/file
+#TEST kill_brick $V0 $H0 $B0/$V0"1"
+
+#B0 and B2 must blame B1
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on two bricks.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/file
+TEST rm -f $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 seconds so that posix considers this a valid file that merely
+# lacks a gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail: the file is only missing its gfid xattr and the
+# .glusterfs link file, which is not a real gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}1/dir/file
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1728770-pass-xattrs.t b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
new file mode 100644
index 00000000000..159c4fcc6a1
--- /dev/null
+++ b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
+function fop_on_bad_disk {
+ local path=$1
+ mkdir $path/dir{1..1000} 2>/dev/null
+ mv $path/dir1 $path/newdir
+ touch $path/foo.txt
+ echo $?
+}
+
+function ls_fop_on_bad_disk {
+ local path=$1
+ ls $path
+ echo $?
+}
+
+TEST init_n_bricks 6;
+TEST setup_lvm 6;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3 $H0:$L4 $H0:$L5 $H0:$L6;
+TEST $CLI volume set $V0 health-check-interval 1000;
+
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+#corrupt last disk
+dd if=/dev/urandom of=/dev/mapper/patchy_snap_vg_6-brick_lvm bs=512K count=200 status=progress && sync
+
+
+# Test the disk is now returning EIO for touch and ls
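+# Both helpers echo the exit status of their last command: touch is expected
+# to fail with status 1 and ls with status 2 once the disk returns EIO.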
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^1$" fop_on_bad_disk "$L6"
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^2$" ls_fop_on_bad_disk "$L6"
+
+TEST touch $M0/foo{1..100}
+TEST $CLI volume remove-brick $V0 replica 3 $H0:$L4 $H0:$L5 $H0:$L6 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$L4 $H0:$L5 $H0:$L6";
+
+#Check that the remove-brick status does not report any failed or skipped files
+var=`$CLI volume remove-brick $V0 $H0:$L4 $H0:$L5 $H0:$L6 status | grep completed`
+TEST [ `echo $var | awk '{print $5}'` = "0" ]
+TEST [ `echo $var | awk '{print $6}'` = "0" ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
new file mode 100644
index 00000000000..14dfae89135
--- /dev/null
+++ b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function time_stamps_match {
+ path=$1
+ mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
+ atime_source_b0=$(get_atime $B0/${V0}0/$path)
+ mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
+ atime_source_b2=$(get_atime $B0/${V0}2/$path)
+ mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
+ atime_sink_b1=$(get_atime $B0/${V0}1/$path)
+
+ #The same brick must be the source of heal for both atime and mtime.
+ if [[ ( $mtime_source_b0 -eq $mtime_sink_b1 && $atime_source_b0 -eq $atime_sink_b1 ) || \
+ ( $mtime_source_b2 -eq $mtime_sink_b1 && $atime_source_b2 -eq $atime_sink_b1 ) ]]
+ then
+ echo "Y"
+ else
+ echo "Mtimes: $mtime_source_b0:$mtime_sink_b1:$mtime_source_b2 Atimes: $atime_source_b0:$atime_sink_b1:$atime_source_b2"
+ fi
+
+}
+
+function mtimes_match {
+ path=$1
+ mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
+ mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
+ mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
+
+ if [[ ( $mtime_source_b0 -eq $mtime_sink_b1) || \
+ ( $mtime_source_b2 -eq $mtime_sink_b1) ]]
+ then
+ echo "Y"
+ else
+ echo "Mtimes: $mtime_source_b0:$mtime_sink_b1:$mtime_source_b2"
+ fi
+
+}
+
+# Test that the parent dir's timestamps are restored during entry-heal.
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+###############################################################################
+TEST mkdir $M0/DIR
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/DIR/FILE
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+EXPECT "Y" time_stamps_match DIR
+ctime_source1=$(get_ctime $B0/${V0}0/$path)
+ctime_source2=$(get_ctime $B0/${V0}2/$path)
+ctime_sink=$(get_ctime $B0/${V0}1/$path)
+TEST [ $ctime_source1 -eq $ctime_sink ]
+TEST [ $ctime_source2 -eq $ctime_sink ]
+
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+TEST mkdir $M0/DIR2
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/DIR2/FILE
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+#Parallel heals may change the atime after the heal completes, so only the
+#mtime is checked here.
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+EXPECT "Y" mtimes_match DIR2
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+cleanup;
diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
new file mode 100644
index 00000000000..011535066f9
--- /dev/null
+++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+function get_cumulative_opendir_count {
+#The sed command prints the content between "Cumulative" and "Interval", keeping only the Cumulative stats
+ $CLI volume profile $V0 info |sed -n '/^Cumulative/,/^Interval/p'|grep OPENDIR| awk '{print $8}'|tr -d '\n'
+}
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
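+# With the self-heal daemon disabled, launching heal from the CLI must fail.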
+TEST ! $CLI volume heal $V0
+
+# Enable shd and verify that index crawl is triggered immediately.
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info clear
+TEST $CLI volume heal $V0 enable
+# Each brick does 4 opendirs, corresponding to the dirty, xattrop, entry-changes and anonymous-inode directories
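+# get_cumulative_opendir_count concatenates the per-brick counts, so "444" means 4 opendirs on each of the 3 bricks.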
+EXPECT_WITHIN 4 "^444$" get_cumulative_opendir_count
+
+# Check that a change in heal-timeout is honoured immediately.
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+sleep 10
+# Two crawls must have happened.
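+# 4 opendirs from the initial crawl plus 2 more crawls x 4 directories = 12 per brick, hence "121212".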
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^121212$" get_cumulative_opendir_count
+
+# shd must not heal if it is disabled and heal-timeout is changed.
+TEST $CLI volume heal $V0 disable
+#Wait for configuration update and any opendir fops to complete
+sleep 10
+TEST $CLI volume profile $V0 info clear
+TEST $CLI volume set $V0 cluster.heal-timeout 6
+#Better to wait for more than 6 seconds to account for configuration updates
+sleep 10
+COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
+TEST [ -z "$COUNT" ]
+cleanup;
diff --git a/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
new file mode 100644
index 00000000000..96279084065
--- /dev/null
+++ b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+function check_gfid_and_link_count
+{
+ local file=$1
+
+ file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
+ TEST [ ! -z $file_gfid_b0 ]
+ file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
+ file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
+ EXPECT $file_gfid_b0 echo $file_gfid_b1
+ EXPECT $file_gfid_b0 echo $file_gfid_b2
+
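+    # Link count 2 = the file name itself plus its .glusterfs gfid hardlink.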
+ EXPECT "2" stat -c %h $B0/${V0}0/$file
+ EXPECT "2" stat -c %h $B0/${V0}1/$file
+ EXPECT "2" stat -c %h $B0/${V0}2/$file
+}
+TESTS_EXPECTED_IN_LOOP=18
+
+################################################################################
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST `echo "File 1 " > $M0/dir/file1`
+TEST touch $M0/dir/file{2..4}
+
+# Remove file2 from 1st & 3rd bricks
+TEST rm -f $B0/$V0"0"/dir/file2
+TEST rm -f $B0/$V0"2"/dir/file2
+
+# Remove file3 and the .glusterfs hardlink from 1st & 2nd bricks
+gfid_file3=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file3)
+gfid_str_file3=$(gf_gfid_xattr_to_str $gfid_file3)
+TEST rm $B0/$V0"0"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm $B0/$V0"1"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm -f $B0/$V0"0"/dir/file3
+TEST rm -f $B0/$V0"1"/dir/file3
+
+# Remove the .glusterfs hardlink and the gfid xattr of file4 on 3rd brick
+gfid_file4=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file4)
+gfid_str_file4=$(gf_gfid_xattr_to_str $gfid_file4)
+TEST rm $B0/$V0"2"/.glusterfs/${gfid_str_file4:0:2}/${gfid_str_file4:2:2}/$gfid_str_file4
+TEST setfattr -x trusted.gfid $B0/$V0"2"/dir/file4
+
+# B0 and B2 blame each other
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir on first brick.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# All the files must be present on all the bricks after conservative merge and
+# should have the gfid xattr and the .glusterfs hardlink.
+check_gfid_and_link_count dir/file1
+check_gfid_and_link_count dir/file2
+check_gfid_and_link_count dir/file3
+check_gfid_and_link_count dir/file4
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
new file mode 100644
index 00000000000..c1bdf34ee6d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard enable
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Create split-brain by setting afr xattrs/gfids manually.
+#file1 is non-sharded and will be in data split-brain.
+#file2 will have one shard which will be in data split-brain.
+#file3 will have one shard which will be in gfid split-brain.
+#file4 will have one shard which will be in data & metadata split-brain.
+TEST dd if=/dev/zero of=$M0/file1 bs=1024 count=1024 oflag=direct
+TEST dd if=/dev/zero of=$M0/file2 bs=1M count=6 oflag=direct
+TEST dd if=/dev/zero of=$M0/file3 bs=1M count=6 oflag=direct
+TEST dd if=/dev/zero of=$M0/file4 bs=1M count=6 oflag=direct
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#-------------------------------------------------------------------------------
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/file1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/file1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/file1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/file1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/file1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/file1
+
+#-------------------------------------------------------------------------------
+gfid_f2=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file2))
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
+
+#-------------------------------------------------------------------------------
+TESTS_EXPECTED_IN_LOOP=5
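+# Give the brick's copy of the first shard a brand new gfid and relink it under
+# .glusterfs, creating a gfid split-brain for that shard (used for file3).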
+function assign_new_gfid {
+ brickpath=$1
+ filename=$2
+ gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/$filename))
+ gfid_shard=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/.shard/$gfid.1))
+
+ TEST rm $brickpath/.glusterfs/${gfid_shard:0:2}/${gfid_shard:2:2}/$gfid_shard
+ TEST setfattr -x trusted.gfid $brickpath/.shard/$gfid.1
+ new_gfid=$(get_random_gfid)
+ new_gfid_str=$(gf_gfid_xattr_to_str $new_gfid)
+ TEST setfattr -n trusted.gfid -v $new_gfid $brickpath/.shard/$gfid.1
+ TEST mkdir -p $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}
+ TEST ln $brickpath/.shard/$gfid.1 $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}/$new_gfid_str
+}
+assign_new_gfid $B0/$V0"1" file3
+assign_new_gfid $B0/$V0"2" file3
+
+#-------------------------------------------------------------------------------
+gfid_f4=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file4))
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
+
+#-------------------------------------------------------------------------------
+#Add entry to xattrop dir on first brick and check for split-brain.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+
+gfid_f1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f1
+
+gfid_f2_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f2.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f2_shard1
+
+gfid_f3=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file3))
+gfid_f3_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f3.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f3_shard1
+
+gfid_f4_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f4.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f4_shard1
+
+#-------------------------------------------------------------------------------
+#gfid split-brain won't show up in split-brain count.
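+#file1, file2's shard and file4's shard are in data/metadata split-brain (hence
+#the count of 3); file3's shard is in gfid split-brain and appears only in the
+#pending heal count.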
+EXPECT "3" afr_get_split_brain_count $V0
+EXPECT_NOT "^0$" get_pending_heal_count $V0
+
+#Resolve split-brains
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /file1
+GFIDSTR="gfid:$gfid_f2_shard1"
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 $GFIDSTR
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f3.1
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f4.1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
new file mode 100644
index 00000000000..7e24eaec03d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0..2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch $M0/a
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/a
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime0=$(get_mtime $B0/brick0/a)
+mtime1=$(get_mtime $B0/brick1/a)
+TEST [ $mtime0 -eq $mtime1 ]
+
+ctime0=$(get_ctime $B0/brick0/a)
+ctime1=$(get_ctime $B0/brick1/a)
+TEST [ $ctime0 -eq $ctime1 ]
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+
+TEST touch $M0/b
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/b
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime2=$(get_mtime $B0/brick0/b)
+mtime3=$(get_mtime $B0/brick1/b)
+TEST [ $mtime2 -eq $mtime3 ]
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1801624-entry-heal.t b/tests/bugs/replicate/bug-1801624-entry-heal.t
new file mode 100644
index 00000000000..94b465181fa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1801624-entry-heal.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+TEST $CLI volume set $V0 heal-timeout 5
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Re-create the file when a brick is down.
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST rm $M0/FILE
+echo "New Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
+
+# Launching index heal must not reset parent dir afr xattrs or remove granular entry indices.
+$CLI volume heal $V0 # CLI will fail but heal is launched anyway.
+TEST sleep 5 # give index heal a chance to do one run.
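+# FILE was removed and re-created while brick1 was down, i.e. two entry
+# transactions on the parent dir, so the good bricks must still show a pending
+# entry count of 2 for brick1.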
+brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick0/)
+brick2_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick2/)
+TEST [ $brick0_pending -eq "000000000000000000000002" ]
+TEST [ $brick2_pending -eq "000000000000000000000002" ]
+EXPECT "FILE" ls $B0/brick0/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+EXPECT "FILE" ls $B0/brick2/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+$CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# No gfid-split-brain (i.e. EIO) must be seen. Try on fresh mount to avoid cached values.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST cat $M0/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-880898.t b/tests/bugs/replicate/bug-880898.t
index 123e7e16425..660d34ca25f 100644
--- a/tests/bugs/replicate/bug-880898.t
+++ b/tests/bugs/replicate/bug-880898.t
@@ -1,12 +1,19 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
pkill glusterfs
uuid=""
for line in $(cat $GLUSTERD_WORKDIR/glusterd.info)
diff --git a/tests/bugs/replicate/bug-977797.t b/tests/bugs/replicate/bug-977797.t
index c2c0e67ebff..9a8f36c956c 100755
--- a/tests/bugs/replicate/bug-977797.t
+++ b/tests/bugs/replicate/bug-977797.t
@@ -30,7 +30,7 @@ TEST $CLI volume set $V0 cluster.data-self-heal on
TEST $CLI volume set $V0 cluster.metadata-self-heal on
TEST $CLI volume set $V0 cluster.entry-self-heal on
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
TEST mkdir -p $M0/a
@@ -77,7 +77,7 @@ afr_get_specific_changelog_xattr $B0/$V0"2"/a/file trusted.afr.$V0-client-1 "dat
EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
afr_get_specific_changelog_xattr $B0/$V0"1"/a trusted.afr.$V0-client-0 "entry"
-EXPECT_WITHIN HEAL_TIMEOUT "00000000" \
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
afr_get_specific_changelog_xattr $B0/$V0"1"/a trusted.afr.$V0-client-1 "entry"
EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
diff --git a/tests/bugs/replicate/issue-1254-prioritize-enospc.t b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
new file mode 100644
index 00000000000..fab94b71b27
--- /dev/null
+++ b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function create_bricks {
+ TEST truncate -s 100M $B0/brick0
+ TEST truncate -s 100M $B0/brick1
+ TEST truncate -s 20M $B0/brick2
+ LO1=`SETUP_LOOP $B0/brick0`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO1
+ LO2=`SETUP_LOOP $B0/brick1`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO2
+ LO3=`SETUP_LOOP $B0/brick2`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO3
+ TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+ TEST MOUNT_LOOP $LO1 $B0/${V0}0
+ TEST MOUNT_LOOP $LO2 $B0/${V0}1
+ TEST MOUNT_LOOP $LO3 $B0/${V0}2
+}
+
+function create_files {
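+    # Keep creating files on the mount until the smallest (20M) brick ${V0}2
+    # fills up, detected by the newly touched file no longer appearing on it.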
+ local i=1
+ while (true)
+ do
+ touch $M0/file$i
+ if [ -e $B0/${V0}2/file$i ];
+ then
+ ((i++))
+ else
+ break
+ fi
+ done
+}
+
+TESTS_EXPECTED_IN_LOOP=13
+
+#Arbiter volume: Check for ENOSPC when arbiter brick becomes full#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((echo "Test" > $M0/file-3) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
+
+#Replica-3 volume: Check for ENOSPC when one of the brick becomes full#
+#Keeping the third brick of lower size to simulate disk full scenario#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((cat /dev/zero > $M0/file1) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
diff --git a/tests/bugs/replicate/mdata-heal-no-xattrs.t b/tests/bugs/replicate/mdata-heal-no-xattrs.t
new file mode 100644
index 00000000000..d3b0c504c80
--- /dev/null
+++ b/tests/bugs/replicate/mdata-heal-no-xattrs.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Change permission on brick-0: simulates the case where there is metadata
+# mismatch but no pending xattrs. This brick will become the source for heal.
+TEST chmod +x $B0/$V0"0"/FILE
+
+# Add gfid to xattrop
+xattrop_b0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_b0`
+gfid_str_FILE=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/FILE))
+TEST ln $xattrop_b0/$base_entry_b0 $xattrop_b0/$gfid_str_FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Brick-0 should contain xattrs blaming the other 2 bricks.
+# The values are zero because the heal is already complete.
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/FILE
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}0/FILE
+
+# Brick-1 and Brick-2 must not contain any afr xattrs.
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+# check permission bits.
+EXPECT '755' stat -c %a $B0/${V0}0/FILE
+EXPECT '755' stat -c %a $B0/${V0}1/FILE
+EXPECT '755' stat -c %a $B0/${V0}2/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/ta-inode-refresh-read.t b/tests/bugs/replicate/ta-inode-refresh-read.t
new file mode 100644
index 00000000000..6dd6ff7f163
--- /dev/null
+++ b/tests/bugs/replicate/ta-inode-refresh-read.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Test read transaction inode refresh logic for thin-arbiter.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../thin-arbiter.rc
+cleanup;
+TEST ta_create_brick_and_volfile brick0
+TEST ta_create_brick_and_volfile brick1
+TEST ta_create_ta_and_volfile ta
+TEST ta_start_brick_process brick0
+TEST ta_start_brick_process brick1
+TEST ta_start_ta_process ta
+
+TEST ta_create_mount_volfile brick0 brick1 ta
+# Set afr xlator options to choose brick0 as read-subvol.
+sed -i '/iam-self-heal-daemon/a \ option read-subvolume-index 0' $B0/mount.vol
+TEST [ $? -eq 0 ]
+sed -i '/iam-self-heal-daemon/a \ option choose-local false' $B0/mount.vol
+TEST [ $? -eq 0 ]
+
+TEST ta_start_mount_process $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "trusted.afr.patchy-ta-2" ls $B0/ta
+
+TEST touch $M0/FILE
+TEST ls $B0/brick0/FILE
+TEST ls $B0/brick1/FILE
+TEST ! ls $B0/ta/FILE
+TEST setfattr -n user.name -v ravi $M0/FILE
+
+# Remove the gfid hardlink from brick0, which is the read-subvol for FILE.
+# This triggers an inode refresh upon the getfattr and eventually calls
+# afr_ta_read_txn(). Without this patch, afr_ta_read_txn() would again query
+# brick0, causing the getfattr to fail.
+TEST rm -f $(gf_get_gfid_backend_file_path $B0/brick0 FILE)
+TEST getfattr -n user.name $M0/FILE
+
+cleanup;
diff --git a/tests/bugs/rpc/bug-954057.t b/tests/bugs/rpc/bug-954057.t
index 65af274f09d..40acdc2fdc7 100755
--- a/tests/bugs/rpc/bug-954057.t
+++ b/tests/bugs/rpc/bug-954057.t
@@ -25,7 +25,15 @@ TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
TEST mkdir $M0/dir
TEST mkdir $M0/nobody
-TEST chown nfsnobody:nfsnobody $M0/nobody
+grep nfsnobody /etc/passwd > /dev/null
+if [ $? -eq 1 ]; then
+    usr=nobody
+    grp=nobody
+else
+    usr=nfsnobody
+    grp=nfsnobody
+fi
+TEST chown $usr:$grp $M0/nobody
TEST `echo "file" >> $M0/file`
TEST cp $M0/file $M0/new
TEST chmod 700 $M0/new
diff --git a/tests/bugs/shard/bug-1272986.t b/tests/bugs/shard/bug-1272986.t
index 762887051fa..66e896ad0c4 100644
--- a/tests/bugs/shard/bug-1272986.t
+++ b/tests/bugs/shard/bug-1272986.t
@@ -16,16 +16,16 @@ TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1
# Write some data into a file, such that its size crosses the shard block size.
-TEST dd if=/dev/zero of=$M1/file bs=1M count=5 conv=notrunc
+TEST dd if=/dev/urandom of=$M1/file bs=1M count=5 conv=notrunc oflag=direct
md5sum1_reader=$(md5sum $M0/file | awk '{print $1}')
EXPECT "$md5sum1_reader" echo `md5sum $M1/file | awk '{print $1}'`
# Append some more data into the file.
-TEST `echo "abcdefg" >> $M1/file`
+TEST dd if=/dev/urandom of=$M1/file bs=256k count=1 conv=notrunc oflag=direct
-md5sum2_reader=$(md5sum $M0/file | awk '{print $1}')
+md5sum2_reader=$(dd if=$M0/file iflag=direct bs=256k| md5sum | awk '{print $1}')
# Test to see if the reader refreshes its cache correctly as part of the reads
# triggered through md5sum. If it does, then the md5sum on the reader and writer
diff --git a/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t b/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t
new file mode 100644
index 00000000000..3e4a65af19a
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136-lru-limit-equals-deletion-rate.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 120
+TEST $CLI volume set $V0 features.shard-deletion-rate 120
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST build_tester $(dirname $0)/bug-1696136.c -lgfapi -Wall -O2
+
+# Create a file
+TEST touch $M0/file1
+
+# Fallocate a 512MB file. This makes sure the number of participant shards exceeds the lru-limit
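+# (536870912 bytes / 4MB shard-block-size = 128 shards, above both the
+# lru-limit and deletion-rate of 120.)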
+TEST $(dirname $0)/bug-1696136 $H0 $V0 "0" "0" "536870912" /file1 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+rm -f $(dirname $0)/bug-1696136
+
+cleanup
diff --git a/tests/bugs/shard/bug-1696136.c b/tests/bugs/shard/bug-1696136.c
new file mode 100644
index 00000000000..cb650535b09
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136.c
@@ -0,0 +1,122 @@
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <glusterfs/api/glfs.h>
+#include <glusterfs/api/glfs-handles.h>
+
+enum fallocate_flag {
+ TEST_FALLOCATE_NONE,
+ TEST_FALLOCATE_KEEP_SIZE,
+ TEST_FALLOCATE_ZERO_RANGE,
+ TEST_FALLOCATE_PUNCH_HOLE,
+ TEST_FALLOCATE_MAX,
+};
+
+int
+get_fallocate_flag(int opcode)
+{
+ int ret = 0;
+
+ switch (opcode) {
+ case TEST_FALLOCATE_NONE:
+ ret = 0;
+ break;
+ case TEST_FALLOCATE_KEEP_SIZE:
+ ret = FALLOC_FL_KEEP_SIZE;
+ break;
+ case TEST_FALLOCATE_ZERO_RANGE:
+ ret = FALLOC_FL_ZERO_RANGE;
+ break;
+ case TEST_FALLOCATE_PUNCH_HOLE:
+ ret = FALLOC_FL_PUNCH_HOLE;
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+ return ret;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret = 1;
+ int opcode = -1;
+ off_t offset = 0;
+ size_t len = 0;
+ glfs_t *fs = NULL;
+ glfs_fd_t *fd = NULL;
+
+ if (argc != 8) {
+ fprintf(stderr,
+ "Syntax: %s <host> <volname> <opcode> <offset> <len> "
+ "<file-path> <log-file>\n",
+ argv[0]);
+ return 1;
+ }
+
+ fs = glfs_new(argv[2]);
+ if (!fs) {
+ fprintf(stderr, "glfs_new: returned NULL\n");
+ return 1;
+ }
+
+ ret = glfs_set_volfile_server(fs, "tcp", argv[1], 24007);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_volfile_server: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_set_logging(fs, argv[7], 7);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_set_logging: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_init(fs);
+ if (ret != 0) {
+ fprintf(stderr, "glfs_init: returned %d\n", ret);
+ goto out;
+ }
+
+ opcode = atoi(argv[3]);
+ opcode = get_fallocate_flag(opcode);
+ if (opcode < 0) {
+ fprintf(stderr, "get_fallocate_flag: invalid flag \n");
+ goto out;
+ }
+
+ /* Note that off_t is signed but size_t isn't. */
+ offset = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
+
+ fd = glfs_open(fs, argv[6], O_RDWR);
+ if (fd == NULL) {
+ fprintf(stderr, "glfs_open: returned NULL\n");
+ goto out;
+ }
+
+ ret = glfs_fallocate(fd, opcode, offset, len);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_fallocate: returned %d\n", ret);
+ goto out;
+ }
+
+ ret = glfs_unlink(fs, argv[6]);
+ if (ret < 0) {
+ fprintf(stderr, "glfs_unlink: returned %d\n", ret);
+ goto out;
+ }
+ /* Sleep for 3s to give enough time for background deletion to complete
+ * during which if the bug exists, the process will crash.
+ */
+ sleep(3);
+ ret = 0;
+
+out:
+ if (fd)
+ glfs_close(fd);
+ glfs_fini(fs);
+ return ret;
+}
diff --git a/tests/bugs/shard/bug-1696136.t b/tests/bugs/shard/bug-1696136.t
new file mode 100644
index 00000000000..b6dc858f083
--- /dev/null
+++ b/tests/bugs/shard/bug-1696136.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 features.shard-lru-limit 120
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST build_tester $(dirname $0)/bug-1696136.c -lgfapi -Wall -O2
+
+# Create a file
+TEST touch $M0/file1
+
+# Fallocate a 512MB file. This makes sure the number of participant shards exceeds the lru-limit
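+# (536870912 bytes / 4MB shard-block-size = 128 shards, comfortably above the
+# lru-limit of 120.)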
+TEST $(dirname $0)/bug-1696136 $H0 $V0 "0" "0" "536870912" /file1 `gluster --print-logdir`/glfs-$V0.log
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+rm -f $(dirname $0)/bug-1696136
+
+cleanup
diff --git a/tests/bugs/shard/bug-1705884.t b/tests/bugs/shard/bug-1705884.t
new file mode 100644
index 00000000000..f6e50376a58
--- /dev/null
+++ b/tests/bugs/shard/bug-1705884.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fallocate.rc
+
+cleanup
+
+require_fallocate -l 1m $M0/file
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST fallocate -l 200M $M0/foo
+EXPECT `echo "$(( ( 200 * 1024 * 1024 ) / 512 ))"` stat -c %b $M0/foo
+TEST truncate -s 0 $M0/foo
+EXPECT "0" stat -c %b $M0/foo
+TEST fallocate -l 100M $M0/foo
+EXPECT `echo "$(( ( 100 * 1024 * 1024 ) / 512 ))"` stat -c %b $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-1738419.t b/tests/bugs/shard/bug-1738419.t
new file mode 100644
index 00000000000..8d0a31d9754
--- /dev/null
+++ b/tests/bugs/shard/bug-1738419.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 network.remote-dio off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST dd if=/dev/zero of=$M0/metadata bs=501 count=1
+
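+# The file is smaller than the 4KB direct read below, so dd must report
+# exactly 501 bytes copied rather than a rounded-up size.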
+EXPECT "501" echo $("dd" if=$M0/metadata bs=4096 count=1 of=/dev/null iflag=direct 2>&1 | awk '/bytes/ {print $1}')
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/shard/bug-shard-discard.c b/tests/bugs/shard/bug-shard-discard.c
index 15dca6c2181..6fa93fb89d1 100644
--- a/tests/bugs/shard/bug-shard-discard.c
+++ b/tests/bugs/shard/bug-shard-discard.c
@@ -50,8 +50,9 @@ main(int argc, char *argv[])
goto out;
}
- off = atoi(argv[4]);
- len = atoi(argv[5]);
+ /* Note that off_t is signed but size_t isn't. */
+ off = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
ret = glfs_discard(fd, off, len);
if (ret <= 0) {
diff --git a/tests/bugs/shard/issue-1243.t b/tests/bugs/shard/issue-1243.t
new file mode 100644
index 00000000000..ba22d2b74fe
--- /dev/null
+++ b/tests/bugs/shard/issue-1243.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.strict-o-direct on
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST $CLI volume set $V0 md-cache-timeout 10
+
+# Write data into a file such that its size crosses shard-block-size
+TEST dd if=/dev/zero of=$M0/foo bs=1048576 count=8 oflag=direct
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Execute a setxattr on the file.
+TEST setfattr -n trusted.libvirt -v some-value $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+# Execute a removexattr on the file.
+TEST setfattr -x trusted.libvirt $M0/foo
+
+# Size of the file should be the aggregated size, not the shard-block-size
+EXPECT '8388608' stat -c %s $M0/foo
+cleanup
diff --git a/tests/bugs/shard/issue-1281.t b/tests/bugs/shard/issue-1281.t
new file mode 100644
index 00000000000..9704caa8944
--- /dev/null
+++ b/tests/bugs/shard/issue-1281.t
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+#Open a file and store descriptor in fd = 5
+exec 5>$M0/foo
+
+#Unlink the same file that was opened in the previous step
+TEST unlink $M0/foo
+
+#Write something on the file using the open fd = 5
+echo "issue-1281" >&5
+
+#Write on the descriptor should be successful
+EXPECT 0 echo $?
+
+#Close the fd = 5
+exec 5>&-
+
+cleanup
diff --git a/tests/bugs/shard/issue-1425.t b/tests/bugs/shard/issue-1425.t
new file mode 100644
index 00000000000..bbe82c0e5b2
--- /dev/null
+++ b/tests/bugs/shard/issue-1425.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+FILE_COUNT_TIME=5
+
+function get_file_count {
+ ls $1* | wc -l
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST fallocate -l 20M $M0/foo
+gfid_new=$(get_gfid_string $M0/foo)
+
+# Check for the base shard
+TEST stat $M0/foo
+TEST stat $B0/${V0}0/foo
+
+# There should be 4 associated shards
+EXPECT_WITHIN $FILE_COUNT_TIME 4 get_file_count $B0/${V0}0/.shard/$gfid_new
+
+# There should be 1+4 shards and we expect 4 fewer lookups than on a build without this patch
+EXPECT "21" echo `$CLI volume profile $V0 info incremental | grep -w LOOKUP | awk '{print $8}'`
+
+# Delete the base shard and check shards get cleaned up
+TEST unlink $M0/foo
+
+TEST ! stat $M0/foo
+TEST ! stat $B0/${V0}0/foo
+
+# There should be no shards now
+EXPECT_WITHIN $FILE_COUNT_TIME 0 get_file_count $B0/${V0}0/.shard/$gfid_new
+cleanup
diff --git a/tests/bugs/shard/shard-fallocate.c b/tests/bugs/shard/shard-fallocate.c
index 3a784d3c02c..cb0714e8564 100644
--- a/tests/bugs/shard/shard-fallocate.c
+++ b/tests/bugs/shard/shard-fallocate.c
@@ -87,8 +87,9 @@ main(int argc, char *argv[])
goto out;
}
- offset = atoi(argv[4]);
- len = atoi(argv[5]);
+ /* Note that off_t is signed but size_t isn't. */
+ offset = strtol(argv[4], NULL, 10);
+ len = strtoul(argv[5], NULL, 10);
fd = glfs_open(fs, argv[6], O_RDWR);
if (fd == NULL) {
@@ -97,7 +98,7 @@ main(int argc, char *argv[])
}
ret = glfs_fallocate(fd, opcode, offset, len);
- if (ret <= 0) {
+ if (ret < 0) {
fprintf(stderr, "glfs_fallocate: returned %d\n", ret);
goto out;
}
diff --git a/tests/bugs/snapshot/bug-1109889.t b/tests/bugs/snapshot/bug-1109889.t
index 6b29cdd9eb1..5fdc7dc9506 100644
--- a/tests/bugs/snapshot/bug-1109889.t
+++ b/tests/bugs/snapshot/bug-1109889.t
@@ -19,9 +19,9 @@ TEST $CLI volume create $V0 $H0:$L1 $H0:$L2 $H0:$L3;
TEST $CLI volume start $V0;
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
-MOUNT_PID=`ps ax |grep "glusterfs --volfile-sever $H0 --volfile-id=$V0 $M0" | grep -v grep | awk '{print $1}' | head -1`
+MOUNT_PID=$(get_mount_process_pid $V0 $M0)
for i in {1..10} ; do echo "file" > $M0/file$i ; done
diff --git a/tests/bugs/snapshot/bug-1111041.t b/tests/bugs/snapshot/bug-1111041.t
index f771d64f2a3..efda9688d8b 100755
--- a/tests/bugs/snapshot/bug-1111041.t
+++ b/tests/bugs/snapshot/bug-1111041.t
@@ -11,6 +11,10 @@ function is_snapd_running {
$CLI volume status $1 | grep "Snapshot Daemon" | wc -l;
}
+function snapd_pid {
+ $CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $8'}
+}
+
TEST glusterd;
TEST pidof glusterd;
@@ -25,14 +29,12 @@ TEST $CLI volume set $V0 features.uss enable;
EXPECT "1" is_snapd_running $V0
-SNAPD_PID=$($CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $8'});
+SNAPD_PID=$(snapd_pid);
TEST [ $SNAPD_PID -gt 0 ]
kill -9 $SNAPD_PID
-SNAPD_PID=$($CLI volume status $V0 | grep "Snapshot Daemon" | awk {'print $8'});
-
-TEST [ $SNAPD_PID = 'N/A' ]
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "^N/A$" snapd_pid
cleanup ;
diff --git a/tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t b/tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t
deleted file mode 100644
index c536c8261e4..00000000000
--- a/tests/bugs/snapshot/bug-1140162-file-snapshot-features-encrypt-opts-validation.t
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-## Test case for BZ-1140160 Volume option set <vol> <file-snapshot> and
-## <features.encryption> <value> command input should validate correctly.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-## Start glusterd
-TEST glusterd;
-TEST pidof glusterd;
-
-## Lets create and start volume
-TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2};
-TEST $CLI volume start $V0
-
-## Set features.file-snapshot and features.encryption option with non-boolean
-## value. These options should fail.
-TEST ! $CLI volume set $V0 features.file-snapshot abcd
-TEST ! $CLI volume set $V0 features.encryption redhat
-
-## Set other options with valid value. These options should succeed.
-TEST $CLI volume set $V0 barrier enable
-TEST $CLI volume set $V0 ping-timeout 60
-
-## Set features.file-snapshot and features.encryption option with valid boolean
-## value. These options should succeed.
-TEST $CLI volume set $V0 features.file-snapshot on
-
-## Before setting the crypt xlator on, it is required to create master key
-## Otherwise glusterfs client process will fail to start
-echo "0000111122223333444455556666777788889999aaaabbbbccccddddeeeeffff" > $GLUSTERD_WORKDIR/$V0-master-key
-
-## Specify location of master key
-TEST $CLI volume set $V0 encryption.master-key $GLUSTERD_WORKDIR/$V0-master-key
-
-TEST $CLI volume set $V0 features.encryption on
-
-cleanup;
-#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
-#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t b/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
index f30194b6339..04a85db0c1a 100644
--- a/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
+++ b/tests/bugs/snapshot/bug-1482023-snpashot-issue-with-other-processes-accessing-mounted-path.t
@@ -130,3 +130,4 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" mounted_snaps ${V1}
cleanup;
# run first!
+#G_TESTDEF_TEST_STATUS_CENTOS6=BRICK_MUX_BAD_TEST,BUG=1743069
diff --git a/tests/bugs/snapshot/bug-1597662.t b/tests/bugs/snapshot/bug-1597662.t
index dc87d17a0ef..f582930476a 100644
--- a/tests/bugs/snapshot/bug-1597662.t
+++ b/tests/bugs/snapshot/bug-1597662.t
@@ -34,12 +34,13 @@ function is_snap_path
EXPECT "1" is_snap_path
$CLI snapshot deactivate snap1;
-
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} 'Stopped' snapshot_status snap1
# snap is deactivated so snap_path should not exist
EXPECT "0" is_snap_path
# activate snap again
$CLI snapshot activate snap1;
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} 'Started' snapshot_status snap1
# snap is active so snap_path should exist
EXPECT "1" is_snap_path
diff --git a/tests/bugs/transport/bug-873367.t b/tests/bugs/transport/bug-873367.t
index d4c07024ed0..8070bc1b83c 100755
--- a/tests/bugs/transport/bug-873367.t
+++ b/tests/bugs/transport/bug-873367.t
@@ -13,7 +13,7 @@ rm -f $SSL_BASE/glusterfs.*
mkdir -p $B0/1
mkdir -p $M0
-TEST openssl genrsa -out $SSL_KEY 1024
+TEST openssl genrsa -out $SSL_KEY 2048
TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
ln $SSL_CERT $SSL_CA
diff --git a/tests/bugs/write-behind/issue-884.c b/tests/bugs/write-behind/issue-884.c
new file mode 100644
index 00000000000..e9c33b351ad
--- /dev/null
+++ b/tests/bugs/write-behind/issue-884.c
@@ -0,0 +1,267 @@
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <assert.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <pthread.h>
+
+#include <glusterfs/api/glfs.h>
+
+/* Based on a reproducer by Stefan Ring. It seems to be quite sensitive to any
+ * timing modification, so the code has been kept as is, with only minor
+ * changes. */
+
+struct glfs *glfs;
+
+pthread_mutex_t the_mutex = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t the_cond = PTHREAD_COND_INITIALIZER;
+
+typedef struct _my_aiocb {
+ int64_t size;
+ volatile int64_t seq;
+ int which;
+} my_aiocb;
+
+typedef struct _worker_data {
+ my_aiocb cb;
+ struct iovec iov;
+ int64_t offset;
+} worker_data;
+
+typedef struct {
+ worker_data wdata[2];
+
+ volatile unsigned busy;
+} all_data_t;
+
+all_data_t all_data;
+
+static void
+completion_fnc(struct glfs_fd *fd, ssize_t ret, struct glfs_stat *pre,
+ struct glfs_stat *post, void *arg)
+{
+ void *the_thread;
+ my_aiocb *cb = (my_aiocb *)arg;
+ long seq = cb->seq;
+
+ assert(ret == cb->size);
+
+ pthread_mutex_lock(&the_mutex);
+ pthread_cond_broadcast(&the_cond);
+
+ all_data.busy &= ~(1 << cb->which);
+ cb->seq = -1;
+
+ the_thread = (void *)pthread_self();
+ printf("worker %d is done from thread %p, seq %ld!\n", cb->which,
+ the_thread, seq);
+
+ pthread_mutex_unlock(&the_mutex);
+}
+
+static void
+init_wdata(worker_data *data, int which)
+{
+ data->cb.which = which;
+ data->cb.seq = -1;
+
+ data->iov.iov_base = malloc(1024 * 1024);
+ memset(data->iov.iov_base, 6,
+ 1024 * 1024); /* tail part never overwritten */
+}
+
+static void
+init()
+{
+ all_data.busy = 0;
+
+ init_wdata(&all_data.wdata[0], 0);
+ init_wdata(&all_data.wdata[1], 1);
+}
+
+static void
+do_write(struct glfs_fd *fd, int content, int size, int64_t seq,
+ worker_data *wdata, const char *name)
+{
+ int ret;
+
+ wdata->cb.size = size;
+ wdata->cb.seq = seq;
+
+ if (content >= 0)
+ memset(wdata->iov.iov_base, content, size);
+ wdata->iov.iov_len = size;
+
+ pthread_mutex_lock(&the_mutex);
+ printf("(%d) dispatching write \"%s\", offset %lx, len %x, seq %ld\n",
+ wdata->cb.which, name, (long)wdata->offset, size, (long)seq);
+ pthread_mutex_unlock(&the_mutex);
+ ret = glfs_pwritev_async(fd, &wdata->iov, 1, wdata->offset, 0,
+ completion_fnc, &wdata->cb);
+ assert(ret >= 0);
+}
+
+#define IDLE 0 // both workers must be idle
+#define ANY 1 // use any worker, other one may be busy
+
+int
+get_worker(int waitfor, int64_t excl_seq)
+{
+ int which;
+
+ pthread_mutex_lock(&the_mutex);
+
+ while (waitfor == IDLE && (all_data.busy & 3) != 0 ||
+ waitfor == ANY &&
+ ((all_data.busy & 3) == 3 ||
+ excl_seq >= 0 && (all_data.wdata[0].cb.seq == excl_seq ||
+ all_data.wdata[1].cb.seq == excl_seq)))
+ pthread_cond_wait(&the_cond, &the_mutex);
+
+ if (!(all_data.busy & 1))
+ which = 0;
+ else
+ which = 1;
+
+ all_data.busy |= (1 << which);
+
+ pthread_mutex_unlock(&the_mutex);
+
+ return which;
+}
+
+static int
+doit(struct glfs_fd *fd)
+{
+ int ret;
+ int64_t seq = 0;
+ int64_t offset = 0; // position in file, in blocks
+ int64_t base = 0x1000; // where to place the data, in blocks
+
+ int async_mode = ANY;
+
+ init();
+
+ for (;;) {
+ int which;
+ worker_data *wdata;
+
+ // for growing to the first offset
+ for (;;) {
+ int gap = base + 0x42 - offset;
+ if (!gap)
+ break;
+ if (gap > 80)
+ gap = 80;
+
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = offset << 9;
+ do_write(fd, 0, gap << 9, seq++, wdata, "gap-filling");
+
+ offset += gap;
+ }
+
+ // 8700
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x42) << 9;
+ do_write(fd, 1, 62 << 9, seq++, wdata, "!8700");
+
+ // 8701
+ which = get_worker(IDLE, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x42) << 9;
+ do_write(fd, 2, 55 << 9, seq++, wdata, "!8701");
+
+ // 8702
+ which = get_worker(async_mode, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0x79) << 9;
+ do_write(fd, 3, 54 << 9, seq++, wdata, "!8702");
+
+ // 8703
+ which = get_worker(async_mode, -1);
+ wdata = &all_data.wdata[which];
+
+ wdata->offset = (base + 0xaf) << 9;
+ do_write(fd, 4, 81 << 9, seq++, wdata, "!8703");
+
+ // 8704
+ // this writes both 5s and 6s
+ // the range of 5s is the one that overwrites 8703
+
+ which = get_worker(async_mode, seq - 1);
+ wdata = &all_data.wdata[which];
+
+ memset(wdata->iov.iov_base, 5, 81 << 9);
+ wdata->offset = (base + 0xaf) << 9;
+ do_write(fd, -1, 1623 << 9, seq++, wdata, "!8704");
+
+ offset = base + 0x706;
+ base += 0x1000;
+ if (base >= 0x100000)
+ break;
+ }
+
+ printf("done!\n");
+ fflush(stdout);
+
+ pthread_mutex_lock(&the_mutex);
+
+ while ((all_data.busy & 3) != 0)
+ pthread_cond_wait(&the_cond, &the_mutex);
+
+ pthread_mutex_unlock(&the_mutex);
+
+ ret = glfs_close(fd);
+ assert(ret >= 0);
+ /*
+ ret = glfs_fini(glfs);
+ assert(ret >= 0);
+ */
+ return 0;
+}
+
+int
+main(int argc, char *argv[])
+{
+ int ret;
+ int open_flags = O_RDWR | O_DIRECT | O_TRUNC;
+ struct glfs_fd *fd;
+
+ glfs = glfs_new(argv[1]);
+ if (!glfs) {
+ printf("glfs_new!\n");
+ goto out;
+ }
+ ret = glfs_set_volfile_server(glfs, "tcp", "localhost", 24007);
+ if (ret < 0) {
+ printf("set_volfile!\n");
+ goto out;
+ }
+ ret = glfs_init(glfs);
+ if (ret) {
+ printf("init!\n");
+ goto out;
+ }
+ fd = glfs_open(glfs, argv[2], open_flags);
+ if (!fd) {
+ printf("open!\n");
+ goto out;
+ }
+ srand(time(NULL));
+ return doit(fd);
+out:
+ return 1;
+}
diff --git a/tests/bugs/write-behind/issue-884.t b/tests/bugs/write-behind/issue-884.t
new file mode 100755
index 00000000000..2bcf7d15265
--- /dev/null
+++ b/tests/bugs/write-behind/issue-884.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+# This test tries to detect a race condition in write-behind. It's based on a
+# reproducer written by Stefan Ring that is able to hit it sometimes. On my
+# system, it happened in around 10% of the runs. This means that if this bug
+# appears again, this test will fail roughly once every 10 runs. Most probably this
+# failure will be hidden by the automatic test retry of the testing framework.
+#
+# Please, if this test fails, it needs to be analyzed in detail.
+
+function run() {
+ "${@}" >/dev/null
+}
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/$V0
+# This makes it easier to hit the issue
+TEST $CLI volume set $V0 client-log-level TRACE
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+build_tester $(dirname $0)/issue-884.c -lgfapi
+
+TEST touch $M0/testfile
+
+# This program generates a file of 535694336 bytes with a fixed pattern
+TEST run $(dirname $0)/issue-884 $V0 testfile
+
+# This is the md5sum of the expected pattern without corruption
+EXPECT "ad105f9349345a70fc697632cbb5eec8" echo "$(md5sum $B0/$V0/testfile | awk '{ print $1; }')"
+
+cleanup
diff --git a/tests/cluster.rc b/tests/cluster.rc
index 99be8e79c21..34f5b02398f 100644
--- a/tests/cluster.rc
+++ b/tests/cluster.rc
@@ -11,7 +11,7 @@ function launch_cluster() {
define_backends $count;
define_hosts $count;
define_glusterds $count $2;
- define_clis $count;
+ define_clis $count $3;
start_glusterds;
}
@@ -50,15 +50,16 @@ function define_glusterds() {
sopt="management.glusterd-sockfile=${!b}/glusterd/gd.sock"
#Get the logdir
logdir=`gluster --print-logdir`
+ clopt="management.cluster-test-mode=${logdir}/$i";
#Fetch the testcases name and prefix the glusterd log with it
logfile=`echo ${0##*/}`_glusterd$i.log
- lopt="--log-file=$logdir/$logfile"
+ lopt="--log-file=$logdir/$i/$logfile"
if [ "$2" == "-LDEBUG" ]; then
- eval "glusterd_$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt $lopt $popt'";
- eval "glusterd$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt $lopt $popt'";
+ eval "glusterd_$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt --xlator-option $clopt $lopt $popt'";
+ eval "glusterd$i='glusterd -LDEBUG --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt --xlator-option $clopt $lopt $popt'";
else
- eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt $lopt $popt'";
- eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt $lopt $popt'";
+ eval "glusterd_$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt --xlator-option $clopt $lopt $popt'";
+ eval "glusterd$i='glusterd --xlator-option $wopt --xlator-option $bopt --xlator-option $ropt --xlator-option $sopt --xlator-option $clopt $lopt $popt'";
fi
done
}
@@ -89,6 +90,20 @@ function kill_glusterd() {
kill `cat $pidfile`;
}
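+# Kill the glusterd instance identified by <index> using its pidfile and start
+# it again with the command line that define_glusterds() prepared for it.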
+function restart_glusterd() {
+ local index=$1
+ local b
+ local pidfile
+ local g
+
+ b="B$index"
+ pidfile="${!b}/glusterd.pid"
+
+ kill `cat $pidfile`
+
+ g="glusterd_${index}"
+ ${!g}
+}
function kill_node() {
local index=$1;
@@ -133,8 +148,13 @@ function define_clis() {
lopt1="--log-file=$logdir/$logfile1"
- eval "CLI_$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
- eval "CLI$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
+ if [ "$2" == "-NO_FORCE" ]; then
+ eval "CLI_$i='$CLI_NO_FORCE --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
+ eval "CLI$i='$CLI_NO_FORCE --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
+ else
+ eval "CLI_$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt'";
+ eval "CLI$i='$CLI --glusterd-sock=${!b}/glusterd/gd.sock $lopt1'";
+ fi
done
}
@@ -191,3 +211,8 @@ function cluster_brick_up_status {
eval \$CLI_$1 volume status $vol $host:$brick --xml | sed -ne 's/.*<status>\([01]\)<\/status>/\1/p'
}
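+# Print the 7th column of the 3rd line of the 'remove-brick ... status' output
+# on node 1, i.e. the status field of the remove-brick operation.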
+function cluster_remove_brick_status_completed_field {
+ local vol=$1
+ local brick_list=$2
+ $CLI_1 volume remove-brick $vol $brick_list status | awk '{print $7}' | sed -n 3p
+}
diff --git a/tests/ec.rc b/tests/ec.rc
index 04405ecb829..f18752fc99a 100644
--- a/tests/ec.rc
+++ b/tests/ec.rc
@@ -7,3 +7,12 @@ function ec_up_status()
local ec_id=$3
grep -E "^up =" $m/.meta/graphs/active/${v}-disperse-${ec_id}/private | cut -f2 -d'='
}
+
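+# Fetch the value of option <opt> for disperse subvolume <ec_id> of volume <v>
+# from the .meta private dump of the client mounted at <m>.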
+function ec_option_value()
+{
+ local v=$1
+ local m=$2
+ local ec_id=$3
+ local opt=$4
+ grep -E "^$opt =" $m/.meta/graphs/active/${v}-disperse-${ec_id}/private | cut -f2 -d'='| awk '{print $1}'
+}
diff --git a/tests/encryption/crypt.t b/tests/encryption/crypt.t
deleted file mode 100755
index 2f965b0e8b7..00000000000
--- a/tests/encryption/crypt.t
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../volume.rc
-
-cleanup;
-
-TEST glusterd
-TEST pidof glusterd
-
-## Create a volume with one brick
-TEST $CLI volume create $V0 $H0:$B0/${V0}1;
-EXPECT "$V0" volinfo_field $V0 'Volume Name';
-EXPECT 'Created' volinfo_field $V0 'Status';
-EXPECT '1' brick_count $V0
-
-## Turn off performance translators
-
-TEST $CLI volume set $V0 performance.quick-read off
-EXPECT 'off' volinfo_field $V0 'performance.quick-read'
-TEST $CLI volume set $V0 performance.write-behind off
-EXPECT 'off' volinfo_field $V0 'performance.write-behind'
-TEST $CLI volume set $V0 performance.open-behind off
-EXPECT 'off' volinfo_field $V0 'performance.open-behind'
-
-## Create a file with master key
-
-echo "0000111122223333444455556666777788889999aaaabbbbccccddddeeeeffff" > $GLUSTERD_WORKDIR/$V0-master-key
-
-## Specify location of master key
-TEST $CLI volume set $V0 encryption.master-key $GLUSTERD_WORKDIR/$V0-master-key
-
-## Turn on crypt xlator by setting features.encryption to on
-TEST $CLI volume set $V0 encryption on
-EXPECT 'on' volinfo_field $V0 'features.encryption'
-
-## Start the volume
-TEST $CLI volume start $V0;
-EXPECT 'Started' volinfo_field $V0 'Status';
-
-## Mount the volume
-TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
-
-## Testing writev, readv, ftruncate:
-## Create fragmented files and compare them with the reference files
-
-build_tester $(dirname $0)/frag.c
-TEST $(dirname $0)/frag $M0/testfile /tmp/$V0-goodfile 262144 500
-
-## Testing link, unlink, symlink, rename
-
-TEST ln $M0/testfile $M0/testfile-link
-TEST mv $M0/testfile $M0/testfile-renamed
-TEST ln -s $M0/testfile-link $M0/testfile-symlink
-TEST rm -f $M0/testfile-renamed
-
-## Remount the volume
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
-
-TEST diff -u $M0/testfile-symlink /tmp/$V0-goodfile
-EXPECT ''
-
-TEST rm -f $M0/testfile-symlink
-TEST rm -f $M0/testfile-link
-
-## Cleanup files
-
-TEST rm -f /tmp/$V0-master-key
-TEST rm -f /tmp/$V0-goodfile
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-## Reset crypt options
-TEST $CLI volume reset $V0 encryption.block-size
-TEST $CLI volume reset $V0 encryption.data-key-size
-
-## Stop the volume
-TEST $CLI volume stop $V0;
-EXPECT 'Stopped' volinfo_field $V0 'Status';
-
-## Delete the volume
-TEST $CLI volume delete $V0;
-TEST ! $CLI volume info $V0;
-
-TEST rm -rf $(dirname $0)/frag
-cleanup;
-
-#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
-#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/encryption/frag.c b/tests/encryption/frag.c
deleted file mode 100644
index 7b4510f6a4d..00000000000
--- a/tests/encryption/frag.c
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- Copyright (c) 2008-2013 Red Hat, Inc. <http://www.redhat.com>
- This file is part of GlusterFS.
-
- This file is licensed to you under your choice of the GNU Lesser
- General Public License, version 3 or any later version (LGPLv3 or
- later), or the GNU General Public License, version 2 (GPLv2), in all
- cases as published by the Free Software Foundation.
-*/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-#include <fcntl.h>
-
-#define MAX_NUM_OPS (1 << 20)
-#define MAX_FILE_SIZE (1 << 30)
-
-typedef enum { READ_OP, WRITE_OP, TRUNC_OP, LAST_OP } frag_op;
-
-struct frag_ctx {
- int test_fd;
- int good_fd;
- char *test_buf;
- char *good_buf;
- char *content;
- int max_file_size;
-};
-
-typedef int (*frag_op_t)(struct frag_ctx *ctx, off_t offset, size_t count);
-
-static int
-doread(int fd, off_t offset, size_t count, char *buf, int max_file_size)
-{
- int ret = 0;
- int was_read = 0;
-
- if (lseek(fd, offset, SEEK_SET) == -1) {
- perror("lseek failed");
- return -1;
- }
- while (count) {
- ret = read(fd, buf + offset + was_read, count);
- if (ret < 0)
- return -1;
- if (ret == 0)
- break;
- if (ret > count) {
- fprintf(stderr, "READ: read more than asked\n");
- return -1;
- }
- count -= ret;
- was_read += ret;
- }
- return ret;
-}
-
-static int
-dowrite(int fd, off_t offset, size_t count, char *buf)
-{
- int ret;
-
- ret = lseek(fd, offset, SEEK_SET);
- if (ret == -1)
- return ret;
- return write(fd, buf, count);
-}
-
-static int
-dotrunc(int fd, off_t offset)
-{
- int ret;
-
- ret = ftruncate(fd, offset);
- if (ret == -1)
- perror("truncate failed");
- return ret;
-}
-
-static int
-prepare_file(char *filename, int *fd, char **buf, int max_file_size)
-{
- int ret;
-
- *buf = malloc(max_file_size);
- if (*buf == NULL) {
- perror("malloc failed");
- return -1;
- }
- *fd = open(filename, O_CREAT | O_RDWR, S_IRWXU);
- if (*fd == -1) {
- perror("open failed");
- free(*buf);
- *buf = NULL;
- return -1;
- }
- return 0;
-}
-
-/*
- * @offset, @count: random values from [0, max_file_size - 1]
- */
-static int
-frag_write(struct frag_ctx *ctx, off_t offset, size_t count)
-{
- int ret;
- struct stat test_stbuf;
- struct stat good_stbuf;
-
- if (offset + count > ctx->max_file_size)
- offset = offset / 2;
- if (offset + count > ctx->max_file_size)
- count = count / 2;
-
- if (fstat(ctx->test_fd, &test_stbuf)) {
- fprintf(stderr, "WRITE: fstat of test file failed\n");
- return -1;
- }
- if (offset > test_stbuf.st_size)
- printf("writing hole\n");
-
- ret = dowrite(ctx->test_fd, offset, count, ctx->content);
- if (ret < 0 || ret != count) {
- fprintf(stderr, "WRITE: failed to write test file\n");
- return -1;
- }
- ret = dowrite(ctx->good_fd, offset, count, ctx->content);
- if (ret < 0 || ret != count) {
- fprintf(stderr, "WRITE: failed to write test file\n");
- return -1;
- }
- if (fstat(ctx->test_fd, &test_stbuf)) {
- fprintf(stderr, "WRITE: fstat of test file failed\n");
- return -1;
- }
- if (fstat(ctx->good_fd, &good_stbuf)) {
- fprintf(stderr, "WRITE: fstat of good file failed\n");
- return -1;
- }
- if (test_stbuf.st_size != good_stbuf.st_size) {
- fprintf(stderr, "READ: Bad file size %d (expected %d)\n",
- (int)test_stbuf.st_size, (int)good_stbuf.st_size);
- return -1;
- }
- return 0;
-}
-
-/*
- * @offset, @count: random values from [0, max_file_size - 1]
- */
-static int
-frag_read(struct frag_ctx *ctx, off_t offset, size_t count)
-{
- ssize_t test_ret;
- ssize_t good_ret;
-
- test_ret = doread(ctx->test_fd, offset, count, ctx->test_buf,
- ctx->max_file_size);
- if (test_ret < 0) {
- fprintf(stderr, "READ: failed to read test file\n");
- return -1;
- }
- good_ret = doread(ctx->good_fd, offset, count, ctx->good_buf,
- ctx->max_file_size);
- if (good_ret < 0) {
- fprintf(stderr, "READ: failed to read good file\n");
- return -1;
- }
- if (test_ret != good_ret) {
- fprintf(stderr, "READ: Bad return value %d (expected %d\n)", test_ret,
- good_ret);
- return -1;
- }
- if (memcmp(ctx->test_buf + offset, ctx->good_buf + offset, good_ret)) {
- fprintf(stderr, "READ: bad data\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * @offset: random value from [0, max_file_size - 1]
- */
-static int
-frag_truncate(struct frag_ctx *ctx, off_t offset,
- __attribute__((unused)) size_t count)
-{
- int ret;
- struct stat test_stbuf;
- struct stat good_stbuf;
-
- if (fstat(ctx->test_fd, &test_stbuf)) {
- fprintf(stderr, "TRUNCATE: fstat of test file failed\n");
- return -1;
- }
- if (offset > test_stbuf.st_size)
- printf("expanding truncate to %d\n", offset);
- else if (offset < test_stbuf.st_size)
- printf("shrinking truncate to %d\n", offset);
- else
- printf("trivial truncate\n");
-
- ret = dotrunc(ctx->test_fd, offset);
- if (ret == -1) {
- fprintf(stderr, "TRUNCATE: failed for test file\n");
- return -1;
- }
- ret = dotrunc(ctx->good_fd, offset);
- if (ret == -1) {
- fprintf(stderr, "TRUNCATE: failed for good file\n");
- return -1;
- }
- if (fstat(ctx->test_fd, &test_stbuf)) {
- fprintf(stderr, "TRUNCATE: fstat of test file failed\n");
- return -1;
- }
- if (fstat(ctx->good_fd, &good_stbuf)) {
- fprintf(stderr, "TRUNCATE: fstat of good file failed\n");
- return -1;
- }
- if (test_stbuf.st_size != good_stbuf.st_size) {
- fprintf(stderr, "TRUNCATE: bad test file size %d (expected %d)\n",
- test_stbuf.st_size, good_stbuf.st_size);
- return -1;
- }
- return 0;
-}
-
-frag_op_t frag_ops[LAST_OP] = {[READ_OP] = frag_read,
- [WRITE_OP] = frag_write,
- [TRUNC_OP] = frag_truncate};
-
-static void
-put_ctx(struct frag_ctx *ctx)
-{
- if (ctx->test_buf)
- free(ctx->test_buf);
- if (ctx->good_buf)
- free(ctx->good_buf);
- if (ctx->content)
- free(ctx->content);
-}
-
-main(int argc, char *argv[])
-{
- int i;
- int ret = 0;
- struct frag_ctx ctx;
- char *test_filename = NULL;
- char *good_filename = NULL;
- int num_ops;
- int max_file_size;
-
- memset(&ctx, 0, sizeof(ctx));
- if (argc != 5) {
- fprintf(stderr,
- "usage: %s <test-file-name> <good-file-name> <max-file-size> "
- "<number-of-operations>\n",
- argv[0]);
- ret = -1;
- goto exit;
- }
- test_filename = argv[1];
- good_filename = argv[2];
- max_file_size = atoi(argv[3]);
- if (max_file_size > MAX_FILE_SIZE)
- max_file_size = MAX_FILE_SIZE;
- num_ops = atoi(argv[4]);
- if (num_ops > MAX_NUM_OPS)
- num_ops = MAX_NUM_OPS;
-
- ret = prepare_file(test_filename, &ctx.test_fd, &ctx.test_buf,
- max_file_size);
- if (ret)
- goto exit;
- ret = prepare_file(good_filename, &ctx.good_fd, &ctx.good_buf,
- max_file_size);
- if (ret) {
- if (close(ctx.test_fd) == -1)
- perror("close test_buf failed");
- goto exit;
- }
- ctx.content = malloc(max_file_size);
- if (!ctx.content) {
- perror("malloc failed");
- goto close;
- }
- ctx.max_file_size = max_file_size;
- for (i = 0; i < max_file_size; i++)
- ctx.content[i] = random() % 256;
-
- for (i = 0; i < num_ops; i++) {
- ret = frag_ops[random() % LAST_OP](
- &ctx, random() % max_file_size, /* offset */
- random() % max_file_size /* count */);
- if (ret)
- break;
- }
-close:
- if (close(ctx.test_fd) == -1)
- perror("close test_fd failed");
- if (close(ctx.good_fd) == -1)
- perror("close good_fd failed");
-exit:
- put_ctx(&ctx);
- if (ret)
- exit(1);
- exit(0);
-}
-
-/*
- Local variables:
- c-indentation-style: "K&R"
- mode-name: "LC"
- c-basic-offset: 8
- tab-width: 8
- fill-column: 80
- scroll-step: 1
- End:
-*/
diff --git a/tests/env.rc.in b/tests/env.rc.in
index c7472a7988d..0478d66aec6 100644
--- a/tests/env.rc.in
+++ b/tests/env.rc.in
@@ -2,7 +2,7 @@ prefix=@prefix@
exec_prefix=@exec_prefix@
libdir=@libdir@
-PATH=@sbindir@:$PATH
+PATH=@bindir@:@sbindir@:$PATH
export PATH
GLUSTERD_PIDFILEDIR=@localstatedir@/run/gluster
diff --git a/tests/features/delay-gen.t b/tests/features/delay-gen.t
index 712b5b60065..72e6dbb7697 100755
--- a/tests/features/delay-gen.t
+++ b/tests/features/delay-gen.t
@@ -36,5 +36,17 @@ create_max_latency=$($CLI volume profile $V0 info | grep CREATE | awk 'BEGIN {ma
TEST [ ! -z $write_max_latency ];
TEST [ -z $create_max_latency ];
+# Not providing a particular fop will make it test everything
+TEST $CLI volume reset $V0 delay-gen.enable
+TEST $CLI volume set $V0 delay-gen.delay-duration 100
+
+cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+build_tester ./glfsxmp.c -lgfapi
+./glfsxmp $V0 $H0 >/dev/null
+cleanup_tester ./glfsxmp
+rm ./glfsxmp.c
+
+$(dirname $0)/../basic/rpc-coverage.sh $M0 >/dev/null
+
cleanup;
#G_TESTDEF_TEST_STATUS_NETBSD7=1501397
diff --git a/tests/features/flock_interrupt.t b/tests/features/flock_interrupt.t
index 8603b656c24..b8717e30dfb 100644
--- a/tests/features/flock_interrupt.t
+++ b/tests/features/flock_interrupt.t
@@ -22,12 +22,11 @@ EXPECT 'Started' volinfo_field $V0 'Status';
TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
TEST touch $M0/testfile;
-function flock_interrupt {
- flock $MO/testfile sleep 3 & flock -w 1 $M0/testfile true;
- echo ok;
-}
+echo > got_lock
+flock $M0/testfile sleep 6 & { sleep 0.3; flock -w 2 $M0/testfile true; echo ok > got_lock; } &
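+# The second flock waits at most 2s while the first still holds the lock for
+# 6s, so got_lock should read "ok" well within the 4-second window below.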
-EXPECT_WITHIN 2 ok flock_interrupt;
+EXPECT_WITHIN 4 ok cat got_lock;
## Finish up
+rm -f got_lock;
cleanup;
diff --git a/tests/features/fuse-lru-limit.t b/tests/features/fuse-lru-limit.t
index 9f1211660ce..dd6be2d5397 100644
--- a/tests/features/fuse-lru-limit.t
+++ b/tests/features/fuse-lru-limit.t
@@ -10,6 +10,7 @@ TEST pidof glusterd
TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
TEST $CLI volume start $V0
TEST glusterfs -s $H0 --volfile-id $V0 $M0
+EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "2" online_brick_count
EXPECT "1" get_mount_active_size_value $V0 $M0
EXPECT "0" get_mount_lru_size_value $V0 $M0
diff --git a/tests/features/interrupt.t b/tests/features/interrupt.t
index bd70ff87545..067eb1b7486 100644
--- a/tests/features/interrupt.t
+++ b/tests/features/interrupt.t
@@ -43,18 +43,22 @@ function test_interrupt {
# If the test helper fails (which is considered a setup error, not failure of the test
# case itself), kill will be invoked without argument, and that will be the actual
# error which is caught.
- TEST "./$(dirname $0)/open_and_sleep $M0/testfile | { sleep 0.1; xargs -n1 kill -INT; }"
+ TEST "./$(dirname $0)/open_and_sleep $M0/testfile-$handlebool | { sleep 0.1; xargs -n1 kill -INT; }"
TEST "grep -E '$logpattern' $log_file"
# Basic sanity check, making sure filesystem has not crashed.
- TEST test -f $M0/testfile
+ TEST test -f $M0/testfile-$handlebool
}
# Theoretically FLUSH might finish before INTERRUPT is handled,
-# in which case we'd get the "no handler found" message (but it's unlikely).
-test_interrupt yes 'FLUSH.*interrupt handler triggered|INTERRUPT.*no handler found'
+# in which case we'd get the "no handler found" message instead of
+# "interrupt handler triggered" (but it's unlikely).
+# If that's observed, the pattern can be changed to
+# 'FLUSH.*interrupt handler triggered|[I]NTERRUPT.*no handler found'
+# to fix the test.
+test_interrupt yes '[F]LUSH.*interrupt handler triggered'
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-test_interrupt no 'INTERRUPT.*no handler found'
+test_interrupt no '[I]NTERRUPT.*no handler found'
## Finish up
TEST $CLI volume stop $V0;
diff --git a/tests/features/ssl-authz.t b/tests/features/ssl-authz.t
index 3cb45b5e582..497083e5a3a 100755
--- a/tests/features/ssl-authz.t
+++ b/tests/features/ssl-authz.t
@@ -25,6 +25,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
+TEST $CLI v set all cluster.brick-multiplex on
# Construct a cipher list that excludes CBC because of POODLE.
# http://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2014-3566
#
@@ -41,16 +42,16 @@ function valid_ciphers {
-e '/:$/s///'
}
-TEST openssl genrsa -out $SSL_KEY 1024
+TEST openssl genrsa -out $SSL_KEY 2048
TEST openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
ln $SSL_CERT $SSL_CA
-TEST $CLI volume create $V0 $H0:$B0/1
+TEST $CLI volume create $V0 replica 3 $H0:$B0/{1,2,3} force
TEST $CLI volume set $V0 server.ssl on
TEST $CLI volume set $V0 client.ssl on
TEST $CLI volume set $V0 ssl.cipher-list $(valid_ciphers)
TEST $CLI volume start $V0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" online_brick_count
# This mount should SUCCEED because ssl-allow=* by default. This effectively
# disables SSL authorization, though authentication and encryption might still
@@ -59,11 +60,28 @@ TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
TEST ping_file $M0/before
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+glusterfsd_pid=`pgrep glusterfsd`
+TEST [ $glusterfsd_pid != 0 ]
+start=`pmap -x $glusterfsd_pid | grep total | awk -F " " '{print $4}'`
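+# (On typical procps builds, field 4 of pmap's "total" line is the resident
+# set size in KB; it is sampled again after the heal-info loop below.)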
+echo "Memory consumption for glusterfsd process"
+for i in $(seq 1 100); do
+ gluster v heal $V0 info >/dev/null
+done
+# Wait for memory to be cleaned up
+sleep 10
+end=`pmap -x $glusterfsd_pid | grep total | awk -F " " '{print $4}'`
+diff=$((end-start))
+
+# If memory consumption has grown by more than 15M, there is likely a leak in the SSL code path
+
+TEST [ $diff -lt 15000 ]
+
+
# Set ssl-allow to a wildcard that includes our identity.
TEST $CLI volume stop $V0
TEST $CLI volume set $V0 auth.ssl-allow Any*
TEST $CLI volume start $V0
-EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "3" online_brick_count
# This mount should SUCCEED because we match the wildcard.
TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0
diff --git a/tests/features/ssl-ciphers.t b/tests/features/ssl-ciphers.t
index 563d37c5277..b70fe360e02 100644
--- a/tests/features/ssl-ciphers.t
+++ b/tests/features/ssl-ciphers.t
@@ -33,18 +33,26 @@ wait_mount() {
openssl_connect() {
ssl_opt="-verify 3 -verify_return_error -CAfile $SSL_CA"
ssl_opt="$ssl_opt -crl_check_all -CApath $TMPDIR"
- #echo openssl s_client $ssl_opt $@ > /dev/tty
- #read -p "Continue? " nothing
- CIPHER=`echo "" |
- openssl s_client $ssl_opt $@ 2>/dev/null |
- awk '/^ Cipher/{print $3}'`
- if [ "x${CIPHER}" = "x" -o "x${CIPHER}" = "x0000" ] ; then
+ cmd="echo "" | openssl s_client $ssl_opt $@ 2>/dev/null"
+ CIPHER=$(eval $cmd | awk -F "Cipher is" '{print $2}' | tr -d '[:space:]' | awk -F " " '{print $1}')
+ if [ "x${CIPHER}" = "x" -o "x${CIPHER}" = "x0000" -o "x${CIPHER}" = "x(NONE)" ] ; then
echo "N"
else
echo "Y"
fi
}
+# Validate the cipher first to determine the expected EXPECT value before calling openssl_connect
+check_cipher() {
+ cmd="echo "" | openssl s_client $@ 2> /dev/null"
+ cipher=$(eval $cmd |awk -F "Cipher is" '{print $2}' | tr -d '[:space:]' | awk -F " " '{print $1}')
+ if [ "x${cipher}" = "x" -o "x${cipher}" = "x0000" -o "x${cipher}" = "x(NONE)" ] ; then
+ echo "N"
+ else
+ echo "Y"
+ fi
+}
+
cleanup;
mkdir -p $B0
mkdir -p $M0
@@ -65,7 +73,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
-TEST openssl genrsa -out $SSL_KEY 1024 2>/dev/null
+TEST openssl genrsa -out $SSL_KEY 2048 2>/dev/null
TEST openssl req -config $SSL_CFG -new -key $SSL_KEY -x509 \
-subj /CN=CA -out $SSL_CA
TEST openssl req -config $SSL_CFG -new -key $SSL_KEY \
@@ -102,32 +110,47 @@ EXPECT "N" openssl_connect -ssl2 -connect $H0:$BRICK_PORT
# Test SSLv3 protocol fails
EXPECT "N" openssl_connect -ssl3 -connect $H0:$BRICK_PORT
-# Test TLSv1 protocol fails
-EXPECT "N" openssl_connect -tls1 -connect $H0:$BRICK_PORT
+# Test TLSv1 protocol based on openssl version
+cmd="openssl version"
+ver=$(eval $cmd | awk -F " " '{print $2}' | grep "^1.1")
+if [ "x${ver}" = "x" ]; then
+ supp="N"
+else
+ supp="Y"
+fi
+EXPECT "${supp}" openssl_connect -tls1 -connect $H0:$BRICK_PORT
# Test a HIGH CBC cipher
-EXPECT "Y" openssl_connect -cipher AES256-SHA -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher AES256-SHA -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher AES256-SHA -connect $H0:$BRICK_PORT
# Test EECDH
-EXPECT "Y" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher EECDH -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
# test MD5 fails
-EXPECT "N" openssl_connect -cipher DES-CBC3-MD5 -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher DES-CBC3-MD5 -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher DES-CBC3-MD5 -connect $H0:$BRICK_PORT
# test RC4 fails
-EXPECT "N" openssl_connect -cipher RC4-SHA -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher RC4-SHA -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher RC4-SHA -connect $H0:$BRICK_PORT
# test eNULL fails
-EXPECT "N" openssl_connect -cipher NULL-SHA256 -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher NULL-SHA256 -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher NULL-SHA256 -connect $H0:$BRICK_PORT
# test SHA2
-EXPECT "Y" openssl_connect -cipher AES256-SHA256 -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher AES256-SHA256 -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher AES256-SHA256 -connect $H0:$BRICK_PORT
# test GCM
-EXPECT "Y" openssl_connect -cipher AES256-GCM-SHA384 -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher AES256-GCM-SHA384 -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher AES256-GCM-SHA384 -connect $H0:$BRICK_PORT
# Test DH fails without DH params
-EXPECT "N" openssl_connect -cipher EDH -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher EDH -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher EDH -connect $H0:$BRICK_PORT
# Test DH with DH params
TEST $CLI volume set $V0 ssl.dh-param `pwd`/`dirname $0`/dh1024.pem
@@ -145,8 +168,10 @@ TEST $CLI volume stop $V0
TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
BRICK_PORT=`brick_port $V0`
-EXPECT "Y" openssl_connect -cipher AES256-SHA -connect $H0:$BRICK_PORT
-EXPECT "N" openssl_connect -cipher AES128-SHA -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher AES256-SHA -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher AES256-SHA -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher AES128-SHA -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher AES128-SHA -connect $H0:$BRICK_PORT
# Test the ec-curve option
TEST $CLI volume set $V0 ssl.cipher-list EECDH:EDH:!TLSv1
@@ -155,8 +180,10 @@ TEST $CLI volume stop $V0
TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
BRICK_PORT=`brick_port $V0`
-EXPECT "N" openssl_connect -cipher AES256-SHA -connect $H0:$BRICK_PORT
-EXPECT "Y" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher AES256-SHA -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher AES256-SHA -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher EECDH -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
TEST $CLI volume set $V0 ssl.ec-curve invalid
EXPECT invalid volume_option $V0 ssl.ec-curve
@@ -164,7 +191,8 @@ TEST $CLI volume stop $V0
TEST $CLI volume start $V0
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" online_brick_count
BRICK_PORT=`brick_port $V0`
-EXPECT "N" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
+cph=`check_cipher -cipher EECDH -connect $H0:$BRICK_PORT`
+EXPECT "$cph" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
TEST $CLI volume set $V0 ssl.ec-curve secp521r1
EXPECT secp521r1 volume_option $V0 ssl.ec-curve
@@ -175,8 +203,6 @@ BRICK_PORT=`brick_port $V0`
EXPECT "Y" openssl_connect -cipher EECDH -connect $H0:$BRICK_PORT
# test revocation
-# no need to restart the volume since the options are used
-# by the client here.
TEST $CLI volume set $V0 ssl.crl-path $TMPDIR
EXPECT $TMPDIR volume_option $V0 ssl.crl-path
$GFS --volfile-id=$V0 --volfile-server=$H0 $M0
@@ -189,14 +215,25 @@ TEST openssl ca -batch -config $SSL_CFG -revoke $SSL_CERT 2>&1
TEST openssl ca -config $SSL_CFG -gencrl -out $SSL_CRL 2>&1
# Failed once revoked
+# The client fails to mount once crl-path is set and no actual CRL file is
+# found on the client, regardless of whether the server is restarted. Since
+# the socket initialization code is shared by client and server, the CRL
+# verification flags need to be turned off for the client to stop SSL from
+# searching for CRLs in ssl.crl-path. If no CRL files are found in
+# ssl.crl-path, SSL fails the connect() attempt on the client.
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
$GFS --volfile-id=$V0 --volfile-server=$H0 $M0
EXPECT "N" wait_mount $M0
TEST ! test -f $TEST_FILE
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
# Succeed with CRL disabled
+TEST $CLI volume stop $V0
TEST $CLI volume set $V0 ssl.crl-path NULL
EXPECT NULL volume_option $V0 ssl.crl-path
+TEST $CLI volume start $V0
$GFS --volfile-id=$V0 --volfile-server=$H0 $M0
EXPECT "Y" wait_mount $M0
TEST test -f $TEST_FILE
diff --git a/tests/features/subdir-mount.t b/tests/features/subdir-mount.t
index 8401946fb0f..a02bd6befc4 100644
--- a/tests/features/subdir-mount.t
+++ b/tests/features/subdir-mount.t
@@ -85,12 +85,17 @@ TEST $CLI volume start $V0
TEST $GFS --subdir-mount /subdir1/subdir1.1/subdir1.2 -s $H0 --volfile-id $V0 $M2
TEST stat $M2
+initcnt=`grep -i create-subdir-mounts /var/log/glusterfs/glusterd.log | wc -l`
# mount shouldn't fail even after add-brick
TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}{5,6};
-# Give time for client process to get notified and use the new
-# volfile after add-brick
-sleep 1
+# Wait for glusterd to execute the create-subdir-mounts.sh script
+newcnt=`grep -i create-subdir-mounts /var/log/glusterfs/glusterd.log | wc -l`
+while [ $newcnt -eq $initcnt ]
+do
+ newcnt=`grep -i create-subdir-mounts /var/log/glusterfs/glusterd.log | wc -l`
+ sleep 1
+done
# Existing mount should still be active
mount_inode=$(stat --format "%i" "$M2")
diff --git a/tests/features/trash.t b/tests/features/trash.t
index 472e909e567..da5b50bc85a 100755
--- a/tests/features/trash.t
+++ b/tests/features/trash.t
@@ -94,105 +94,105 @@ wildcard_not_exists() {
if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
}
-# testing glusterd [1-3]
+# testing glusterd
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info
-# creating distributed volume [4]
+# creating distributed volume
TEST $CLI volume create $V0 $H0:$B0/${V0}{1,2}
-# checking volume status [5-7]
+# checking volume status
EXPECT "$V0" volinfo_field $V0 'Volume Name'
EXPECT 'Created' volinfo_field $V0 'Status'
EXPECT '2' brick_count $V0
-# test without enabling trash translator [8]
+# test without enabling trash translator
TEST start_vol $V0 $M0
-# test on enabling trash translator [9-10]
+# test on enabling trash translator
TEST $CLI volume set $V0 features.trash on
EXPECT 'on' volinfo_field $V0 'features.trash'
-# files directly under mount point [11]
+# files directly under mount point
create_files $M0/file1 $M0/file2
TEST file_exists $V0 file1 file2
-# perform unlink [12]
+# perform unlink
TEST unlink_op file1
-# perform truncate [13]
+# perform truncate
TEST truncate_op file2 4
-# create files directory hierarchy and check [14]
+# create files directory hierarchy and check
mkdir -p $M0/1/2/3
create_files $M0/1/2/3/foo1 $M0/1/2/3/foo2
TEST file_exists $V0 1/2/3/foo1 1/2/3/foo2
-# perform unlink [15]
+# perform unlink
TEST unlink_op 1/2/3/foo1
-# perform truncate [16]
+# perform truncate
TEST truncate_op 1/2/3/foo2 4
# create a directory for eliminate pattern
mkdir $M0/a
-# set the eliminate pattern [17-18]
+# set the eliminate pattern
TEST $CLI volume set $V0 features.trash-eliminate-path /a
EXPECT '/a' volinfo_field $V0 'features.trash-eliminate-path'
-# create two files and check [19]
+# create two files and check
create_files $M0/a/test1 $M0/a/test2
TEST file_exists $V0 a/test1 a/test2
-# remove from eliminate pattern [20]
+# remove from eliminate pattern
rm -f $M0/a/test1
EXPECT "Y" wildcard_not_exists $M0/.trashcan/a/test1*
-# truncate from eliminate path [21-23]
+# truncate from eliminate path
truncate -s 2 $M0/a/test2
TEST [ -e $M0/a/test2 ]
TEST [ `ls -l $M0/a/test2 | awk '{print $5}'` -eq 2 ]
EXPECT "Y" wildcard_not_exists $M0/.trashcan/a/test2*
-# set internal op on [24-25]
+# set internal op on
TEST $CLI volume set $V0 features.trash-internal-op on
EXPECT 'on' volinfo_field $V0 'features.trash-internal-op'
-# again create two files and check [26]
+# again create two files and check
create_files $M0/inop1 $M0/inop2
TEST file_exists $V0 inop1 inop2
-# perform unlink [27]
+# perform unlink
TEST unlink_op inop1
-# perform truncate [28]
+# perform truncate
TEST truncate_op inop2 4
-# remove one brick and restart the volume [28-31]
+# remove one brick and restart the volume
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}2 force
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
TEST $CLI volume stop $V0
TEST start_vol $V0 $M0 $M0/.trashcan
-# again create two files and check [33]
+# again create two files and check
create_files $M0/rebal1 $M0/rebal2
TEST file_exists $V0 rebal1 rebal2
-# add one brick [34-35]
+# add one brick
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}3
TEST [ -d $B0/${V0}3 ]
-# perform rebalance [36]
+# perform rebalance
TEST $CLI volume rebalance $V0 start force
EXPECT_WITHIN $REBALANCE_TIMEOUT "0" rebalance_completed
#Find out which file was migrated to the new brick
file_name=$(ls $B0/${V0}3/rebal*| xargs basename)
-# check whether rebalance was succesful [37-40]
+# check whether rebalance was successful
EXPECT "Y" wildcard_exists $B0/${V0}3/$file_name*
EXPECT "Y" wildcard_exists $B0/${V0}1/.trashcan/internal_op/$file_name*
@@ -201,52 +201,42 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
# force required in case rebalance is not over
TEST $CLI volume stop $V0 force
-# create a replicated volume [41]
+# create a replicated volume
TEST $CLI volume create $V1 replica 2 $H0:$B0/${V1}{1,2}
-# checking volume status [42-45]
+# checking volume status
EXPECT "$V1" volinfo_field $V1 'Volume Name'
EXPECT 'Replicate' volinfo_field $V1 'Type'
EXPECT 'Created' volinfo_field $V1 'Status'
EXPECT '2' brick_count $V1
-# enable trash with options and start the replicate volume by disabling automatic self-heal [46-50]
+# enable trash with options and start the replicate volume by disabling automatic self-heal
TEST $CLI volume set $V1 features.trash on
TEST $CLI volume set $V1 features.trash-internal-op on
EXPECT 'on' volinfo_field $V1 'features.trash'
EXPECT 'on' volinfo_field $V1 'features.trash-internal-op'
TEST start_vol $V1 $M1 $M1/.trashcan
-# mount and check for trash directory [51]
+# mount and check for trash directory
TEST [ -d $M1/.trashcan/internal_op ]
-# create a file and check [52]
+# create a file and check
touch $M1/self
TEST [ -e $B0/${V1}1/self -a -e $B0/${V1}2/self ]
-# kill one brick and delete the file from mount point [53-54]
+# kill one brick and delete the file from mount point
kill_brick $V1 $H0 $B0/${V1}1
EXPECT_WITHIN ${PROCESS_UP_TIMEOUT} "1" online_brick_count
rm -f $M1/self
EXPECT "Y" wildcard_exists $B0/${V1}2/.trashcan/self*
-# force start the volume and trigger the self-heal manually [55-57]
-TEST $CLI volume start $V1 force
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" online_brick_count
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
-# Since we created the file under root of the volume, it will be
-# healed automatically
-
-# check for the removed file in trashcan [58]
-EXPECT_WITHIN $HEAL_TIMEOUT "Y" wildcard_exists $B0/${V1}1/.trashcan/internal_op/self*
-
-# check renaming of trash directory through cli [59-62]
+# check renaming of trash directory through cli
TEST $CLI volume set $V0 trash-dir abc
TEST start_vol $V0 $M0 $M0/abc
TEST [ -e $M0/abc -a ! -e $M0/.trashcan ]
EXPECT "Y" wildcard_exists $B0/${V0}1/abc/internal_op/rebal*
-# ensure that rename and delete operation on trash directory fails [63-65]
+# ensure that rename and delete operation on trash directory fails
rm -rf $M0/abc/internal_op
TEST [ -e $M0/abc/internal_op ]
rm -rf $M0/abc/
diff --git a/tests/features/worm.t b/tests/features/worm.t
index 9766dc1ad2b..40b08cdee02 100755
--- a/tests/features/worm.t
+++ b/tests/features/worm.t
@@ -72,6 +72,45 @@ sleep 10
TEST `echo "worm 1" >> $M0/file4`
TEST ! rm -f $M0/file4
+## Test for state transition if auto-commit-period is 0
+TEST $CLI volume set $V0 features.auto-commit-period 0
+TEST `echo "worm 1" > $M0/file5`
+EXPECT '3/10/0' echo $(getfattr -e text --absolute-names --only-value -n "trusted.reten_state" $B0/${V0}1/file5)
+EXPECT 'worm 1' cat $M0/file5
+TEST ! rm -f $M0/file5
+TEST $CLI volume set $V0 features.auto-commit-period 5
+
+## Test for checking if retention-period is updated on increasing the access time of a WORM-RETAINED file.
+TEST $CLI volume set $V0 features.worm-files-deletable 1
+TEST `echo "worm 1" >> $M0/file1`
+initial_timestamp=$(date +%s)
+current_time_seconds=$(date +%S | sed 's/^0*//' );
+TEST chmod 0444 $M0/file1
+EXPECT '3/10/5' echo $(getfattr -e text --absolute-names --only-value -n "trusted.reten_state" $B0/${V0}1/file1)
+changed_timestamp=$(date +%Y%m%d%H%M --date '60 seconds');
+seconds_diff=`expr 60 - $((current_time_seconds))`
+TEST `touch -a -t "${changed_timestamp}" $M0/file1`
+EXPECT "3/$seconds_diff/5" echo $(getfattr -e text --absolute-names --only-value -n "trusted.reten_state" $B0/${V0}1/file1)
+sleep $seconds_diff
+TEST `echo "worm 2" >> $M0/file1`
+EXPECT "$initial_timestamp" echo $(stat --printf %X $M0/file1)
+
+
+## Test for checking if retention-period is updated on decreasing the access time of a WORM-RETAINED file
+TEST $CLI volume set $V0 features.default-retention-period 120
+initial_timestamp=$(date +%s)
+current_time_seconds=$(date +%S | sed 's/^0*//' );
+TEST chmod 0444 $M0/file1
+EXPECT '3/120/5' echo $(getfattr -e text --absolute-names --only-value -n "trusted.reten_state" $B0/${V0}1/file1)
+changed_timestamp=$(date +%Y%m%d%H%M --date '60 seconds');
+seconds_diff=`expr 60 - $((current_time_seconds))`
+TEST `touch -a -t "${changed_timestamp}" $M0/file1`
+EXPECT "3/$seconds_diff/5" echo $(getfattr -e text --absolute-names --only-value -n "trusted.reten_state" $B0/${V0}1/file1)
+sleep $seconds_diff
+TEST `echo "worm 4" >> $M0/file1`
+EXPECT "$initial_timestamp" echo $(stat --printf %X $M0/file1)
+TEST rm -f $M0/file1
+
TEST $CLI volume stop $V0
EXPECT 'Stopped' volinfo_field $V0 'Status'
diff --git a/tests/geo-rep.rc b/tests/geo-rep.rc
index 396b4c4a3e5..9ba4262730e 100644
--- a/tests/geo-rep.rc
+++ b/tests/geo-rep.rc
@@ -1,4 +1,9 @@
GEO_REP_TIMEOUT=120
+CHECK_MOUNT_TIMEOUT=50
+#check for mount point
+function check_mounted () {
+ df | grep $1 | wc -l
+}
function check_status_num_rows()
{
@@ -6,6 +11,37 @@ function check_status_num_rows()
$GEOREP_CLI $master $slave status | grep -F "$search_key" | wc -l
}
+function check_fanout_status_num_rows()
+{
+ local search_key=$1
+ $GEOREP_CLI $master status | grep -F "$search_key" | wc -l
+}
+
+function check_fanout_status_detail_num_rows()
+{
+ local search_key=$1
+ $GEOREP_CLI $master status detail | grep -F "$search_key" | wc -l
+}
+
+function check_all_status_num_rows()
+{
+ local search_key=$1
+ $GEOREP_CLI status | grep -F "$search_key" | wc -l
+}
+
+function check_all_status_detail_num_rows()
+{
+ local search_key=$1
+ $GEOREP_CLI status detail | grep -F "$search_key" | wc -l
+}
+
+function verify_checkpoint_met()
+{
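+ # counts the rows in "status detail" that report the checkpoint as completed ("Yes")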
+ local master=$1
+ local slave=$2
+ $GEOREP_CLI $master $slave status detail| grep -F "Yes" | wc -l
+}
+
function check_keys_distributed()
{
local search_key=$(cat /var/lib/glusterd/geo-replication/master_slave_common_secret.pem.pub)
@@ -19,6 +55,18 @@ function check_common_secret_file()
echo $?
}
+function create_rename_symlink_case()
+{
+ mkdir ${mastermnt}/MUL_REN_SYMLINK
+ cd ${mastermnt}/MUL_REN_SYMLINK
+ mkdir sym_dir1
+ ln -s "sym_dir1" sym1
+ mv sym1 sym2
+ mv sym2 sym3
+ mv sym3 sym4
+ cd -
+}
+
function create_data()
{
prefix=$1
@@ -89,6 +137,23 @@ function create_data()
chown 1000:1000 ${master_mnt}/${prefix}_chown_f1_ಸಂತಸ
}
+function create_data_hang()
+{
+ prefix=$1
+ mkdir ${master_mnt}/${prefix}
+ cd ${master_mnt}/${prefix}
+ # ~1k files are required with a single sync-job; the hang happens when the
+ # stderr buffer of the tar/ssh process spawned with Popen fills up (i.e., 64k).
+ # 64k is hit when ~800 files are not found while syncing data from the
+ # master, so around 1k files are needed to hit the condition.
+ for i in {1..1000}
+ do
+ echo "test data" > file$i
+ mv -f file$i file
+ done
+ cd -
+}
+
function chown_file_ok()
{
local file_owner=$(stat --format "%u:%g" "$1")
@@ -156,7 +221,8 @@ function arequal_checksum()
{
master=$1
slave=$2
- diff <(arequal-checksum -p $master) <(arequal-checksum -p $slave) | wc -l
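+ # the leading 'x' keeps the echoed value from ever being an empty string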
+ ret=$(diff <(arequal-checksum -p $master) <(arequal-checksum -p $slave) | wc -l)
+ echo x$ret
}
function symlink_ok()
@@ -391,3 +457,39 @@ function check_slave_read_only()
gluster volume info $1 | grep 'features.read-only: on'
echo $?
}
+
+function create_rename_with_existing_destination()
+{
+ dir=$1/rename_with_existing_destination
+ mkdir $dir
+ for i in {1..5}
+ do
+ echo "Data_set$i" > $dir/data_set$i
+ mv $dir/data_set$i $dir/data_set -f
+ done
+}
+
+function verify_rename_with_existing_destination()
+{
+ dir=$1/rename_with_existing_destination
+
+ if [ ! -d $dir ]; then
+ echo 1
+ elif [ ! -f $dir/data_set ]; then
+ echo 2
+ elif [ -f $dir/data_set1 ]; then
+ echo 3
+ elif [ -f $dir/data_set2 ]; then
+ echo 4
+ elif [ -f $dir/data_set3 ]; then
+ echo 5
+ elif [ -f $dir/data_set4 ]; then
+ echo 6
+ elif [ -f $dir/data_set5 ]; then
+ echo 7
+ elif test "XData_set5" != "X$(cat $dir/data_set)"; then
+ echo 8
+ else
+ echo 0
+ fi
+}
diff --git a/tests/glusterfind/glusterfind-basic.t b/tests/glusterfind/glusterfind-basic.t
new file mode 100644
index 00000000000..ccb33fb1fc8
--- /dev/null
+++ b/tests/glusterfind/glusterfind-basic.t
@@ -0,0 +1,84 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../env.rc
+
+SCRIPT_TIMEOUT=300
+
+##Cleanup and start glusterd
+cleanup;
+TEST glusterd;
+TEST pidof glusterd
+
+##create .keys
+mkdir -p /var/lib/glusterd/glusterfind/.keys
+
+#create_and_start test_volume
+TEST $CLI volume create test-vol $H0:$B0/b1 $H0:$B0/b2 $H0:$B0/b3
+TEST gluster volume start test-vol
+
+##Mount test-vol
+TEST glusterfs -s $H0 --volfile-id test-vol $M0
+
+TEST timestamp1=$(date +'%s')
+
+##Create files and dirs inside the mount point
+TEST mkdir -p $M0/dir1
+TEST touch $M0/file1
+
+##Glusterfind Create
+TEST glusterfind create sess_vol1 test-vol --force
+
+##################################################################################
+#Incremental crawl
+##################################################################################
+##Glusterfind Pre
+TEST glusterfind pre sess_vol1 test-vol output_file.txt
+
+#Glusterfind Post
+TEST glusterfind post sess_vol1 test-vol
+
+##Glusterfind List
+EXPECT '1' echo $(glusterfind list | grep sess_vol1 | wc -l)
+
+TEST timestamp2=$(date +'%s')
+
+##Glusterfind Query
+TEST glusterfind query test-vol --since-time $timestamp1 --end-time $timestamp2 output_file.txt
+
+#################################################################################
+#Full Crawl
+#################################################################################
+##Glusterfind Pre
+TEST glusterfind pre sess_vol1 test-vol output_file.txt --full --regenerate-outfile
+EXPECT '1' echo $(grep 'NEW dir1' output_file.txt | wc -l)
+EXPECT '1' echo $(grep 'NEW file1' output_file.txt | wc -l)
+
+##Glusterfind Query commands
+TEST glusterfind query test-vol --full output_file.txt
+EXPECT '1' echo $(grep 'NEW dir1' output_file.txt | wc -l)
+EXPECT '1' echo $(grep 'NEW file1' output_file.txt | wc -l)
+
+##using tag, full crawl
+TEST glusterfind query test-vol --full --tag-for-full-find NEW output_file.txt
+EXPECT '1' echo $(grep 'NEW dir1' output_file.txt | wc -l)
+EXPECT '1' echo $(grep 'NEW file1' output_file.txt | wc -l)
+
+##using --field-separator option, full crawl
+glusterfind query test-vol --full output_file.txt --field-separator "=="
+EXPECT '1' echo $(grep 'NEW==dir1' output_file.txt | wc -l)
+EXPECT '1' echo $(grep 'NEW==file1' output_file.txt | wc -l)
+
+##Adding or Replacing a Brick from an Existing Glusterfind Session
+TEST gluster volume add-brick test-vol $H0:$B0/b4 force
+
+##To make existing session work after brick add
+TEST glusterfind create sess_vol test-vol --force
+EXPECT '1' echo $(glusterfind list | grep sess_vol1 | wc -l)
+
+##glusterfind delete
+TEST glusterfind delete sess_vol test-vol
+
+rm -rf output_file.txt
+cleanup;
diff --git a/tests/include.rc b/tests/include.rc
index d76f13b8d07..0dc7d830449 100644
--- a/tests/include.rc
+++ b/tests/include.rc
@@ -1,3 +1,6 @@
+
+checkpoint_time="$(date +%s%N)"
+
M0=${M0:=/mnt/glusterfs/0}; # 0th mount point for FUSE
M1=${M1:=/mnt/glusterfs/1}; # 1st mount point for FUSE
M2=${M2:=/mnt/glusterfs/2}; # 2nd mount point for FUSE
@@ -8,6 +11,7 @@ V0=${V0:=patchy}; # volume name to use in tests
V1=${V1:=patchy1}; # volume name to use in tests
GMV0=${GMV0:=master}; # master volume name to use in geo-rep tests
GSV0=${GSV0:=slave}; # slave volume name to use in geo-rep tests
+GSV1=${GSV1:=slave1}; # slave volume name to use in geo-rep tests
B0=${B0:=/d/backends}; # top level of brick directories
WORKDIRS="$B0 $M0 $M1 $M2 $M3 $N0 $N1"
@@ -76,7 +80,7 @@ NFS_EXPORT_TIMEOUT=20
CHILD_UP_TIMEOUT=20
PROBE_TIMEOUT=60
PEER_SYNC_TIMEOUT=20
-REBALANCE_TIMEOUT=360
+REBALANCE_TIMEOUT=600
REOPEN_TIMEOUT=20
HEAL_TIMEOUT=80
IO_HEAL_TIMEOUT=120
@@ -89,6 +93,7 @@ GRAPH_SWITCH_TIMEOUT=10
UNLINK_TIMEOUT=5
MDC_TIMEOUT=5
IO_WAIT_TIMEOUT=5
+DISK_FAIL_TIMEOUT=80
LOGDIR=$(gluster --print-logdir)
@@ -130,8 +135,8 @@ _GFS () {
while true; do
touch $mount_point/xy_zzy 2> /dev/null && break
i=$((i+1))
- [ $i -lt 10 ] || break
- sleep 1
+ [ $i -lt 100 ] || break
+ sleep 0.1
done
rm -f $mount_point/xy_zzy
return $mount_ret
@@ -207,6 +212,7 @@ function test_header()
dbg "=========================";
dbg "TEST $t (line $TESTLINE): $*";
saved_cmd="$*"
+ start_time="$(date +%s%N)"
}
@@ -215,15 +221,18 @@ function test_footer()
RET=$?
local lineno=$1
local err=$2
-
+ local end_time
+ local elapsed1
+ local elapsed2
+
+ end_time="$(date +%s%N)"
+ elapsed1="$(((start_time - checkpoint_time) / 1000000))"
+ elapsed2="$(((end_time - start_time) / 1000000))"
+ checkpoint_time="$end_time"
if [ $RET -eq 0 ]; then
- echo "ok $t, LINENUM:$lineno";
+ printf "ok %3d [%7d/%7d] <%4d> '%s'\n" "$t" "$elapsed1" "$elapsed2" "$lineno" "$saved_cmd";
else
- echo "not ok $t $err, LINENUM:$lineno";
- # With DEBUG, this was already printed out, so skip it.
- if [ x"$DEBUG" = x"0" ]; then
- echo "FAILED COMMAND: $saved_cmd"
- fi
+ printf "not ok %3d [%7d/%7d] <%4d> '%s' -> '%s'\n" "$t" "$elapsed1" "$elapsed2" "$lineno" "$saved_cmd" "$err"
if [ "$EXIT_EARLY" = "1" ]; then
cleanup
exit $RET
@@ -356,12 +365,12 @@ function _EXPECT_WITHIN()
a="";
shift;
- local endtime=$(( ${timeout}+`date +%s` ))
+ local endtime="$(( ${timeout}000000000 + $(date +%s%N) ))"
# We *want* this to be globally visible.
EW_RETRIES=0
- while [ `date +%s` -lt $endtime ]; do
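+ # note: [[ < ]] compares strings; both %s%N timestamps have the same number of digits here, so the ordering matches numeric comparison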
+ while [[ "$(date +%s%N)" < "$endtime" ]]; do
a=$("$@" | tail -1 ; exit ${PIPESTATUS[0]})
## Check command success
if [ $? -ne 0 ]; then
@@ -371,7 +380,7 @@ function _EXPECT_WITHIN()
if [[ "$a" =~ $e ]]; then
break;
fi
- sleep 1;
+ sleep 0.25;
EW_RETRIES=$((EW_RETRIES+1))
done
@@ -544,8 +553,41 @@ function process_pids() {
echo "${pids[@]}"
}
+## Lock files should be removed automatically once the "useradd" or "groupadd"
+## command finishes. But sometimes we encounter situations (bugs) where some of
+## these files are not properly unlocked after the command runs. In that case,
+## the next useradd may fail with "cannot lock /etc/passwd" or "unable to lock
+## group file". So, to avoid any such errors, check for stale lock files under
+## /etc and remove them.
+
+function remove_lock_files()
+{
+ if [ -f /etc/passwd.lock ];
+ then
+ rm -rf /etc/passwd.lock;
+ fi
+
+ if [ -f /etc/group.lock ];
+ then
+ rm -rf /etc/group.lock;
+ fi
+
+ if [ -f /etc/shadow.lock ];
+ then
+ rm -rf /etc/shadow.lock;
+ fi
+
+ if [ -f /etc/gshadow.lock ];
+ then
+ rm -rf /etc/gshadow.lock;
+ fi
+}
+
+
function cleanup()
{
+ local end_time
# Prepare flags for umount
case `uname -s` in
@@ -563,6 +605,9 @@ function cleanup()
;;
esac
+ # Clean up lock files.
+ remove_lock_files
+
# Clean up all client mounts
for m in `mount | grep fuse.glusterfs | awk '{print $3}'`; do
umount $flag $m
diff --git a/tests/line-coverage/afr-heal-info.t b/tests/line-coverage/afr-heal-info.t
new file mode 100644
index 00000000000..182665917c4
--- /dev/null
+++ b/tests/line-coverage/afr-heal-info.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+#Test that parallel heal-info command execution doesn't result in spurious
+#entries with locking-scheme granular
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+
+function write_and_del_file {
+ dd of=$M0/a.txt if=/dev/zero bs=1024k count=100
+ rm -f $M0/b.txt
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 locking-scheme granular
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/a.txt $M0/b.txt
+write_and_del_file &
+touch $B0/f1 $B0/f2
+
+# All above is similar to basic/afr/heal-info.t
+
+TEST $CLI volume heal $V0 enable
+TEST $CLI volume heal $V0 info --xml
+TEST $CLI volume heal $V0 info summary
+TEST $CLI volume heal $V0 info summary --xml
+TEST $CLI volume heal $V0 info split-brain
+TEST $CLI volume heal $V0 info split-brain --xml
+
+TEST $CLI volume heal $V0 statistics heal-count
+
+# This may fail as the file is not in split-brain
+$CLI volume heal $V0 split-brain latest-mtime /a.txt
+
+TEST $CLI volume heal $V0 disable
+
+TEST $CLI volume stop $V0
+cleanup;
diff --git a/tests/line-coverage/arbiter-coverage.t b/tests/line-coverage/arbiter-coverage.t
new file mode 100755
index 00000000000..82b470141b5
--- /dev/null
+++ b/tests/line-coverage/arbiter-coverage.t
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 2 arbiter 1 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+build_tester ./glfsxmp.c -lgfapi
+$(dirname $0)/../basic/rpc-coverage.sh $M1 >/dev/null
+./glfsxmp $V0 $H0 >/dev/null
+
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+## Finish up
+TEST $CLI volume stop $V0;
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/line-coverage/cli-peer-and-volume-operations.t b/tests/line-coverage/cli-peer-and-volume-operations.t
new file mode 100644
index 00000000000..0cf8dbe81f9
--- /dev/null
+++ b/tests/line-coverage/cli-peer-and-volume-operations.t
@@ -0,0 +1,135 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../volume.rc
+
+function peer_count {
+eval \$CLI_$1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+cleanup
+
+TEST launch_cluster 3
+
+TEST $CLI_1 system uuid reset
+
+## basic peer commands
+TEST $CLI_1 peer probe $H2
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
+
+#probe an unreachable node
+TEST kill_glusterd 3
+TEST ! $CLI_1 peer probe $H3
+
+#detach a node which is not a part of cluster
+TEST ! $CLI_1 peer detach $H3
+TEST ! $CLI_1 peer detach $H3 force
+
+TEST start_glusterd 3
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 2
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+
+# probe a node which is already part of cluster
+TEST $CLI_1 peer probe $H3
+
+#probe an invalid address
+TEST ! $CLI_1 peer probe 1024.1024.1024.1024
+
+TEST $CLI_1 pool list
+
+TEST $CLI_1 --help
+TEST $CLI_1 --version
+TEST $CLI_1 --print-logdir
+TEST $CLI_1 --print-statedumpdir
+
+# try unrecognised command
+TEST ! $CLI_1 volume
+TEST pidof glusterd
+
+## all help commands
+TEST $CLI_1 global help
+TEST $CLI_1 help
+
+TEST $CLI_1 peer help
+TEST $CLI_1 volume help
+TEST $CLI_1 volume bitrot help
+TEST $CLI_1 volume quota help
+TEST $CLI_1 snapshot help
+
+## volume operations
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+# create a volume with already existing volume name
+TEST ! $CLI_1 volume create $V0 $H1:$B1/$V1 $H2:$B2/$V1
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+# Mount the volume and create files
+TEST glusterfs -s $H1 --volfile-id $V0 $M1
+TEST touch $M1/file{1..100}
+
+#fails because $V0 is not shd compatible
+TEST ! $CLI_1 volume status $V0 shd
+
+#test explicitly provided options
+TEST $CLI_1 --timeout=120 --log-level=INFO volume status
+
+#changing timezone to a different one, to check localtime logging feature
+TEST export TZ='Asia/Kolkata'
+TEST restart_glusterd 1
+
+#localtime logging enable
+TEST $CLI_1 volume set all cluster.localtime-logging enable
+EXPECT '1' logging_time_check $LOGDIR
+
+#localtime logging disable
+TEST $CLI_1 volume set all cluster.localtime-logging disable
+EXPECT '0' logging_time_check $LOGDIR
+
+#changing timezone back to original timezone
+TEST export TZ='UTC'
+
+#negative tests for volume options
+#'set' option to enable quota/inode-quota is now deprecated
+TEST ! $CLI_1 volume set $V0 quota enable
+TEST ! $CLI_1 volume set $V0 inode-quota enable
+
+#invalid transport type 'rcp'
+TEST ! $CLI_1 volume set $V0 config.transport rcp
+
+#'op-version' option is not valid for a single volume
+TEST ! $CLI_1 volume set $V0 cluster.op-version 72000
+
+#'op-version' option can't be used with any other option
+TEST ! $CLI_1 volume set all cluster.localtime-logging disable cluster.op-version 72000
+
+#invalid format of 'op-version'
+TEST ! $CLI_1 volume set all cluster.op-version 72-000
+
+#provided 'op-version' value is greater than max allowed op-version
+op_version=$($CLI_1 volume get all cluster.max-op-version | awk 'NR==3 {print$2}')
+op_version=$((op_version+1000)) #this can be any number greater than 0
+TEST ! $CLI_1 volume set all cluster.op-version $op_version
+
+#provided 'op-version' value cannot be less than the current cluster op-version value
+TEST ! $CLI_1 volume set all cluster.op-version 00000
+
+# system commands
+TEST $CLI_1 system help
+TEST $CLI_1 system uuid get
+TEST $CLI_1 system getspec $V0
+TEST $CLI_1 system getwd
+TEST $CLI_1 system fsm log
+
+# These may fail, but they cover xdr functions and some
+# more code in cli/glusterd
+$CLI_1 system:: mount test local:/$V0
+$CLI_1 system:: umount $M0 lazy
+$CLI_1 system:: copy file options
+$CLI_1 system:: portmap brick2port $H0:$B0/brick
+$CLI_1 system:: uuid reset
+
+cleanup
diff --git a/tests/line-coverage/cli-volume-top-profile-coverage.t b/tests/line-coverage/cli-volume-top-profile-coverage.t
new file mode 100644
index 00000000000..35713c26faa
--- /dev/null
+++ b/tests/line-coverage/cli-volume-top-profile-coverage.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../volume.rc
+
+cleanup
+
+# Creating cluster
+TEST launch_cluster 3
+
+# Probing peers
+TEST $CLI_1 peer probe $H2
+TEST $CLI_1 peer probe $H3
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
+
+# Creating a volume and starting it.
+TEST $CLI_1 volume create $V0 replica 3 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+TEST $CLI_1 volume start $V0
+EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
+
+TEST glusterfs -s $H1 --volfile-id $V0 $M1
+TEST touch $M1/file{1..100}
+
+# Testing volume top command with and without xml output
+function test_volume_top_cmds () {
+ local ret=0
+ declare -a top_cmds=("read" "open" "write" "opendir" "readdir")
+ for cmd in ${top_cmds[@]}; do
+ $CLI_1 volume top $V0 $cmd
+ (( ret += $? ))
+ $CLI_1 volume top $V0 clear
+ (( ret += $? ))
+ $CLI_1 volume top $V0 $cmd --xml
+ (( ret += $? ))
+ $CLI_1 volume top $V0 $cmd brick $H1:$B1/$V0
+ (( ret += $? ))
+ $CLI_1 volume top $V0 clear brick $H1:$B1/$V0
+ (( ret += $? ))
+ $CLI_1 volume top $V0 $cmd brick $H1:$B1/$V0 --xml
+ (( ret += $? ))
+ done
+ return $ret
+}
+
+# Testing volume profile command with and without xml
+function test_volume_profile_cmds () {
+ local ret=0
+ declare -a profile_cmds=("start" "info" "info peek" "info cumulative" "info clear" "info incremental peek" "stop")
+ for cmd in "${profile_cmds[@]}"; do
+ $CLI_1 volume profile $V0 $cmd
+ (( ret += $? ))
+ $CLI_1 volume profile $V0 $cmd --xml
+ (( ret += $? ))
+ done
+ return $ret
+}
+
+TEST test_volume_top_cmds;
+TEST test_volume_profile_cmds;
+
+cleanup
diff --git a/tests/line-coverage/errorgen-coverage.t b/tests/line-coverage/errorgen-coverage.t
new file mode 100755
index 00000000000..f4622428d79
--- /dev/null
+++ b/tests/line-coverage/errorgen-coverage.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+
+# Higher timeout because 10 iterations of rpc-coverage and glfsxmp are run with error-gen enabled
+SCRIPT_TIMEOUT=600
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+TEST $CLI volume set $V0 error-gen posix;
+TEST $CLI volume set $V0 debug.error-failure 3%;
+
+## Start volume and verify
+TEST $CLI volume start $V0;
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+build_tester ./glfsxmp.c -lgfapi
+for i in $(seq 1 10); do
+ # as error-gen is enabled, errors are expected, so there is no
+ # need to test the two commands below for success
+ $(dirname $0)/../basic/rpc-coverage.sh $M1 >/dev/null
+ ./glfsxmp $V0 $H0 >/dev/null
+done
+
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+## Finish up
+TEST $CLI volume stop $V0;
+
+TEST $CLI volume delete $V0;
+
+cleanup;
diff --git a/tests/line-coverage/log-and-brick-ops-negative-case.t b/tests/line-coverage/log-and-brick-ops-negative-case.t
new file mode 100644
index 00000000000..d86cb452282
--- /dev/null
+++ b/tests/line-coverage/log-and-brick-ops-negative-case.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup
+TEST glusterd
+TEST pidof glusterd
+
+#create volumes
+TEST $CLI volume create ${V0}_1 $H0:$B0/v{1..2}
+
+TEST $CLI volume create ${V0}_2 replica 3 arbiter 1 $H0:$B0/v{3..5}
+
+TEST $CLI volume create ${V0}_3 disperse 3 redundancy 1 $H0:$B0/v{6..8}
+TEST $CLI volume start ${V0}_3
+EXPECT 'Started' volinfo_field ${V0}_3 'Status'
+
+TEST $CLI volume create ${V0}_4 replica 3 $H0:$B0/v{9..14}
+TEST $CLI volume start ${V0}_4
+EXPECT 'Started' volinfo_field ${V0}_4 'Status'
+
+#log rotate option
+#provided volume does not exist
+TEST ! $CLI volume log ${V0}_5 rotate
+
+#volume must be started before using log rotate option
+TEST ! $CLI volume log ${V0}_1 rotate
+TEST $CLI volume start ${V0}_1
+EXPECT 'Started' volinfo_field ${V0}_1 'Status'
+
+#incorrect brick provided for the volume
+TEST ! $CLI volume log ${V0}_1 rotate $H0:$B0/v15
+
+#add-brick operations
+#volume must be in started state to increase replica count
+TEST ! $CLI volume add-brick ${V0}_2 replica 4 $H0:$B0/v15
+TEST $CLI volume start ${V0}_2
+EXPECT 'Started' volinfo_field ${V0}_2 'Status'
+
+#incorrect number of bricks for a replica 4 volume
+TEST ! $CLI volume add-brick ${V0}_1 replica 4 $H0:$B0/v15
+
+#replica count provided is less than the current replica count
+TEST ! $CLI volume add-brick ${V0}_2 replica 2 $H0:$B0/v15
+
+#dispersed to replicated dispersed not possible
+TEST ! $CLI volume add-brick ${V0}_3 replica 2 $H0:$B0/v15
+
+#remove-brick operations
+#replica count option provided for dispersed vol
+TEST ! $CLI volume remove-brick ${V0}_3 replica 2 $H0:$B0/v8 start
+
+#given replica count is greater than the current replica count
+TEST ! $CLI volume remove-brick ${V0}_2 replica 4 $H0:$B0/v5 start
+
+#number of bricks to be removed, must be a multiple of replica count
+TEST ! $CLI volume remove-brick ${V0}_2 replica 3 $H0:$B0/v{3..4} start
+
+#less number of bricks given to reduce the replica count
+TEST ! $CLI volume remove-brick ${V0}_2 replica 1 $H0:$B0/v3 start
+
+#bricks should be from different subvol
+TEST ! $CLI volume remove-brick ${V0}_4 replica 2 $H0:$B0/v{13..14} start
+
+#arbiter must be removed to reduce replica count
+TEST ! $CLI volume remove-brick ${V0}_2 replica 1 $H0:$B0/v{3..4} start
+
+#removal of bricks is not allowed without reducing the replica count explicitly
+TEST ! $CLI volume remove-brick ${V0}_2 replica 3 $H0:$B0/v{3..5} start
+
+#incorrect brick for given vol
+TEST ! $CLI volume remove-brick ${V0}_1 $H0:$B0/v15 start
+
+#removing all the bricks are not allowed
+TEST ! $CLI volume remove-brick ${V0}_1 $H0:$B0/v{1..2} start
+
+#volume must not be in stopped state while removing bricks
+TEST $CLI volume stop ${V0}_1
+TEST ! $CLI volume remove-brick ${V0}_1 $H0:$B0/v1 start
+
+cleanup
\ No newline at end of file
diff --git a/tests/line-coverage/meta-max-coverage.t b/tests/line-coverage/meta-max-coverage.t
new file mode 100755
index 00000000000..1cc07610aa7
--- /dev/null
+++ b/tests/line-coverage/meta-max-coverage.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume start $V0;
+
+## Mount FUSE
+TEST $GFS -s $H0 --volfile-id $V0 $M1
+
+TEST stat $M1/.meta/
+
+# expect failures in rpc-coverage.sh execution.
+res=$($(dirname $0)/../basic/rpc-coverage.sh $M1/.meta)
+
+
+# Expect errors here, hence no need to 'check for success'
+for file in $(find $M1/.meta -type f -print); do
+ cat $file >/dev/null
+ echo 1>$file
+ echo hello>$file
+done
+
+TEST umount $M1
+
+cleanup;
diff --git a/tests/line-coverage/namespace-linecoverage.t b/tests/line-coverage/namespace-linecoverage.t
new file mode 100644
index 00000000000..8de6a0f279b
--- /dev/null
+++ b/tests/line-coverage/namespace-linecoverage.t
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+cleanup;
+
+TEST glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2,3,4,5,6,7,8}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 cluster.read-subvolume-index 0
+TEST $CLI volume set $V0 features.tag-namespaces on
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 storage.build-pgfid on
+
+sleep 2
+
+## Mount FUSE
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+
+mkdir -p $M1/namespace
+
+# subvol_1 = bar, subvol_2 = foo, subvol_3 = hey
+# Test create, write (tagged by loc, fd respectively).
+touch $M1/namespace/{bar,foo,hey}
+
+open $M1/namespace/hey
+
+## TODO: best way to increase coverage is to have a gfapi program
+## which covers maximum fops
+TEST $(dirname $0)/../basic/rpc-coverage.sh $M1
+
+TEST cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+cleanup;
diff --git a/tests/line-coverage/old-protocol.t b/tests/line-coverage/old-protocol.t
new file mode 100755
index 00000000000..5676e5636db
--- /dev/null
+++ b/tests/line-coverage/old-protocol.t
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '6' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+file="/var/lib/glusterd/vols/$V0/trusted-$V0.tcp-fuse.vol"
+sed -i -e 's$send-gids true$send-gids true\n option testing.old-protocol true$g' $file
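+# the sed above injects "option testing.old-protocol true" into the client volfile,
+# right after the send-gids option, before the volume is mounted below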
+
+## Mount FUSE
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+## TODO: best way to increase coverage is to have a gfapi program
+## which covers maximum fops
+TEST $(dirname $0)/../basic/rpc-coverage.sh $M1
+
+TEST cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+cleanup;
diff --git a/tests/line-coverage/quiesce-coverage.t b/tests/line-coverage/quiesce-coverage.t
new file mode 100755
index 00000000000..ca29343451e
--- /dev/null
+++ b/tests/line-coverage/quiesce-coverage.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1,2,3,4,5,6};
+
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT '6' brick_count $V0
+
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+file="/var/lib/glusterd/vols/$V0/trusted-$V0.tcp-fuse.vol"
+
+cat >> ${file} <<EOF
+
+volume quiesce
+ type features/quiesce
+ subvolumes ${V0}
+end-volume
+EOF
+
+## Mount FUSE
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+## TODO: best way to increase coverage is to have a gfapi program
+## which covers maximum fops
+TEST $(dirname $0)/../basic/rpc-coverage.sh $M1
+
+TEST cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+cleanup;
diff --git a/tests/line-coverage/shard-coverage.t b/tests/line-coverage/shard-coverage.t
new file mode 100644
index 00000000000..1797999c146
--- /dev/null
+++ b/tests/line-coverage/shard-coverage.t
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup
+
+TEST glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/brick
+TEST $CLI volume set $V0 features.shard on
+EXPECT 'Created' volinfo_field $V0 'Status'
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+# It is good to copy the file locally and build it, so the scope remains
+# inside the tests directory.
+TEST cp $(dirname ${0})/../basic/gfapi/glfsxmp-coverage.c glfsxmp.c
+TEST build_tester ./glfsxmp.c -lgfapi
+TEST ./glfsxmp $V0 $H0
+TEST cleanup_tester ./glfsxmp
+TEST rm ./glfsxmp.c
+
+TEST $GFS -s $H0 --volfile-id $V0 $M1;
+
+TEST $(dirname $0)/../basic/rpc-coverage.sh $M1
+
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/line-coverage/some-features-in-libglusterfs.t b/tests/line-coverage/some-features-in-libglusterfs.t
new file mode 100644
index 00000000000..5719c4e039c
--- /dev/null
+++ b/tests/line-coverage/some-features-in-libglusterfs.t
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+function grep_string {
+ local f=$1
+ local string=$2
+ # The output of the test script also shows up in the log; ignore those lines.
+ echo $(grep ${string} ${f} | grep -v "++++++" | wc -l)
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}
+TEST $CLI volume set $V0 client-log-level TRACE
+TEST $CLI volume start $V0;
+
+log_file="$(gluster --print-logdir)/gluster.log"
+## Mount FUSE
+TEST $GFS -s $H0 --log-file $log_file --volfile-id $V0 $M1
+
+## Cover 'monitoring.c' here
+pgrep 'glusterfs' | xargs kill -USR2
+
+EXPECT_WITHIN 2 1 grep_string $log_file 'sig:USR2'
+
+## Also cover statedump
+pgrep 'glusterfs' | xargs kill -USR1
+
+EXPECT_WITHIN 2 1 grep_string $log_file 'sig:USR1'
+
+## Also cover SIGHUP
+pgrep 'glusterfs' | xargs kill -HUP
+
+EXPECT_WITHIN 2 1 grep_string $log_file 'sig:HUP'
+
+## Also cover SIGTERM
+pgrep 'glusterfs' | xargs kill -TERM
+
+EXPECT_WITHIN 2 1 grep_string $log_file 'cleanup_and_exit'
+
+# The previous signal should make the client exit and unmount itself, so no
+# explicit force_umount is needed here.
+# force_umount $M1
+
+# TODO: the section below is commented out because sending SIGABRT makes the
+# process dump core, and our regression marks a test as failed whenever a core
+# is found.
+# FIXME: figure out a way to run this test, because this part of the code is
+# executed only on a coredump, and it is critical for debugging to keep it
+# working.
+
+# # Restart client
+# TEST $GFS -s $H0 --log-file $log_file --volfile-id $V0 $M1
+#
+# ## Also cover SIGABRT
+# pgrep 'glusterfs ' | xargs kill -ABRT
+#
+# TEST [ 1 -eq $(grep 'pending frames' $log_file | wc -l) ]
+
+TEST rm $log_file
+
+cleanup;
diff --git a/tests/line-coverage/volfile-with-all-graph-syntax.t b/tests/line-coverage/volfile-with-all-graph-syntax.t
new file mode 100644
index 00000000000..b137432cceb
--- /dev/null
+++ b/tests/line-coverage/volfile-with-all-graph-syntax.t
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+cleanup;
+
+TEST mkdir -p $B0/test
+cat > $B0/test.vol <<EOF
+volume test
+ type storage/posix
+ option directory $B0/test
+ option multiple-line-string "I am
+ testing a feature of volfile graph.l"
+ option single-line-string "this is running on $H0"
+ option option-with-back-tick `date +%Y%M%d`
+end-volume
+EOF
+
+# This should succeed, but it will have some unknown options, which is OK.
+TEST glusterfs -f $B0/test.vol $M0;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0;
+
+# This should not succeed
+cat > $B0/test.vol <<EOF
+volume test
+ type storage/posix
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+
+# This should not succeed
+cat > $B0/test.vol <<EOF
+ type storage/posix
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+# This should not succeed
+cat > $B0/test.vol <<EOF
+volume test
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+# This should not succeed
+cat > $B0/test.vol <<EOF
+volume test
+ option test and test
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+# This should not succeed
+cat > $B0/test.vol <<EOF
+volume test
+ subvolumes
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+# This should not succeed
+cat > $B0/test.vol <<EOF
+volume test
+ type storage/posix
+ new-option key value
+ option directory $B0/test
+end-volume
+EOF
+TEST ! glusterfs -f $B0/test.vol $M0;
+
+cleanup;
diff --git a/tests/ssl.rc b/tests/ssl.rc
index 127f83f7577..b1ccc4c8d38 100644
--- a/tests/ssl.rc
+++ b/tests/ssl.rc
@@ -20,7 +20,7 @@ SSL_CA=$SSL_BASE/glusterfs.ca
# Create self-signed certificates
function create_self_signed_certs (){
- openssl genrsa -out $SSL_KEY 1024
+ openssl genrsa -out $SSL_KEY 2048
openssl req -new -x509 -key $SSL_KEY -subj /CN=Anyone -out $SSL_CERT
ln $SSL_CERT $SSL_CA
return $?
diff --git a/tests/thin-arbiter.rc b/tests/thin-arbiter.rc
index c5ac00baaaf..e26d91b1907 100644
--- a/tests/thin-arbiter.rc
+++ b/tests/thin-arbiter.rc
@@ -106,7 +106,7 @@ volume ${V0}-index
subvolumes ${V0}-io-threads
end-volume
-volume ${V0}-io-stats
+volume $b
type debug/io-stats
option count-fop-hits off
option latency-measurement off
@@ -115,11 +115,6 @@ volume ${V0}-io-stats
subvolumes ${V0}-index
end-volume
-volume $b
- type performance/decompounder
- subvolumes ${V0}-io-stats
-end-volume
-
volume ${V0}-server
type protocol/server
option transport.listen-backlog 1024
@@ -178,7 +173,7 @@ function ta_start_mount_process()
{
mkdir -p $1
identifier=$(echo $1 | tr / .)
- if glusterfs -p $B0/${identifier}.pid --volfile=$B0/mount.vol $1
+ if glusterfs --entry-timeout=0 --attribute-timeout=0 -p $B0/${identifier}.pid --volfile=$B0/mount.vol $1
then
cat $B0/$identifier.pid
else
@@ -187,6 +182,13 @@ function ta_start_mount_process()
fi
}
+function ta_get_mount_pid()
+{
+ local mount_path=$1
+ identifier=$(echo $mount_path | tr / .)
+ cat $B0/${identifier}.pid
+}
+
function ta_create_mount_volfile()
{
local b0=$B0/$1
@@ -259,33 +261,20 @@ end-volume
volume ${V0}-distribute
type cluster/distribute
- option tier-hot-compact-frequency 604800
option rebal-throttle normal
option force-migration off
option lookup-optimize on
option weighted-rebalance on
option write-freq-threshold 0
option assert-no-child-down off
- option tier-pause off
- option watermark-low 75
- option tier-compact off
option lock-migration off
option lookup-unhashed on
- option tier-demote-frequency 3600
- option watermark-hi 90
- option tier-cold-compact-frequency 604800
option randomize-hash-range-by-gfid off
option unhashed-sticky-bit off
option use-readdirp on
option readdir-optimize off
option xattr-name trusted.glusterfs.dht
- option tier-max-mb 4000
- option tier-max-files 10000
- option tier-query-limit 100
option read-freq-threshold 0
- option tier-mode test
- option tier-max-promote-file-size 0
- option tier-promote-frequency 120
option min-free-disk 10%
option min-free-inodes 5%
option rebalance-stats off
@@ -444,7 +433,6 @@ cat > $B0/glustershd.vol <<EOF
volume ${V0}-replicate-0-client-0
type protocol/client
option send-gids on
- option transport.socket.lowlat off
option transport.socket.keepalive-interval 2
option remote-host $H0
option remote-subvolume $b0
@@ -490,7 +478,6 @@ volume ${V0}-replicate-0-client-1
option send-gids on
option non-blocking-io off
option transport.listen-backlog 1024
- option transport.socket.lowlat off
option transport.socket.keepalive-interval 2
option password a0ad63dd-8314-4f97-9160-1b93e3cb1f0b
option username 459d48e8-2a92-4f11-89f2-077b29f6f86d
@@ -509,7 +496,6 @@ volume ${V0}-replicate-0-thin-arbiter-client
option remote-subvolume $ta
option filter-O_DIRECT disable
option non-blocking-io off
- option transport.socket.lowlat off
option transport.socket.keepalive-interval 2
option transport.socket.read-fail-log off
option remote-host $H0
@@ -612,3 +598,16 @@ function ta_start_shd_process()
return 1
fi
}
+
+function ta_mount_child_up_status()
+{
+ local mount_path=$1
+ #brick_id is (brick-num in volume info - 1)
+ local vol=$2
+ local brick_id=$3
+ local pid=$(ta_get_mount_pid $mount_path)
+ local fpath=$(generate_statedump $pid)
+ up=$(grep -a -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
+ rm -f $fpath
+ echo "$up"
+}
diff --git a/tests/tier.rc b/tests/tier.rc
deleted file mode 100644
index 88acdc791eb..00000000000
--- a/tests/tier.rc
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/bin/bash
-
-# Common tier functions
-
-# Check if a file is being migrated
-# by checking for the presence of
-# the sticky bit
-# Args: $1 : path to file
-
-function is_sticky_set () {
- echo $1
- if [ -k $1 ];
- then
- echo "yes"
- else
- echo "no"
- fi
-}
-
-
-function exists_and_regular_file () {
- filepath=$1
- if [ -n "$filepath" ];
- then
- if [ -k "$filepath" ]
- then
- echo "no"
- else
- echo "yes"
- fi
- else
- echo "no"
- fi
-}
-
-
-function check_counters {
- index=0
- ret=0
- rm -f /tmp/tc*.txt
- echo "0" > /tmp/tc2.txt
- $CLI volume tier $V0 status | grep localhost > /tmp/tc.txt
-
- promote=`cat /tmp/tc.txt |awk '{print $2}'`
- demote=`cat /tmp/tc.txt |awk '{print $3}'`
- if [ "${promote}" != "${1}" ]; then
- echo "1" > /tmp/tc2.txt
-
- elif [ "${demote}" != "${2}" ]; then
- echo "2" > /tmp/tc2.txt
- fi
-
- # temporarily disable non-Linux tests.
- case $OSTYPE in
- NetBSD | FreeBSD | Darwin)
- echo "0" > /tmp/tc2.txt
- ;;
- esac
- cat /tmp/tc2.txt
-}
-
-
-function detach_start {
- $CLI volume tier $1 detach start
- echo $?;
-}
-
-
-# Grab md5sum without file path (failed attempt notifications are discarded)
-function fingerprint {
- md5sum $1 2> /dev/null | grep --only-matching -m 1 '^[0-9a-f]*'
-}
-
-
-
-# Create a large number of files in the current directory.
-# $1 : file name prefix. Will create files $2-1 to $2-$3
-# $2 : number of files
-
-function create_many_files {
- filename=$1
- num=$2
-
- for i in `seq 1 $num`; do
- dd if=/dev/urandom of=./${dirname}/${filename}$i bs=104857 count=1;
- done
-}
-
-
-function confirm_tier_removed {
- $CLI system getspec $V0 | grep $1
- if [ $? == 0 ]; then
- echo "1"
- else
- echo "0"
- fi
-}
-
-function confirm_vol_stopped {
- $CLI volume stop $1
- if [ $? == 0 ]; then
- echo "0"
- else
- echo "1"
- fi
-}
-
-
-function sleep_first_cycle {
- startTime=$(date +%s)
- mod=$(( ( $startTime % $1 ) + 1 ))
- sleep $mod
-}
-
-function sleep_until_mid_cycle {
- startTime=$(date +%s)
- mod=$(( ( $startTime % $1 ) + 1 ))
- mod=$(( $1 - $mod ))
- mod=$(( $mod + $1 / 2 ))
- sleep $mod
-}
-
-function tier_daemon_check () {
- pgrep -f "tierd/$V0"
- echo "$?"
-}
-
-function rebalance_run_time () {
- local time=$($CLI volume tier $1 status | awk '{print $6}' | sed -n 3p);
- local hh=$(echo $time | cut -d ':' -f1);
- local mm=$(echo $time | cut -d ':' -f2);
- local ss=$(echo $time | cut -d ':' -f3);
- local total=$(($hh * 3600 + $mm * 60 + $ss));
- echo $total;
-}
-
-function tier_detach_commit () {
- $CLI_1 volume tier $V0 detach commit | grep "success" | wc -l
-}
-
-function tier_detach_status_node_down () {
- $CLI_1 volume tier $V0 detach status | grep "WARNING" | wc -l
-}
-
-function tier_status_node_down () {
- $CLI_1 volume tier $V0 status | grep "WARNING" | wc -l
-}
-
-function tier_detach_status () {
- $CLI_1 volume tier $V0 detach status | grep "success" | wc -l
-}
-
-function wait_for_tier_start () {
- sleep 5
-}
-
-function tier_detach_commit_for_single_node () {
- $CLI volume tier $V0 detach commit | grep "success" | wc -l
-}
diff --git a/tests/utils/changelog/changelog.h b/tests/utils/changelog/changelog.h
index 969a1f370c2..1502b689eb4 100644
--- a/tests/utils/changelog/changelog.h
+++ b/tests/utils/changelog/changelog.h
@@ -116,4 +116,10 @@ int
gf_history_changelog(char *changelog_dir, unsigned long start,
unsigned long end, int n_parallel,
unsigned long *actual_end);
+int
+gf_history_changelog_scan();
+ssize_t
+gf_history_changelog_next_change(char *bufptr, size_t maxlen);
+int
+gf_history_changelog_done(char *file);
#endif
diff --git a/tests/utils/changelog/test-changelog-api.c b/tests/utils/changelog/test-changelog-api.c
new file mode 100644
index 00000000000..f4eb066b630
--- /dev/null
+++ b/tests/utils/changelog/test-changelog-api.c
@@ -0,0 +1,98 @@
+/*
+ Copyright (c) 2019 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/**
+ * get set of new changes every 5 seconds (just print the file names)
+ *
+ * Compile it using:
+ * gcc -o getchanges `pkg-config --cflags libgfchangelog` get-changes.c \
+ * `pkg-config --libs libgfchangelog`
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/un.h>
+#include <limits.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#include "changelog.h"
+
+int
+main(int argc, char **argv)
+{
+ int i = 0;
+ int ret = 0;
+ ssize_t nr_changes = 0;
+ ssize_t changes = 0;
+ char fbuf[PATH_MAX] = {
+ 0,
+ };
+
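+ /* The test harness only checks what this program prints on stdout: a
+ * negative code identifies which libgfchangelog call failed, while the
+ * happy path below keeps polling for new changelogs. */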
+ ret = gf_changelog_init(NULL);
+ if (ret) {
+ printf("-1");
+ fflush(stdout);
+ return -1;
+ }
+
+ /* get changes for brick "/d/backends/patchy0" */
+ ret = gf_changelog_register("/d/backends/patchy0", "/tmp/scratch_v1",
+ "/var/log/glusterfs/changes.log", 9, 5);
+ if (ret) {
+ printf("-2");
+ fflush(stdout);
+ return -1;
+ }
+
+ while (1) {
+ i = 0;
+ nr_changes = gf_changelog_scan();
+ if (nr_changes < 0) {
+ printf("-4");
+ fflush(stdout);
+ return -1;
+ }
+
+ if (nr_changes == 0)
+ goto next;
+
+ while ((changes = gf_changelog_next_change(fbuf, PATH_MAX)) > 0) {
+ /* process changelog */
+ /* ... */
+ /* ... */
+ /* ... */
+ /* done processing */
+
+ ret = gf_changelog_done(fbuf);
+ if (ret) {
+ printf("-5");
+ fflush(stdout);
+ return -1;
+ }
+ }
+
+ if (changes == -1) {
+ printf("-6");
+ fflush(stdout);
+ return -1;
+ }
+
+ next:
+ sleep(2);
+ }
+
+out:
+ printf("0");
+ fflush(stdout);
+ return ret;
+}
diff --git a/tests/utils/changelog/test-history-api.c b/tests/utils/changelog/test-history-api.c
new file mode 100644
index 00000000000..d78e387df10
--- /dev/null
+++ b/tests/utils/changelog/test-history-api.c
@@ -0,0 +1,111 @@
+/*
+ Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+/**
+ * get set of new changes every 10 seconds (just print the file names)
+ *
+ * Compile it using:
+ * gcc -o gethistory `pkg-config --cflags libgfchangelog` get-history.c \
+ * `pkg-config --libs libgfchangelog`
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/un.h>
+#include <limits.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include "changelog.h"
+
+int
+main(int argc, char **argv)
+{
+ int ret = 0;
+ int i = 0;
+ unsigned long end_ts = 0;
+ ssize_t nr_changes = 0;
+ ssize_t changes = 0;
+ int start = 0;
+ int end = 0;
+ char fbuf[PATH_MAX] = {
+ 0,
+ };
+
+ ret = gf_changelog_init(NULL);
+ if (ret) {
+ printf("-1");
+ fflush(stdout);
+ return -1;
+ }
+
+ ret = gf_changelog_register("/d/backends/patchy0", "/tmp/scratch_v1",
+ "/var/log/glusterfs/changes.log", 9, 5);
+ if (ret) {
+ printf("-2");
+ fflush(stdout);
+ return -1;
+ }
+
+ start = atoi(argv[1]);
+ end = atoi(argv[2]);
+
+ ret = gf_history_changelog("/d/backends/patchy0/.glusterfs/changelogs",
+ start, end, 3, &end_ts);
+ if (ret < 0) {
+ printf("-3");
+ fflush(stdout);
+ return -1;
+ } else if (ret == 1) {
+ printf("1");
+ fflush(stdout);
+ return 0;
+ }
+
+ while (1) {
+ nr_changes = gf_history_changelog_scan();
+ if (nr_changes < 0) {
+ printf("-4");
+ fflush(stdout);
+ return -1;
+ }
+
+ if (nr_changes == 0) {
+ goto out;
+ }
+
+ while ((changes = gf_history_changelog_next_change(fbuf, PATH_MAX)) >
+ 0) {
+ /* process changelog */
+ /* ... */
+ /* ... */
+ /* ... */
+ /* done processing */
+
+ ret = gf_history_changelog_done(fbuf);
+ if (ret) {
+ printf("-5");
+ fflush(stdout);
+ return -1;
+ }
+ }
+ if (changes == -1) {
+ printf("-6");
+ fflush(stdout);
+ return -1;
+ }
+ }
+
+out:
+ printf("0");
+ fflush(stdout);
+ return 0;
+}
diff --git a/tests/utils/changelogparser.py b/tests/utils/changelogparser.py
index e8e252d195f..3b8f81d1bad 100644
--- a/tests/utils/changelogparser.py
+++ b/tests/utils/changelogparser.py
@@ -125,7 +125,10 @@ class Record(object):
return repr(self.__dict__)
def __str__(self):
- return unicode(self).encode('utf-8')
+ if sys.version_info >= (3,):
+ return self.__unicode__()
+ else:
+ return unicode(self).encode('utf-8')
def get_num_tokens(data, tokens, version=Version.V11):
diff --git a/tests/utils/create-files.py b/tests/utils/create-files.py
index b2a19610d63..04736e9c73b 100755
--- a/tests/utils/create-files.py
+++ b/tests/utils/create-files.py
@@ -19,6 +19,11 @@ import argparse
datsiz = 0
timr = 0
+def get_ascii_upper_alpha_digits():
+ if sys.version_info > (3,0):
+ return string.ascii_uppercase+string.digits
+ else:
+ return string.uppercase+string.digits
def setLogger(filename):
global logger
@@ -111,7 +116,7 @@ def create_tar_file(fil, size, mins, maxs, rand):
def get_filename(flen):
size = flen
- char = string.uppercase+string.digits
+ char = get_ascii_upper_alpha_digits()
st = ''.join(random.choice(char) for i in range(size))
ti = str((hex(int(str(time.time()).split('.')[0])))[2:])
return ti+"%%"+st
@@ -175,7 +180,7 @@ def tar_files(files, file_count, inter, size, mins, maxs,
def setxattr_files(files, randname, dir_path):
- char = string.uppercase+string.digits
+ char = get_ascii_upper_alpha_digits()
if not randname:
for k in range(files):
v = ''.join(random.choice(char) for i in range(10))
diff --git a/tests/utils/get-mdata-xattr.c b/tests/utils/get-mdata-xattr.c
new file mode 100644
index 00000000000..e9f54717263
--- /dev/null
+++ b/tests/utils/get-mdata-xattr.c
@@ -0,0 +1,152 @@
+/*
+ Copyright (c) 2019 Red Hat, Inc. <http://www.redhat.com>
+ This file is part of GlusterFS.
+
+ This file is licensed to you under your choice of the GNU Lesser
+ General Public License, version 3 or any later version (LGPLv3 or
+ later), or the GNU General Public License, version 2 (GPLv2), in all
+ cases as published by the Free Software Foundation.
+*/
+
+#include <stdlib.h>
+#include <endian.h>
+#include <stdio.h>
+#include <time.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/xattr.h>
+#include <errno.h>
+
+typedef struct gf_timespec_disk {
+ uint64_t tv_sec;
+ uint64_t tv_nsec;
+} gf_timespec_disk_t;
+
+/* posix_mdata_t on disk structure */
+typedef struct __attribute__((__packed__)) posix_mdata_disk {
+ /* version of structure, bumped up if any new member is added */
+ uint8_t version;
+ /* flags indicates valid fields in the structure */
+ uint64_t flags;
+ gf_timespec_disk_t ctime;
+ gf_timespec_disk_t mtime;
+ gf_timespec_disk_t atime;
+} posix_mdata_disk_t;
+
+/* In memory representation posix metadata xattr */
+typedef struct {
+ /* version of structure, bumped up if any new member is added */
+ uint8_t version;
+ /* flags indicates valid fields in the structure */
+ uint64_t flags;
+ struct timespec ctime;
+ struct timespec mtime;
+ struct timespec atime;
+} posix_mdata_t;
+
+#define GF_XATTR_MDATA_KEY "trusted.glusterfs.mdata"
+
+/* posix_mdata_from_disk converts posix_mdata_disk_t into host byte order
+ */
+static inline void
+posix_mdata_from_disk(posix_mdata_t *out, posix_mdata_disk_t *in)
+{
+ out->version = in->version;
+ out->flags = be64toh(in->flags);
+
+ out->ctime.tv_sec = be64toh(in->ctime.tv_sec);
+ out->ctime.tv_nsec = be64toh(in->ctime.tv_nsec);
+
+ out->mtime.tv_sec = be64toh(in->mtime.tv_sec);
+ out->mtime.tv_nsec = be64toh(in->mtime.tv_nsec);
+
+ out->atime.tv_sec = be64toh(in->atime.tv_sec);
+ out->atime.tv_nsec = be64toh(in->atime.tv_nsec);
+}
+
+/* posix_fetch_mdata_xattr fetches the posix_mdata_t from disk */
+static int
+posix_fetch_mdata_xattr(const char *real_path, posix_mdata_t *metadata)
+{
+ ssize_t size = -1;
+ char *value = NULL;
+ char gfid_str[64] = {0};
+
+ char *key = GF_XATTR_MDATA_KEY;
+
+ if (!metadata || !real_path) {
+ goto err;
+ }
+
+ /* Get size */
+ size = lgetxattr(real_path, key, NULL, 0);
+ if (size == -1) {
+ goto err;
+ }
+
+ value = calloc(size + 1, sizeof(char));
+ if (!value) {
+ goto err;
+ }
+
+ /* Get xattr value */
+ size = lgetxattr(real_path, key, value, size);
+ if (size == -1) {
+ goto err;
+ }
+ posix_mdata_from_disk(metadata, (posix_mdata_disk_t *)value);
+
+out:
+ if (value)
+ free(value);
+ return 0;
+err:
+ if (value)
+ free(value);
+ return -1;
+}
+
+int
+main(int argc, char *argv[])
+{
+ posix_mdata_t metadata;
+ uint64_t result;
+
+ if (argc != 3) {
+ /*
+ Usage: get_mdata_xattr -c|-m|-a <file-name>
+ where -c --> ctime
+ -m --> mtime
+ -a --> atime
+ */
+ printf("-1");
+ goto err;
+ }
+
+ if (posix_fetch_mdata_xattr(argv[2], &metadata)) {
+ printf("-1");
+ goto err;
+ }
+
+ switch (argv[1][1]) {
+ case 'c':
+ result = metadata.ctime.tv_sec;
+ break;
+ case 'm':
+ result = metadata.mtime.tv_sec;
+ break;
+ case 'a':
+ result = metadata.atime.tv_sec;
+ break;
+ default:
+ printf("-1");
+ goto err;
+ }
+ printf("%" PRIu64, result);
+ fflush(stdout);
+ return 0;
+err:
+ fflush(stdout);
+ return -1;
+}
diff --git a/tests/utils/gfid-access.py b/tests/utils/gfid-access.py
index 556d2b4c65b..c35c1223df6 100755
--- a/tests/utils/gfid-access.py
+++ b/tests/utils/gfid-access.py
@@ -33,23 +33,51 @@ def _fmt_mkdir(l):
def _fmt_symlink(l1, l2):
return "!II%dsI%ds%ds" % (37, l1+1, l2+1)
-def entry_pack_reg(gf, bn, mo, uid, gid):
- blen = len(bn)
- return struct.pack(_fmt_mknod(blen),
- uid, gid, gf, mo, bn,
- stat.S_IMODE(mo), 0, umask())
-
-def entry_pack_dir(gf, bn, mo, uid, gid):
- blen = len(bn)
- return struct.pack(_fmt_mkdir(blen),
- uid, gid, gf, mo, bn,
- stat.S_IMODE(mo), umask())
-
-def entry_pack_symlink(gf, bn, lnk, mo, uid, gid):
- blen = len(bn)
- llen = len(lnk)
- return struct.pack(_fmt_symlink(blen, llen),
- uid, gid, gf, mo, bn, lnk)
+
+if sys.version_info > (3,):
+ def entry_pack_reg(gf, bn, mo, uid, gid):
+ bn_encoded = bn.encode()
+ blen = len(bn_encoded)
+ return struct.pack(_fmt_mknod(blen),
+ uid, gid, gf.encode(), mo, bn_encoded,
+ stat.S_IMODE(mo), 0, umask())
+
+ # mkdir
+ def entry_pack_dir(gf, bn, mo, uid, gid):
+ bn_encoded = bn.encode()
+ blen = len(bn_encoded)
+ return struct.pack(_fmt_mkdir(blen),
+ uid, gid, gf.encode(), mo, bn_encoded,
+ stat.S_IMODE(mo), umask())
+ # symlink
+ def entry_pack_symlink(gf, bn, lnk, st):
+ bn_encoded = bn.encode()
+ blen = len(bn_encoded)
+ lnk_encoded = lnk.encode()
+ llen = len(lnk_encoded)
+ return struct.pack(_fmt_symlink(blen, llen),
+ st['uid'], st['gid'],
+ gf.encode(), st['mode'], bn_encoded,
+ lnk_encoded)
+
+else:
+ def entry_pack_reg(gf, bn, mo, uid, gid):
+ blen = len(bn)
+ return struct.pack(_fmt_mknod(blen),
+ uid, gid, gf, mo, bn,
+ stat.S_IMODE(mo), 0, umask())
+
+ def entry_pack_dir(gf, bn, mo, uid, gid):
+ blen = len(bn)
+ return struct.pack(_fmt_mkdir(blen),
+ uid, gid, gf, mo, bn,
+ stat.S_IMODE(mo), umask())
+
+ def entry_pack_symlink(gf, bn, lnk, mo, uid, gid):
+ blen = len(bn)
+ llen = len(lnk)
+ return struct.pack(_fmt_symlink(blen, llen),
+ uid, gid, gf, mo, bn, lnk)
if __name__ == '__main__':
if len(sys.argv) < 9:
diff --git a/tests/utils/libcxattr.py b/tests/utils/libcxattr.py
index fd0b08378fc..3f3ed1fffbb 100644
--- a/tests/utils/libcxattr.py
+++ b/tests/utils/libcxattr.py
@@ -10,7 +10,9 @@
import os
import sys
-from ctypes import CDLL, c_int, create_string_buffer
+from ctypes import CDLL, c_int
+from py2py3 import bytearray_to_str, gr_create_string_buffer
+from py2py3 import gr_query_xattr, gr_lsetxattr, gr_lremovexattr
class Xattr(object):
@@ -47,20 +49,23 @@ class Xattr(object):
@classmethod
def _query_xattr(cls, path, siz, syscall, *a):
if siz:
- buf = create_string_buffer('\0' * siz)
+ buf = gr_create_string_buffer(siz)
else:
buf = None
ret = getattr(cls.libc, syscall)(*((path,) + a + (buf, siz)))
if ret == -1:
cls.raise_oserr()
if siz:
- return buf.raw[:ret]
+ # py2 and py3 compatibility. Convert bytes array
+ # to string
+ result = bytearray_to_str(buf.raw)
+ return result[:ret]
else:
return ret
@classmethod
def lgetxattr(cls, path, attr, siz=0):
- return cls._query_xattr(path, siz, 'lgetxattr', attr)
+ return gr_query_xattr(cls, path, siz, 'lgetxattr', attr)
@classmethod
def lgetxattr_buf(cls, path, attr):
@@ -74,20 +79,21 @@ class Xattr(object):
@classmethod
def llistxattr(cls, path, siz=0):
- ret = cls._query_xattr(path, siz, 'llistxattr')
+ ret = gr_query_xattr(cls, path, siz, 'llistxattr')
if isinstance(ret, str):
- ret = ret.split('\0')
+ ret = ret.strip('\0')
+ ret = ret.split('\0') if ret else []
return ret
@classmethod
def lsetxattr(cls, path, attr, val):
- ret = cls.libc.lsetxattr(path, attr, val, len(val), 0)
+ ret = gr_lsetxattr(cls, path, attr, val)
if ret == -1:
cls.raise_oserr()
@classmethod
def lremovexattr(cls, path, attr):
- ret = cls.libc.lremovexattr(path, attr)
+ ret = gr_lremovexattr(cls, path, attr)
if ret == -1:
cls.raise_oserr()
diff --git a/tests/utils/py2py3.py b/tests/utils/py2py3.py
new file mode 100644
index 00000000000..63aca10fd26
--- /dev/null
+++ b/tests/utils/py2py3.py
@@ -0,0 +1,186 @@
+#
+# Copyright (c) 2018 Red Hat, Inc. <http://www.redhat.com>
+# This file is part of GlusterFS.
+
+# This file is licensed to you under your choice of the GNU Lesser
+# General Public License, version 3 or any later version (LGPLv3 or
+# later), or the GNU General Public License, version 2 (GPLv2), in all
+# cases as published by the Free Software Foundation.
+#
+
+# All python2/python3 compatibility routines
+
+import sys
+import os
+import stat
+import struct
+from ctypes import create_string_buffer
+
+def umask():
+ return os.umask(0)
+
+if sys.version_info >= (3,):
+ def pipe():
+ (r, w) = os.pipe()
+ os.set_inheritable(r, True)
+ os.set_inheritable(w, True)
+ return (r, w)
+
+ # Raw conversion of a bytearray to a string. Used in cases where the
+ # buffer is created by create_string_buffer, which is an 8-bit char
+ # array, and passed to syscalls to fetch results. Using encode/decode
+ # doesn't work here as the conversion to a string alters the size.
+ def bytearray_to_str(byte_arr):
+ return ''.join([chr(b) for b in byte_arr])
+
+ # Raw conversion of a string to bytes. This is required to convert
+ # the string back into a bytearray (C char array) for use in struct
+ # pack/unpacking. Again, encode/decode can't be used because the
+ # conversion alters the size.
+ def str_to_bytearray(string):
+ return bytes([ord(c) for c in string])
+
+ def gr_create_string_buffer(size):
+ return create_string_buffer(b'\0', size)
+
+ def gr_query_xattr(cls, path, size, syscall, attr=None):
+ if attr:
+ return cls._query_xattr(path.encode(), size, syscall,
+ attr.encode())
+ else:
+ return cls._query_xattr(path.encode(), size, syscall)
+
+ def gr_lsetxattr(cls, path, attr, val):
+ return cls.libc.lsetxattr(path.encode(), attr.encode(), val,
+ len(val), 0)
+
+ def gr_lremovexattr(cls, path, attr):
+ return cls.libc.lremovexattr(path.encode(), attr.encode())
+
+ def gr_cl_register(cls, brick, path, log_file, log_level, retries):
+ return cls._get_api('gf_changelog_register')(brick.encode(),
+ path.encode(),
+ log_file.encode(),
+ log_level, retries)
+
+ def gr_cl_done(cls, clfile):
+ return cls._get_api('gf_changelog_done')(clfile.encode())
+
+ def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel,
+ actual_end):
+ return cls._get_api('gf_history_changelog')(changelog_path.encode(),
+ start, end, num_parallel,
+ actual_end)
+
+ def gr_cl_history_done(cls, clfile):
+ return cls._get_api('gf_history_changelog_done')(clfile.encode())
+
+ # regular file
+
+ def entry_pack_reg(cls, gf, bn, mo, uid, gid):
+ bn_encoded = bn.encode()
+ blen = len(bn_encoded)
+ return struct.pack(cls._fmt_mknod(blen),
+ uid, gid, gf.encode(), mo, bn_encoded,
+ stat.S_IMODE(mo), 0, umask())
+
+ def entry_pack_reg_stat(cls, gf, bn, st):
+ bn_encoded = bn.encode()
+ blen = len(bn_encoded)
+ mo = st['mode']
+ return struct.pack(cls._fmt_mknod(blen),
+ st['uid'], st['gid'],
+ gf.encode(), mo, bn_encoded,
+ stat.S_IMODE(mo), 0, umask())
+ # mkdir
+
+ def entry_pack_mkdir(cls, gf, bn, mo, uid, gid):
+ bn_encoded = bn.encode()
+ blen = len(bn_encoded)
+ return struct.pack(cls._fmt_mkdir(blen),
+ uid, gid, gf.encode(), mo, bn_encoded,
+ stat.S_IMODE(mo), umask())
+ # symlink
+
+ def entry_pack_symlink(cls, gf, bn, lnk, st):
+ bn_encoded = bn.encode()
+ blen = len(bn_encoded)
+ lnk_encoded = lnk.encode()
+ llen = len(lnk_encoded)
+ return struct.pack(cls._fmt_symlink(blen, llen),
+ st['uid'], st['gid'],
+ gf.encode(), st['mode'], bn_encoded,
+ lnk_encoded)
+else:
+ def pipe():
+ (r, w) = os.pipe()
+ return (r, w)
+
+ # Raw conversion of bytearray to string
+ def bytearray_to_str(byte_arr):
+ return byte_arr
+
+ # Raw conversion of string to bytearray
+ def str_to_bytearray(string):
+ return string
+
+ def gr_create_string_buffer(size):
+ return create_string_buffer('\0', size)
+
+ def gr_query_xattr(cls, path, size, syscall, attr=None):
+ if attr:
+ return cls._query_xattr(path, size, syscall, attr)
+ else:
+ return cls._query_xattr(path, size, syscall)
+
+ def gr_lsetxattr(cls, path, attr, val):
+ return cls.libc.lsetxattr(path, attr, val, len(val), 0)
+
+ def gr_lremovexattr(cls, path, attr):
+ return cls.libc.lremovexattr(path, attr)
+
+ def gr_cl_register(cls, brick, path, log_file, log_level, retries):
+ return cls._get_api('gf_changelog_register')(brick, path, log_file,
+ log_level, retries)
+
+ def gr_cl_done(cls, clfile):
+ return cls._get_api('gf_changelog_done')(clfile)
+
+ def gr_cl_history_changelog(cls, changelog_path, start, end, num_parallel,
+ actual_end):
+ return cls._get_api('gf_history_changelog')(changelog_path, start, end,
+ num_parallel, actual_end)
+
+ def gr_cl_history_done(cls, clfile):
+ return cls._get_api('gf_history_changelog_done')(clfile)
+
+ # regular file
+
+ def entry_pack_reg(cls, gf, bn, mo, uid, gid):
+ blen = len(bn)
+ return struct.pack(cls._fmt_mknod(blen),
+ uid, gid, gf, mo, bn,
+ stat.S_IMODE(mo), 0, umask())
+
+ def entry_pack_reg_stat(cls, gf, bn, st):
+ blen = len(bn)
+ mo = st['mode']
+ return struct.pack(cls._fmt_mknod(blen),
+ st['uid'], st['gid'],
+ gf, mo, bn,
+ stat.S_IMODE(mo), 0, umask())
+ # mkdir
+
+ def entry_pack_mkdir(cls, gf, bn, mo, uid, gid):
+ blen = len(bn)
+ return struct.pack(cls._fmt_mkdir(blen),
+ uid, gid, gf, mo, bn,
+ stat.S_IMODE(mo), umask())
+ # symlink
+
+ def entry_pack_symlink(cls, gf, bn, lnk, st):
+ blen = len(bn)
+ llen = len(lnk)
+ return struct.pack(cls._fmt_symlink(blen, llen),
+ st['uid'], st['gid'],
+ gf, st['mode'], bn, lnk)
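
The two raw conversion helpers above are deliberately not encode()/decode(): each buffer byte must map to exactly one character so sizes derived from C buffers stay correct. A quick sanity-check sketch, assuming py2py3.py is importable from tests/utils:

    # Round-trip check of the raw conversion helpers (illustrative only).
    from py2py3 import bytearray_to_str, str_to_bytearray, gr_create_string_buffer

    buf = gr_create_string_buffer(4)          # 4-byte ctypes buffer, zero-filled
    raw = bytearray_to_str(buf.raw)           # one character per buffer byte
    assert len(raw) == 4
    assert str_to_bytearray(raw) == buf.raw   # lossless in both directions
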
diff --git a/tests/volume.rc b/tests/volume.rc
index 261c6554d46..b38848c0e52 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -84,10 +84,6 @@ function fix-layout_status_field {
$CLI volume rebalance $1 status | awk '{print $2,$3,$4}' |sed -n 3p |tr -d '[^0-9+\.]'|sed 's/ *$//g'
}
-function detach_tier_status_field_complete {
- $CLI volume tier $1 detach status | awk '{print $7}' |sed -n 4p
-}
-
function remove_brick_status_completed_field {
local vol=$1
local brick_list=$2
@@ -116,16 +112,33 @@ function cleanup_statedump {
#.vimrc friendly comment */
}
+function wait_statedump_ready {
+ local maxtime="${1}000000000"
+ local pid="$2"
+ local deadline="$(($(date +%s%N) + maxtime))"
+ local fname
+
+ while [[ "$(date +%s%N)" < "$deadline" ]]; do
+ fname="$statedumpdir/$(ls $statedumpdir | grep -E "\.$pid\.dump\.")"
+ if [[ -f "$fname" ]]; then
+ grep "^DUMP-END-TIME" "$fname" >/dev/null
+ if [[ $? -eq 0 ]]; then
+ echo $fname
+ return
+ fi
+ fi
+ sleep 0.1
+ done
+
+ echo "nostatedump"
+}
+
function generate_statedump {
- local fpath=""
pid=$1
#remove old stale statedumps
cleanup_statedump $pid
kill -USR1 $pid
- #Wait till the statedump is generated
- sleep 1
- fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
- echo $statedumpdir/$fname
+ wait_statedump_ready 3 $pid
}
function generate_mount_statedump {
@@ -180,7 +193,7 @@ function afr_child_up_status_meta {
local mnt=$1
local repl=$2
local child=$3
- grep "child_up\[$child\]" $mnt/.meta/graphs/active/$repl/private | awk '{print $3}'
+ grep -E "^child_up\[$child\]" $mnt/.meta/graphs/active/$repl/private | awk '{print $3}'
}
function client_connected_status_meta {
@@ -237,11 +250,13 @@ function ec_child_up_count_shd {
}
function get_shd_process_pid {
- ps auxww | grep glusterfs | grep -E "glustershd/glustershd.pid" | awk '{print $2}' | head -1
+ local vol=$1
+ ps auxww | grep "process-name\ glustershd" | awk '{print $2}' | head -1
}
function generate_shd_statedump {
- generate_statedump $(get_shd_process_pid)
+ local vol=$1
+ generate_statedump $(get_shd_process_pid $vol)
}
function generate_nfs_statedump {
@@ -281,6 +296,10 @@ function quotad_up_status {
gluster volume status | grep "Quota Daemon" | awk '{print $7}'
}
+function get_glusterd_pid {
+ pgrep '^glusterd$' | head -1
+}
+
function get_brick_pidfile {
local vol=$1
local host=$2
@@ -303,15 +322,12 @@ function kill_brick {
local socket=$(cat $cmdline | tr '\0' '\n' | grep '\.socket$')
gf_attach -d $socket $brick
- cnt=1
- while [ "$cnt" -le "$PROCESS_UP_TIMEOUT" ] ;
- do
- online=`$CLI volume status $vol $host:$brick --xml | sed -ne 's/.*<status>\([01]\)<\/status>/\1/p'`
- if [ $online -eq 0 ] ; then
- break;
- fi
- cnt=$(( $cnt + 1 ))
- sleep 1
+
+ local deadline="$(($(date +%s%N) + ${PROCESS_UP_TIMEOUT}000000000))"
+ while [[ "$(date +%s%N)" < "$deadline" ]]; do
+ if [[ "$(brick_up_status $vol $host $brick)" == "0" ]]; then
+ break
+ fi
done
}
@@ -371,6 +387,19 @@ function get_gfid2path {
getfattr -h --only-values -n glusterfs.gfidtopath $path 2>/dev/null
}
+function get_mdata {
+ local path=$1
+ getfattr -h -e hex -n trusted.glusterfs.mdata $path 2>/dev/null | grep "trusted.glusterfs.mdata" | cut -f2 -d'='
+}
+
+function get_mdata_count {
+ getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | wc -l
+}
+
+function get_mdata_uniq_count {
+ getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | uniq | wc -l
+}
+
function get_xattr_key {
local key=$1
local path=$2
@@ -540,9 +569,8 @@ function volume_exists() {
}
function killall_gluster() {
- pkill gluster
+ terminate_pids $(process_pids gluster)
find $GLUSTERD_PIDFILEDIR -name '*.pid' | xargs rm -f
- sleep 1
}
function afr_get_index_count {
@@ -833,6 +861,9 @@ function get_fd_count {
else
count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
fi
+# If no information is found for a given gfid, it means it has not been
+# accessed, so it doesn't have any open fd. In this case we return 0.
+ count="${count:-0}"
rm -f $statedump
echo $count
}
@@ -859,7 +890,6 @@ function get_mount_active_size_value {
local vol=$1
local mount=$2
local statedump=$(generate_mount_statedump $vol $mount)
- sleep 1
local val=$(grep "active_size" $statedump | cut -f2 -d'=' | tail -1)
rm -f $statedump
echo $val
@@ -869,7 +899,6 @@ function get_mount_lru_size_value {
local vol=$1
local mount=$2
local statedump=$(generate_mount_statedump $vol $mount)
- sleep 1
local val=$(grep "lru_size" $statedump | cut -f2 -d'=' | tail -1)
rm -f $statedump
echo $val
@@ -879,5 +908,95 @@ function check_changelog_op {
local clog_path=$1
local op=$2
- $PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep $op | wc -l
+ $PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep "$op" | wc -l
+}
+
+function processed_changelogs {
+ local processed_dir=$1
+ count=$(ls -l $processed_dir | grep CHANGELOG | wc -l)
+ if [ $count -gt 0 ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function volgen_check_ancestry {
+ #Returns Y if $ancestor_xl is an ancestor of $child_xl according to the volfile
+ local volfile="$1"
+
+ local child_xl_type="$2"
+ local child_xl="$3"
+
+ local ancestor_xl_type="$4"
+ local ancestor_xl="$5"
+
+ child_linenum=$(awk "/type $child_xl_type\/$child_xl/ {print FNR}" $volfile)
+ ancestor_linenum=$(awk "/type $ancestor_xl_type\/$ancestor_xl/ {print FNR}" $volfile)
+
+ if [ $child_linenum -lt $ancestor_linenum ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function get_shd_mux_pid {
+ local volume=$1
+ pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
+ echo $pid
+}
+
+function shd_count {
+ ps aux | grep "glustershd" | grep -v grep | wc -l
+}
+
+function number_healer_threads_shd {
+ local pid=$(get_shd_mux_pid $1)
+ pstack $pid | grep $2 | wc -l
+}
+
+function get_mtime {
+ local time=$(get-mdata-xattr -m $1)
+ if [ $time == "-1" ];
+ then
+ echo $(stat -c %Y $1)
+ else
+ echo $time
+ fi
+}
+
+function get_ctime {
+ local time=$(get-mdata-xattr -c $1)
+ if [ $time == "-1" ];
+ then
+ echo $(stat -c %Z $1)
+ else
+ echo $time
+ fi
+}
+
+function get_atime {
+ local time=$(get-mdata-xattr -a $1)
+ if [ $time == "-1" ];
+ then
+ echo $(stat -c %X $1)
+ else
+ echo $time
+ fi
+}
+
+function get-xml()
+{
+ $CLI $1 --xml | xmllint --format - | grep "$2" | sed "s/\(<$2>\|<\/$2>\)//g"
+}
+
+function logging_time_check()
+{
+ local logdir=$1
+ local logfile=`echo ${0##*/}`_glusterd1.log
+
+ cat $logdir/1/$logfile | tail -n 2 | head -n 1 | grep $(date +%H:%M) | wc -l
}