Diffstat (limited to 'tests/bugs/replicate')
-rwxr-xr-x  tests/bugs/replicate/bug-1015990-rep.t | 35
-rwxr-xr-x  tests/bugs/replicate/bug-1015990.t | 30
-rwxr-xr-x  tests/bugs/replicate/bug-1046624.t | 3
-rw-r--r--  tests/bugs/replicate/bug-1058797.t | 3
-rw-r--r--  tests/bugs/replicate/bug-1101647.t | 12
-rw-r--r--  tests/bugs/replicate/bug-1130892.t | 18
-rw-r--r--  tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t | 8
-rw-r--r--  tests/bugs/replicate/bug-1180545.t | 77
-rw-r--r--  tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t | 57
-rw-r--r--  tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t | 42
-rw-r--r--  tests/bugs/replicate/bug-1238398-split-brain-resolution.t | 51
-rw-r--r--  tests/bugs/replicate/bug-1238508-self-heal.t | 51
-rw-r--r--  tests/bugs/replicate/bug-1250170-fsync.c | 56
-rw-r--r--  tests/bugs/replicate/bug-1250170-fsync.t | 35
-rw-r--r--  tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t | 54
-rw-r--r--  tests/bugs/replicate/bug-1292379.t | 58
-rw-r--r--  tests/bugs/replicate/bug-1297695.t | 43
-rw-r--r--  tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t | 40
-rw-r--r--  tests/bugs/replicate/bug-1325792.t | 25
-rw-r--r--  tests/bugs/replicate/bug-1335652.t | 29
-rw-r--r--  tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t | 46
-rw-r--r--  tests/bugs/replicate/bug-1341650.t | 63
-rw-r--r--  tests/bugs/replicate/bug-1363721.t | 118
-rw-r--r--  tests/bugs/replicate/bug-1365455.t | 54
-rw-r--r--  tests/bugs/replicate/bug-1386188-sbrain-fav-child.t | 82
-rw-r--r--  tests/bugs/replicate/bug-1402730.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1408712.t | 101
-rw-r--r--  tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t | 69
-rw-r--r--  tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t | 79
-rw-r--r--  tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t | 46
-rw-r--r--  tests/bugs/replicate/bug-1448804-check-quorum-type-values.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1473026.t | 31
-rw-r--r--  tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t | 52
-rw-r--r--  tests/bugs/replicate/bug-1480525.t | 18
-rw-r--r--  tests/bugs/replicate/bug-1493415-gfid-heal.t | 78
-rw-r--r--  tests/bugs/replicate/bug-1498570-client-iot-graph-check.t | 48
-rwxr-xr-x  tests/bugs/replicate/bug-1539358-split-brain-detection.t | 89
-rw-r--r--  tests/bugs/replicate/bug-1561129-enospc.t | 24
-rw-r--r--  tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t | 72
-rw-r--r--  tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t | 128
-rw-r--r--  tests/bugs/replicate/bug-1626994-info-split-brain.t | 62
-rw-r--r--  tests/bugs/replicate/bug-1637249-gfid-heal.t | 149
-rw-r--r--  tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t | 45
-rw-r--r--  tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t | 55
-rwxr-xr-x  tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t | 55
-rw-r--r--  tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t | 95
-rw-r--r--  tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t | 40
-rw-r--r--  tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t | 38
-rwxr-xr-x  tests/bugs/replicate/bug-1696599-io-hang.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t | 136
-rw-r--r--  tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t | 116
-rw-r--r--  tests/bugs/replicate/bug-1728770-pass-xattrs.t | 52
-rw-r--r--  tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t | 102
-rw-r--r--  tests/bugs/replicate/bug-1744548-heal-timeout.t | 47
-rw-r--r--  tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t | 89
-rw-r--r--  tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t | 111
-rw-r--r--  tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t | 74
-rw-r--r--  tests/bugs/replicate/bug-1801624-entry-heal.t | 58
-rwxr-xr-x  tests/bugs/replicate/bug-802417.t | 17
-rw-r--r--  tests/bugs/replicate/bug-821056.t | 4
-rwxr-xr-x  tests/bugs/replicate/bug-830665.t | 7
-rwxr-xr-x  tests/bugs/replicate/bug-853680.t | 53
-rw-r--r--  tests/bugs/replicate/bug-880898.t | 7
-rw-r--r--  tests/bugs/replicate/bug-913051.t | 13
-rw-r--r--  tests/bugs/replicate/bug-918437-sh-mtime.t | 2
-rwxr-xr-x [-rw-r--r--]  tests/bugs/replicate/bug-921231.t | 4
-rw-r--r--  tests/bugs/replicate/bug-957877.t | 2
-rw-r--r--  tests/bugs/replicate/bug-966018.t | 35
-rw-r--r--  tests/bugs/replicate/bug-976800.t | 3
-rwxr-xr-x  tests/bugs/replicate/bug-977797.t | 65
-rwxr-xr-x  tests/bugs/replicate/bug-979365.t | 2
-rw-r--r--  tests/bugs/replicate/issue-1254-prioritize-enospc.t | 80
-rw-r--r--  tests/bugs/replicate/mdata-heal-no-xattrs.t | 59
-rw-r--r--  tests/bugs/replicate/ta-inode-refresh-read.t | 40
74 files changed, 3576 insertions, 207 deletions
diff --git a/tests/bugs/replicate/bug-1015990-rep.t b/tests/bugs/replicate/bug-1015990-rep.t
index 4e959e6e70e..ab8166e372a 100755
--- a/tests/bugs/replicate/bug-1015990-rep.t
+++ b/tests/bugs/replicate/bug-1015990-rep.t
@@ -11,7 +11,6 @@ TEST pidof glusterd;
TEST $CLI volume info;
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4};
-
## Verify volume is created
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
@@ -20,24 +19,23 @@ EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
-
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
-
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
TEST kill_brick $V0 $H0 $B0/$V0"1"
-sleep 5
TEST kill_brick $V0 $H0 $B0/$V0"3"
-sleep 5
for i in {1..100}; do echo "STRING" > $M0/File$i; done
-brick_2_sh_entries=$(count_sh_entries $B0/$V0"2")
-brick_4_sh_entries=$(count_sh_entries $B0/$V0"4")
+# Check that shd is connected to all up bricks before running the statistics command.
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/$V0"1")
-
-
substring="Number of entries:"
count=0
while read -r line;
@@ -50,27 +48,10 @@ do
done <<< "$command_output"
-brick_2_entries_count=$(($count-$value))
-
-EXPECT "0" echo $brick_2_entries_count
-
brick_2_entries_count=$count
-
-
xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
-##Remove the count of the xattrop-gfid entry count as it does not contribute
-##to the number of files to be healed
-
-sub_val=1
-xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val))
-
-ret=0
-if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ]
- then
- ret=$(($ret + $sub_val))
-fi
+EXPECT $brick_2_entries_count echo $xattrop_count_brick_2
-EXPECT "1" echo $ret
## Finish up
TEST $CLI volume stop $V0;
EXPECT 'Stopped' volinfo_field $V0 'Status';
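
For context, the heal-count parsing that both bug-1015990 tests depend on follows the pattern below. This is a minimal sketch, assuming the CLI prints one "Number of entries: N" line per brick of the replica; the variable handling is illustrative rather than copied from the test:

    command_output=$(gluster volume heal $V0 statistics heal-count replica $H0:$B0/${V0}1)
    count=0
    while read -r line; do
        if [[ "$line" == *"Number of entries:"* ]]; then
            value=${line##*: }          # the number after the colon
            count=$((count + value))    # sum across the bricks reported
        fi
    done <<< "$command_output"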
diff --git a/tests/bugs/replicate/bug-1015990.t b/tests/bugs/replicate/bug-1015990.t
index 48181c00329..a8b12f2c202 100755
--- a/tests/bugs/replicate/bug-1015990.t
+++ b/tests/bugs/replicate/bug-1015990.t
@@ -54,36 +54,10 @@ done <<< "$command_output"
brick_2_entries_count=$(($count-$value))
brick_4_entries_count=$value
-
xattrop_count_brick_2=$(count_sh_entries $B0/$V0"2")
-##Remove the count of the xattrop-gfid entry count as it does not contribute
-##to the number of files to be healed
-
-sub_val=1
-xattrop_count_brick_2=$(($xattrop_count_brick_2-$sub_val))
-
xattrop_count_brick_4=$(count_sh_entries $B0/$V0"4")
-##Remove xattrop-gfid entry count
-
-xattrop_count_brick_4=$(($xattrop_count_brick_4-$sub_val))
-
-
-ret=0
-if [ "$xattrop_count_brick_2" -eq "$brick_2_entries_count" ]
- then
- ret=$(($ret + $sub_val))
-fi
-
-EXPECT "1" echo $ret
-
-
-ret=0
-if [ "$xattrop_count_brick_4" -eq "$brick_4_entries_count" ]
- then
- ret=$(($ret + $sub_val))
-fi
-
-EXPECT "1" echo $ret
+EXPECT $brick_2_entries_count echo $xattrop_count_brick_2
+EXPECT $brick_4_entries_count echo $xattrop_count_brick_4
## Finish up
TEST $CLI volume stop $V0;
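
Both cleanups above drop the manual "-1" adjustment, which only makes sense if count_sh_entries now excludes the base xattrop-<gfid> hardlink from its count. A minimal sketch of a helper with that behaviour (the real definition lives in tests/volume.rc):

    function count_sh_entries {
        # Count index entries, skipping the base "xattrop-<gfid>" link that
        # every xattrop directory carries and that never needs healing.
        ls $1/.glusterfs/indices/xattrop | grep -v "xattrop-" | wc -l
    }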
diff --git a/tests/bugs/replicate/bug-1046624.t b/tests/bugs/replicate/bug-1046624.t
index 9ae40879228..e2762ea6764 100755
--- a/tests/bugs/replicate/bug-1046624.t
+++ b/tests/bugs/replicate/bug-1046624.t
@@ -25,11 +25,12 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Mount native
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0 --use-readdirp=no
+TEST ${GFS} --volfile-server=$H0 --volfile-id=$V0 --use-readdirp=no $M0
TEST `echo "TEST-FILE" > $M0/File`
TEST `mkdir $M0/Dir`
TEST kill_brick $V0 $H0 $B0/${V0}-0
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
TEST `ln -s $M0/File $M0/Link1`
TEST `ln -s $M0/Dir $M0/Link2`
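
The recurring replacement of fixed sleeps with EXPECT_WITHIN in these patches relies on a poll-until-timeout primitive. A minimal sketch of the idea, assuming a one-second poll interval (the real helper is defined in tests/include.rc and also records the test result):

    function expect_within_sketch {
        local timeout=$1 expected=$2; shift 2
        local i
        for ((i = 0; i < timeout; i++)); do
            if [[ "$("$@")" =~ $expected ]]; then
                return 0    # matched before the deadline
            fi
            sleep 1
        done
        return 1            # timed out without a match
    }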
diff --git a/tests/bugs/replicate/bug-1058797.t b/tests/bugs/replicate/bug-1058797.t
index 99ab3eb3a66..598062a0dab 100644
--- a/tests/bugs/replicate/bug-1058797.t
+++ b/tests/bugs/replicate/bug-1058797.t
@@ -12,6 +12,9 @@ TEST glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1};
TEST $CLI volume start $V0
TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
# FUSE mount;create a file
TEST glusterfs -s $H0 --volfile-id $V0 $M0
diff --git a/tests/bugs/replicate/bug-1101647.t b/tests/bugs/replicate/bug-1101647.t
index 148af987f20..708bc1a1e29 100644
--- a/tests/bugs/replicate/bug-1101647.t
+++ b/tests/bugs/replicate/bug-1101647.t
@@ -11,15 +11,17 @@ TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2};
TEST $CLI volume start $V0;
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
-EXPECT_WITHIN 20 "Y" glustershd_up_status
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
-#Create base entry in indices/xattrop and indices/base_indices_holder
+#Create base entry in indices/xattrop
echo "Data">$M0/file
TEST $CLI volume heal $V0
-#Entries from indices/xattrop and indices/base_indices_holder should not be cleared after a heal.
-EXPECT 1 count_sh_entries $B0/$V0"1"
-EXPECT 1 count_sh_entries $B0/$V0"2"
+#Entries from indices/xattrop should not be cleared after a heal.
+EXPECT 1 count_index_entries $B0/$V0"1"
+EXPECT 1 count_index_entries $B0/$V0"2"
TEST kill_brick $V0 $H0 $B0/${V0}2
echo "More data">>$M0/file
diff --git a/tests/bugs/replicate/bug-1130892.t b/tests/bugs/replicate/bug-1130892.t
index 945ee4982fc..c7509f33cc2 100644
--- a/tests/bugs/replicate/bug-1130892.t
+++ b/tests/bugs/replicate/bug-1130892.t
@@ -16,6 +16,11 @@ EXPECT 'Created' volinfo_field $V0 'Status';
# Disable self-heal daemon
TEST gluster volume set $V0 self-heal-daemon off
+# Disable client-side heal
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
# Disable all perf-xlators
TEST $CLI volume set $V0 performance.quick-read off
TEST $CLI volume set $V0 performance.io-cache off
@@ -28,7 +33,7 @@ TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
# FUSE Mount
-TEST glusterfs -s $H0 --volfile-id $V0 $M0
+TEST ${GFS} -s $H0 --volfile-id $V0 $M0
# Create files and dirs
TEST mkdir -p $M0/one/two/
@@ -36,9 +41,14 @@ TEST `echo "Carpe diem" > $M0/one/two/three`
# Simulate disk-replacement
TEST kill_brick $V0 $H0 $B0/${V0}-1
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
TEST rm -rf $B0/${V0}-1/one
TEST rm -rf $B0/${V0}-1/.glusterfs
+#Ideally, disk replacement is done using reset-brick or replace-brick gluster CLI
+#which will create .glusterfs folder.
+mkdir $B0/${V0}-1/.glusterfs && chmod 600 $B0/${V0}-1/.glusterfs
+
# Start force
TEST $CLI volume start $V0 force
@@ -46,14 +56,18 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
TEST stat $M0/one
+sleep 1
+
# Check pending xattrs
EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 data
EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 entry
-EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}-0/one trusted.afr.$V0-client-1 metadata
TEST gluster volume set $V0 self-heal-daemon on
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one
EXPECT_WITHIN $HEAL_TIMEOUT "Y" is_dir_heal_done $B0/${V0}-0 $B0/${V0}-1 one/two
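
The data/entry/metadata assertions above read the counters packed into a trusted.afr.* changelog xattr. A sketch of extracting one field, assuming AFR's layout of three 32-bit big-endian counters (data, metadata, entry) encoded as 24 hex digits; the helper name is illustrative:

    function changelog_field_sketch {
        local brick_path=$1 xattr=$2 field=$3
        local hex=$(getfattr -n $xattr -e hex --only-values $brick_path 2>/dev/null)
        hex=${hex#0x}
        case $field in
            data)     echo ${hex:0:8}  ;;   # pending data operations
            metadata) echo ${hex:8:8}  ;;   # pending metadata operations
            entry)    echo ${hex:16:8} ;;   # pending entry operations
        esac
    }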
diff --git a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
index f43c7cea551..b69a38ae788 100644
--- a/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
+++ b/tests/bugs/replicate/bug-1134691-afr-lookup-metadata-heal.t
@@ -8,8 +8,12 @@ cleanup;
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
-TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+TEST $CLI volume start $V0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
cd $M0
@@ -21,9 +25,11 @@ iatt=$(stat -c "%g:%u:%A" file)
TEST $CLI volume start $V0 force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT 2 get_pending_heal_count $V0
#Trigger metadataheal
TEST stat file
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
#iattrs must be matching
iatt1=$(stat -c "%g:%u:%A" $B0/brick0/file)
diff --git a/tests/bugs/replicate/bug-1180545.t b/tests/bugs/replicate/bug-1180545.t
new file mode 100644
index 00000000000..5e40edd6c38
--- /dev/null
+++ b/tests/bugs/replicate/bug-1180545.t
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+#Create gfid split-brain of directory and check if conservative merge
+#completes successfully.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+function check_sh_entries() {
+ local expected="$1"
+ local count=
+ local good="0"
+ shift
+
+ for i in $*; do
+ count="$(count_sh_entries $i)"
+ if [[ "x${count}" == "x${expected}" ]]; then
+ good="$((good + 1))"
+ fi
+ done
+ if [[ "x${good}" != "x${last_good}" ]]; then
+ last_good="${good}"
+# This triggers a sweep of the heal index. However, if more than one brick
+# tries to heal the same directory at the same time, one of them will take
+# the lock and the other will give up, waiting for the next heal cycle, which
+# is set to 60 seconds (the minimum valid value). So, each time we detect
+# that one brick has completed the heal, we trigger another heal.
+ $CLI volume heal $V0
+ fi
+
+ echo "${good}"
+}
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume set $V0 cluster.heal-timeout 60
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 stat-prefetch off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Create files with alternate brick down. One file has gfid mismatch.
+TEST mkdir $M0/DIR
+
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST touch $M0/DIR/FILE
+TEST touch $M0/DIR/file{1..5}
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/DIR/FILE
+TEST touch $M0/DIR/file{6..10}
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+#Trigger heal and verify number of entries in backend
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+last_good=""
+
+EXPECT_WITHIN $HEAL_TIMEOUT "2" check_sh_entries 2 $B0/brick{0,1}
+
+#Two entries for DIR and two for FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
+TEST diff <(ls $B0/brick0/DIR) <(ls $B0/brick1/DIR)
+cleanup
diff --git a/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t b/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
new file mode 100644
index 00000000000..fe8e992e8f8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1190069-afr-stale-index-entries.t
@@ -0,0 +1,57 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+#Stale entries in xattrop folder for files which do not need heal must be removed during the next index heal crawl.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0;
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST `echo hello>$M0/datafile`
+TEST touch $M0/mdatafile
+
+#Create split-brain and reset the afr xattrs, so that we have only the entry inside xattrop folder.
+#This is to simulate the case where the brick crashed just before pre-op happened, but index xlator created the entry inside xattrop folder.
+
+#Create data, metadata SB.
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST stat $M0/datafile
+TEST `echo append>>$M0/datafile`
+TEST chmod +x $M0/mdatafile
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+TEST kill_brick $V0 $H0 $B0/$V0"0"
+TEST stat $M0/datafile
+TEST `echo append>>$M0/datafile`
+TEST chmod -x $M0/mdatafile
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+TEST ! cat $M0/datafile
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/$V0"0"
+EXPECT_WITHIN $HEAL_TIMEOUT '2' count_sh_entries $B0/$V0"1"
+
+#Reset xattrs and trigger heal.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}0/datafile
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/datafile
+
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000000 $B0/${V0}0/mdatafile
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000000 $B0/${V0}1/mdatafile
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $B0/$V0"0"
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $B0/$V0"1"
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t b/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
new file mode 100644
index 00000000000..6ff471fbf15
--- /dev/null
+++ b/tests/bugs/replicate/bug-1221481-allow-fops-on-dir-split-brain.t
@@ -0,0 +1,42 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+#Allow readdirs to proceed on directories that are in split-brain
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0;
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file{1..5}
+
+#Create entry split-brain
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 1
+TEST touch $M0/dir/FILE
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "^Y$" force_umount $M0
+TEST $CLI volume start $V0 force
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+TEST kill_brick $V0 $H0 $B0/$V0"0"
+EXPECT_WITHIN ${PROCESS_DOWN_TIMEOUT} "^0$" afr_child_up_status $V0 0
+TEST touch $M0/dir/FILE
+EXPECT_WITHIN ${UMOUNT_TIMEOUT} "^Y$" force_umount $M0
+TEST $CLI volume start $V0 force
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT '1' afr_child_up_status_meta $M0 $V0-replicate-0 1
+
+cd $M0/dir
+EXPECT "6" echo $(ls | wc -l)
+TEST ! cat FILE
+TEST `echo hello>hello.txt`
+cd -
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1238398-split-brain-resolution.t b/tests/bugs/replicate/bug-1238398-split-brain-resolution.t
new file mode 100644
index 00000000000..8ef3aae979f
--- /dev/null
+++ b/tests/bugs/replicate/bug-1238398-split-brain-resolution.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+function get_split_brain_status {
+ local path=$1
+ echo `getfattr -n replica.split-brain-status $path` | cut -f2 -d"=" | sed -e 's/^"//' -e 's/"$//'
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+#Disable self-heal-daemon
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+TEST `echo "some-data" > $M0/metadata-split-brain.txt`
+
+#Create metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 666 $M0/metadata-split-brain.txt
+
+TEST $CLI volume start $V0 force
+TEST kill_brick $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST chmod 757 $M0/metadata-split-brain.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT 2 get_pending_heal_count $V0
+
+#Inspect the file in metadata-split-brain
+EXPECT "data-split-brain:no metadata-split-brain:yes Choices:patchy-client-0,patchy-client-1" get_split_brain_status $M0/metadata-split-brain.txt
+TEST setfattr -n replica.split-brain-choice -v $V0-client-0 $M0/metadata-split-brain.txt
+
+EXPECT "757" stat -c %a $M0/metadata-split-brain.txt
+
+TEST setfattr -n replica.split-brain-choice -v $V0-client-1 $M0/metadata-split-brain.txt
+EXPECT "666" stat -c %a $M0/metadata-split-brain.txt
+
+cleanup;
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
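
Sketched end-to-end, and assuming the replica.split-brain-* virtual xattrs behave as the test shows, the mount-based inspection flow this test exercises looks like:

    getfattr -n replica.split-brain-status $M0/f                          # show state and choices
    setfattr -n replica.split-brain-choice -v $V0-client-0 $M0/f          # pin one replica for reads
    cat $M0/f                                                             # reads served from that copy
    setfattr -n replica.split-brain-heal-finalize -v $V0-client-0 $M0/f   # resolve using it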
diff --git a/tests/bugs/replicate/bug-1238508-self-heal.t b/tests/bugs/replicate/bug-1238508-self-heal.t
new file mode 100644
index 00000000000..24fb07d31f0
--- /dev/null
+++ b/tests/bugs/replicate/bug-1238508-self-heal.t
@@ -0,0 +1,51 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+# Disable self-heal-daemon
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST mkdir $M0/olddir;
+TEST `echo "some-data" > $M0/olddir/oldfile`
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mv $M0/olddir/oldfile $M0/olddir/newfile;
+TEST mv $M0/olddir $M0/newdir;
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# Test if the files are present on both the bricks
+EXPECT "newdir" ls $B0/${V0}0/
+EXPECT "newdir" ls $B0/${V0}1/
+EXPECT "newfile" ls $B0/${V0}0/newdir/
+EXPECT "newfile" ls $B0/${V0}1/newdir/
+
+# Test if gfid-link files in .glusterfs also provide correct info
+brick0gfid=$(gf_get_gfid_backend_file_path $B0/${V0}0 newdir)
+brick1gfid=$(gf_get_gfid_backend_file_path $B0/${V0}1 newdir)
+EXPECT "newfile" ls $brick0gfid
+EXPECT "newfile" ls $brick1gfid
+
+# Test if the files are accessible from the mount
+EXPECT "newdir" ls $M0/
+EXPECT "newfile" ls $M0/newdir
+
+cleanup;
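
gf_get_gfid_backend_file_path, used above, maps a name on a brick to its .glusterfs gfid entry. A minimal sketch of that mapping, assuming the standard <brick>/.glusterfs/<g1g2>/<g3g4>/<full-gfid> layout (the helper name here is illustrative):

    function gfid_backend_path_sketch {
        local brick=$1 name=$2
        local hex=$(getfattr -n trusted.gfid -e hex --only-values $brick/$name 2>/dev/null)
        hex=${hex#0x}
        # Re-insert the dashes of the canonical uuid form.
        local gfid="${hex:0:8}-${hex:8:4}-${hex:12:4}-${hex:16:4}-${hex:20:12}"
        echo "$brick/.glusterfs/${gfid:0:2}/${gfid:2:2}/$gfid"
    }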
diff --git a/tests/bugs/replicate/bug-1250170-fsync.c b/tests/bugs/replicate/bug-1250170-fsync.c
new file mode 100644
index 00000000000..21fd96594aa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1250170-fsync.c
@@ -0,0 +1,56 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+
+int
+main(int argc, char **argv)
+{
+ char *file = NULL;
+ int fd = -1;
+ char *buffer = NULL;
+ size_t buf_size = 0;
+ size_t written = 0;
+ int ret = 0;
+ off_t offset = 0;
+ int i = 0;
+ int loop_count = 5;
+
+ if (argc < 2) {
+ printf("Usage:%s <filename>\n", argv[0]);
+ return -1;
+ }
+
+ file = argv[1];
+ buf_size = 1024;
+ buffer = malloc(buf_size);
+ if (!buffer) {
+ perror("malloc");
+ return -1;
+ }
+ memset(buffer, 'R', buf_size);
+
+ fd = open(file, O_WRONLY);
+ if (fd == -1) {
+ perror("open");
+ return -1;
+ }
+
+ for (i = 0; i < loop_count; i++) {
+ ret = write(fd, buffer, buf_size);
+ if (ret == -1) {
+ perror("write");
+ return ret;
+ } else {
+ written += ret;
+ }
+        /* Rewind to offset 0 so every write lands at the same offset,
+         * keeping the I/O pattern non-sequential; the accompanying .t
+         * counts the WRITE and FSYNC fops generated by this pattern. */
+        offset = lseek(fd, 0, SEEK_SET);
+ }
+
+    close(fd);
+    free(buffer);
+ return 0;
+}
diff --git a/tests/bugs/replicate/bug-1250170-fsync.t b/tests/bugs/replicate/bug-1250170-fsync.t
new file mode 100644
index 00000000000..7a3fdbf8bb5
--- /dev/null
+++ b/tests/bugs/replicate/bug-1250170-fsync.t
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST gcc $(dirname $0)/bug-1250170-fsync.c -o $(dirname $0)/bug-1250170-fsync
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+TEST touch $M0/file
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST gluster volume profile $V0 start
+#Perform 5 non-sequential writes.
+TEST $(dirname $0)/bug-1250170-fsync $M0/file
+
+#Run profile info initially to filter out the interval statistics in the
+#subsequent runs.
+TEST $CLI volume profile $V0 info
+#We get only cumulative statistics.
+write_count=$($CLI volume profile $V0 info | grep WRITE |awk '{count += $8} END {print count}')
+fsync_count=$($CLI volume profile $V0 info | grep FSYNC |awk '{count += $8} END {print count}')
+
+EXPECT "5" echo $write_count
+TEST [ -z $fsync_count ]
+
+TEST $CLI volume profile $V0 stop
+TEST umount $M0
+rm -f $(dirname $0)/bug-1250170-fsync
+cleanup
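
The awk pipelines above sum the calls column of the cumulative profile output. A slightly more defensive sketch of the same idea (assuming calls remain the 8th column of "volume profile info") that prints 0 instead of an empty string when a fop never occurred:

    function fop_call_count {
        local fop=$1
        $CLI volume profile $V0 info | awk -v f="$fop" '$0 ~ f {c += $8} END {print c + 0}'
    }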
diff --git a/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t b/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t
new file mode 100644
index 00000000000..884b7892954
--- /dev/null
+++ b/tests/bugs/replicate/bug-1266876-allow-reset-brick-for-same-path.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+# Create files
+for i in {1..5}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Negative case with brick not killed && volume-id xattrs present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit force
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+# Negative case with brick killed but volume-id xattr present
+TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
+
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 start
+# Simulated reset disk
+for i in {1..5}
+do
+        rm -rf $B0/${V0}1/file$i.txt
+done
+for i in {6..10}
+do
+ echo $i > $M0/file$i.txt
+done
+
+# Now reset the brick
+TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+EXPECT "10" echo $(ls $B0/${V0}1 | wc -l)
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1292379.t b/tests/bugs/replicate/bug-1292379.t
new file mode 100644
index 00000000000..be1bf699173
--- /dev/null
+++ b/tests/bugs/replicate/bug-1292379.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+. $(dirname $0)/../../fileio.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.eager-lock off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+
+TEST wfd=`fd_available`
+TEST fd_open $wfd "w" $M0/a
+
+TEST fd_write $wfd "abcd"
+
+# Kill brick-0
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# While brick-0 is down, rename 'a' to 'b'
+TEST mv $M0/a $M0/b
+
+TEST fd_write $wfd "lmn"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST fd_write $wfd "pqrs"
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST fd_write $wfd "xyz"
+TEST fd_close $wfd
+
+md5sum_b0=$(md5sum $B0/${V0}0/b | awk '{print $1}')
+
+EXPECT "$md5sum_b0" echo `md5sum $B0/${V0}1/b | awk '{print $1}'`
+
+TEST umount $M0
+cleanup
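
bug-1292379.t keeps one write fd open across the rename via the fd_* helpers from tests/fileio.rc. A minimal sketch of how such helpers can be built on bash's exec redirection; the my_* names mark these as illustrative, not the real fileio.rc API (which also takes an open mode):

    function my_fd_open  { eval "exec $1>$2"; }     # bind fd $1 to file $2 for writing
    function my_fd_write { local fd=$1; shift; echo "$*" >&$fd; }
    function my_fd_close { eval "exec $1>&-"; }     # release the descriptor

    my_fd_open 7 /tmp/scratch
    my_fd_write 7 "abcd"
    my_fd_close 7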
diff --git a/tests/bugs/replicate/bug-1297695.t b/tests/bugs/replicate/bug-1297695.t
new file mode 100644
index 00000000000..d5c1a214fe2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1297695.t
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function write_to_file {
+        dd of=$M0/dir/file if=/dev/urandom bs=1024k count=128 >/dev/null 2>&1
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
+
+TEST $CLI volume set $V0 cluster.eager-lock on
+TEST $CLI volume set $V0 post-op-delay-secs 3
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 ensure-durability off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST mkdir $M0/dir
+TEST touch $M0/dir/file
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST `echo 'abc' > $M0/dir/file`
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+write_to_file &
+#Test if the MAX [F]INODELK fop latency is of the order of seconds.
+EXPECT "^1$" get_pending_heal_count $V0
+inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
+TEST [ -z $inodelk_max_latency ]
+cleanup
diff --git a/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t b/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t
new file mode 100644
index 00000000000..780ddb9250c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1305031-block-reads-on-metadata-sbrain.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+#Test that for files in metadata-split-brain, we do not wind even a single read.
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 performance.read-ahead off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.open-behind off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 700 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST chmod 777 $M0/file
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+lines=`cat $M0/file|wc|awk '{print $1}'`
+EXPECT 0 echo $lines
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1325792.t b/tests/bugs/replicate/bug-1325792.t
new file mode 100644
index 00000000000..73190e5f341
--- /dev/null
+++ b/tests/bugs/replicate/bug-1325792.t
@@ -0,0 +1,25 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 3
+
+
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}0 | grep -A 1 ${V0}0 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}1 | grep -A 1 ${V0}1 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}2 | grep -A 1 ${V0}2 | grep "entries" | wc -l`
+EXPECT 1 echo `$CLI volume heal $V0 statistics heal-count replica $H0:$B0/${V0}3 | grep -A 1 ${V0}3 | grep "entries" | wc -l`
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1335652.t b/tests/bugs/replicate/bug-1335652.t
new file mode 100644
index 00000000000..653a1b05ce2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1335652.t
@@ -0,0 +1,29 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 shard on
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#Kill the zeroth brick so that the 1st and 2nd get marked dirty
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+TEST dd if=/dev/urandom of=$M0/file bs=10MB count=20
+
+#At no point should the dirty xattr remain non-zero on the source bricks
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}1/.shard
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}2/.shard
+
+rm -rf $M0/file;
+
+cleanup;
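
get_hex_xattr, used here and in several tests below, reads an xattr in hex form for direct string comparison. A minimal sketch, assuming the argument order seen above (xattr name first, path second):

    function get_hex_xattr_sketch {
        local xattr=$1 path=$2
        local hex=$(getfattr -n $xattr -e hex --only-values $path 2>/dev/null)
        echo ${hex#0x}    # strip the 0x prefix so callers compare raw hex digits
    }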
diff --git a/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t b/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t
new file mode 100644
index 00000000000..6d177a7d3f8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1340623-mkdir-fails-remove-brick-started.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../nfs.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+## Create a 2x2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/r11 $H0:$B0/r12 $H0:$B0/r21 $H0:$B0/r22;
+EXPECT "$V0" volinfo_field $V0 'Volume Name';
+EXPECT 'Created' volinfo_field $V0 'Status';
+
+## Start the volume
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+## for releases > 3.7, gluster nfs is off by default
+TEST $CLI vol set $V0 nfs.disable off;
+
+EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
+
+TEST mount_nfs $H0:/$V0 $N0;
+
+## create some directories and files inside mount
+mkdir $N0/io;
+for j in {1..10}; do mkdir $N0/io/b$j; for k in {1..10}; do touch $N0/io/b$j/c$k; done done
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/r11 $H0:$B0/r12 start;
+
+TEST mkdir $N0/dir1;
+
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$B0/r11 $H0:$B0/r12"
+
+TEST $CLI volume remove-brick $V0 $H0:$B0/r11 $H0:$B0/r12 commit;
+
+TEST mkdir $N0/dir2;
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1341650.t b/tests/bugs/replicate/bug-1341650.t
new file mode 100644
index 00000000000..610342ca5bd
--- /dev/null
+++ b/tests/bugs/replicate/bug-1341650.t
@@ -0,0 +1,63 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume info
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST $CLI volume quota $V0 enable
+TEST $CLI volume set $V0 quota-deem-statfs on
+TEST $CLI volume set $V0 soft-timeout 0
+TEST $CLI volume set $V0 hard-timeout 0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+TEST mkdir $M0/dir
+TEST $CLI volume quota $V0 limit-objects /dir 10
+
+TEST touch $M0/dir/file1
+TEST touch $M0/dir/file2
+TEST touch $M0/dir/file3
+TEST touch $M0/dir/file4
+TEST touch $M0/dir/file5
+
+# Kill 3rd brick and create entries
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST touch $M0/dir/file6
+TEST touch $M0/dir/file7
+TEST touch $M0/dir/file8
+TEST touch $M0/dir/file9
+
+# Quota object limit is reached. Remove object for create to succeed.
+TEST ! touch $M0/dir/file10
+
+TEST rm $M0/dir/file1
+TEST touch $M0/dir/file10
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}0/dir
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.dirty $B0/${V0}1/dir
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+#Check that no conservative merge happened. file1 must not be present on any brick.
+TEST ! stat $B0/${V0}0/dir/file1
+TEST ! stat $B0/${V0}1/dir/file1
+TEST ! stat $B0/${V0}2/dir/file1
+
+TEST umount $M0
+cleanup
diff --git a/tests/bugs/replicate/bug-1363721.t b/tests/bugs/replicate/bug-1363721.t
new file mode 100644
index 00000000000..0ed34d8a4f4
--- /dev/null
+++ b/tests/bugs/replicate/bug-1363721.t
@@ -0,0 +1,118 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+FILE_UPDATE_TIMEOUT=20
+cleanup
+
+function size_increased {
+ local file=$1
+ local size=$2
+ local new_size=$(stat -c%s $file)
+ if [ $new_size -gt $size ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+# dd exits as soon as a write fails, so the process disappearing from /proc
+# serves as a proxy for the write having failed.
+function has_write_failed {
+ local pid=$1
+ if [ -d /proc/$pid ]; then echo "N"; else echo "Y"; fi
+}
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --direct-io-mode=enable $M0
+
+cd $M0
+
+# Start writing to a file.
+(dd if=/dev/urandom of=$M0/file1 bs=1k 2>/dev/null 1>/dev/null)&
+dd_pid=$!
+
+# Let IO happen
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 0
+
+# Now kill the zeroth brick
+kill_brick $V0 $H0 $B0/${V0}0
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now kill the first brick
+kill_brick $V0 $H0 $B0/${V0}1
+
+# Let IO continue
+EXPECT_WITHIN $FILE_UPDATE_TIMEOUT "Y" size_increased file1 $(stat -c%s file1)
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+# Let IO continue for 3 seconds
+sleep 3
+
+# Now kill the second brick
+kill_brick $V0 $H0 $B0/${V0}2
+
+# At this point the write should have been failed.
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "Y" has_write_failed $dd_pid
+
+# Also make sure that the second brick is never accused by the other bricks.
+
+md5sum_2=$(md5sum $B0/${V0}2/file1 | awk '{print $1}')
+
+EXPECT_NOT "$md5sum_2" echo `md5sum $B0/${V0}0/file1 | awk '{print $1}'`
+EXPECT_NOT "$md5sum_2" echo `md5sum $B0/${V0}1/file1 | awk '{print $1}'`
+
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.dirty data
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.dirty data
+
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file1 trusted.afr.$V0-client-2 data
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/file1 trusted.afr.$V0-client-2 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/file1 trusted.afr.$V0-client-2 metadata
+EXPECT "00000000" afr_get_specific_changelog_xattr $B0/${V0}2/file1 trusted.afr.$V0-client-2 metadata
+
+# Now bring the brick back up
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Enable shd
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}0/file1 | awk '{print $1}'`
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}1/file1 | awk '{print $1}'`
+EXPECT "$md5sum_2" echo `md5sum $B0/${V0}2/file1 | awk '{print $1}'`
+
+cd ~
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1365455.t b/tests/bugs/replicate/bug-1365455.t
new file mode 100644
index 00000000000..1953e2a9327
--- /dev/null
+++ b/tests/bugs/replicate/bug-1365455.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function check_size
+{
+ for i in {1..10}; do
+ size1=`stat -c %s $B0/${V0}0/tmp$i`
+ size2=`stat -c %s $B0/${V0}1/tmp$i`
+ if [[ $size1 -eq 0 ]] || [[ $size2 -eq 0 ]] || [[ $size1 -ne $size2 ]]; then
+ return 1
+ fi
+ done
+
+ return 0
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+for i in {1..10}
+do
+ echo abc > $M0/tmp$i
+done
+
+
+# Add Another brick
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}1
+
+#Check if self heal daemon has come up
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+
+#Check if self heal daemon is able to see all bricks
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+
+#Check size of files on bricks
+TEST check_size
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t b/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t
new file mode 100644
index 00000000000..d049d95ef9a
--- /dev/null
+++ b/tests/bugs/replicate/bug-1386188-sbrain-fav-child.t
@@ -0,0 +1,82 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/data.txt
+TEST touch $M0/mdata.txt
+
+#Create data and metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/data.txt bs=1024 count=1024
+TEST setfattr -n user.value -v value1 $M0/mdata.txt
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/data.txt bs=1024 count=1024
+TEST setfattr -n user.value -v value2 $M0/mdata.txt
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+## Check that the file is still in split-brain:
+ ## I/O fails
+ cat $M0/data.txt > /dev/null
+ EXPECT "1" echo $?
+ ## pending xattrs blame each other.
+ brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/data.txt)
+ brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/data.txt)
+ TEST [ $brick0_pending -ne "000000000000000000000000" ]
+ TEST [ $brick1_pending -ne "000000000000000000000000" ]
+
+ ## I/O fails
+ getfattr -n user.value $M0/mdata.txt
+ EXPECT "1" echo $?
+ brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/mdata.txt)
+ brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/mdata.txt)
+ TEST [ $brick0_pending -ne "000000000000000000000000" ]
+ TEST [ $brick1_pending -ne "000000000000000000000000" ]
+
+## Use mtime as the fav-child policy, so brick0 will be the source.
+ # Set dirty (data part) on the sink brick to check if it is reset later along with the pending xattr.
+ TEST setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/${V0}1/data.txt
+ # Set dirty (metadata part) on the sink brick to check if it is reset later along with the pending xattr.
+ TEST setfattr -n trusted.afr.dirty -v 0x000000000000000100000000 $B0/${V0}1/mdata.txt
+
+ TEST $CLI volume set $V0 favorite-child-policy mtime
+
+ # Reading the file should be allowed and sink brick xattrs must be reset.
+ cat $M0/data.txt > /dev/null
+ EXPECT "0" echo $?
+ TEST brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/data.txt)
+ TEST brick1_dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}1/data.txt)
+ TEST [ $brick1_dirty -eq "000000000000000000000000" ]
+ TEST [ $brick1_pending -eq "000000000000000000000000" ]
+
+ # Accessing the file should be allowed and sink brick xattrs must be reset.
+ EXPECT "value2" echo $(getfattr --only-values -n user.value $M0/mdata.txt)
+ TEST brick1_pending=$(get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1/mdata.txt)
+ TEST brick1_dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}1/mdata.txt)
+ TEST [ $brick1_dirty -eq "000000000000000000000000" ]
+ TEST [ $brick1_pending -eq "000000000000000000000000" ]
+
+#Enable shd and heal the file.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT 0 get_pending_heal_count $V0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1402730.t b/tests/bugs/replicate/bug-1402730.t
new file mode 100644
index 00000000000..c7866df463b
--- /dev/null
+++ b/tests/bugs/replicate/bug-1402730.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 granular-entry-heal on
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0
+
+TEST mkdir -p $M0/a/b/c
+cd $M0/a/b/c
+
+TEST kill_brick $V0 $H0 $B0/${V0}2
+rm -rf $B0/${V0}2/*
+rm -rf $B0/${V0}2/.glusterfs
+
+#Ideally, disk replacement is done using reset-brick or replace-brick gluster CLI
+#which will create .glusterfs folder.
+mkdir $B0/${V0}2/.glusterfs && chmod 600 $B0/${V0}2/.glusterfs
+
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch file
+
+GFID_C=$(get_gfid_string $M0/a/b/c)
+TEST stat $B0/${V0}0/.glusterfs/indices/entry-changes/$GFID_C/file
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$GFID_C/file
+
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}0/a/b/c trusted.afr.$V0-client-2 entry
+EXPECT_NOT "00000000" afr_get_specific_changelog_xattr $B0/${V0}1/a/b/c trusted.afr.$V0-client-2 entry
+
+cd ~
+
+cleanup
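
get_gfid_string, used above to derive the entry-changes index directory, resolves a path to its gfid. A minimal sketch, assuming the glusterfs.gfid.string virtual xattr is exposed on the FUSE mount:

    function get_gfid_string_sketch {
        getfattr -n glusterfs.gfid.string --only-values $1 2>/dev/null
    }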
diff --git a/tests/bugs/replicate/bug-1408712.t b/tests/bugs/replicate/bug-1408712.t
new file mode 100644
index 00000000000..9499a598ef1
--- /dev/null
+++ b/tests/bugs/replicate/bug-1408712.t
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+TESTS_EXPECTED_IN_LOOP=12
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+TEST $CLI volume set $V0 features.shard on
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+TEST $CLI volume heal $V0 granular-entry-heal enable
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 performance.flush-behind off
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 2
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M1 $V0-replicate-0 2
+
+TEST cd $M0
+TEST dd if=/dev/zero of=file bs=1M count=8
+
+# Kill brick-0.
+TEST kill_brick $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT "0" brick_up_status $V0 $H0 $B0/${V0}0
+
+TEST "dd if=/dev/zero bs=1M count=8 >> file"
+
+FILE_GFID=$(get_gfid_string $M0/file)
+
+# Test that the index associated with '/.shard' is created on B1 and B2.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+# Check for successful creation of granular entry indices
+for i in {2..3}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+TEST cd ~
+TEST md5sum $M1/file
+
+# Test that the index associated with '/.shard' and the created shards do not disappear on B1 and B2.
+TEST stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+for i in {2..3}
+do
+ TEST_IN_LOOP stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+# Start the brick that was down
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Enable shd
+TEST gluster volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Now verify that there are no name indices left after self-heal
+TEST ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+TEST ! stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID
+
+for i in {2..3}
+do
+ TEST_IN_LOOP ! stat $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+ TEST_IN_LOOP ! stat $B0/${V0}2/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/$FILE_GFID.$i
+done
+
+cleanup
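
For reference, DOT_SHARD_GFID above refers to the fixed gfid of the /.shard directory. A sketch of listing the granular name indices for it on a brick; the gfid value below is the constant we believe the shard xlator hardcodes, stated here as an assumption:

    DOT_SHARD_GFID="be318638-e8a0-4c6d-977d-7a937aa84806"   # assumed constant
    ls $B0/${V0}1/.glusterfs/indices/entry-changes/$DOT_SHARD_GFID/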
diff --git a/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t b/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t
new file mode 100644
index 00000000000..d0e2fee8bcd
--- /dev/null
+++ b/tests/bugs/replicate/bug-1417522-block-split-brain-resolution.t
@@ -0,0 +1,69 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST touch $M0/file
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=10
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=20
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST ! dd if=$M0/file of=/dev/null
+SOURCE_BRICK_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+
+# Various fav-child policies must not heal the file when some bricks are down.
+TEST $CLI volume set $V0 favorite-child-policy size
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy ctime
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy mtime
+TEST ! dd if=$M0/file of=/dev/null
+TEST $CLI volume set $V0 favorite-child-policy majority
+TEST ! dd if=$M0/file of=/dev/null
+
+# CLI/mount based split-brain resolution must also not work.
+TEST ! $CLI volume heal $V0 split-brain bigger-file /file
+TEST ! $CLI volume heal $V0 split-brain mtime /file
+TEST ! $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}2 /file1
+
+TEST ! getfattr -n replica.split-brain-status $M0/file
+TEST ! setfattr -n replica.split-brain-choice -v $V0-client-1 $M0/file
+
+# Bring all bricks back up and launch heal.
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT 0 get_pending_heal_count $V0
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$SOURCE_BRICK_MD5" == "$B1_MD5" ]
+TEST [ "$SOURCE_BRICK_MD5" == "$B2_MD5" ]
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
+
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
diff --git a/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
new file mode 100644
index 00000000000..10ce0131f4f
--- /dev/null
+++ b/tests/bugs/replicate/bug-1433571-undo-pending-only-on-up-bricks.t
@@ -0,0 +1,79 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Disable self-heal-daemon, client-side-heal and set quorum-type to none
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.quorum-type none
+
+#Kill bricks 0 & 1 and create a file to have pending entry for 0 & 1 on brick 2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+echo "file 1" >> $M0/f1
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#Kill bricks 1 & 2 and create a file to have pending entry for 1 & 2 on brick 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "file 2" >> $M0/f2
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Kill bricks 2 & 0 and create a file to have pending entry for 2 & 0 on brick 1
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST kill_brick $V0 $H0 $B0/${V0}0
+echo "file 3" >> $M0/f3
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
+EXPECT "000000000000000000000001" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# We were killing one brick and checking that entry heal does not reset the
+# pending xattrs for the down brick. Now that we need all bricks to be up for
+# entry heal, I'm removing that test from the .t
+
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
+
+TEST ls $M0
+TEST cat $M0/f1
+TEST cat $M0/f2
+TEST cat $M0/f3
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-0 $B0/${V0}2
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}2
+
+#Check whether all the bricks contains all the 3 files.
+EXPECT "3" echo $(ls $B0/${V0}0 | wc -l)
+EXPECT "3" echo $(ls $B0/${V0}1 | wc -l)
+EXPECT "3" echo $(ls $B0/${V0}2 | wc -l)
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t b/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t
new file mode 100644
index 00000000000..cdcaf62c925
--- /dev/null
+++ b/tests/bugs/replicate/bug-1438255-do-not-mark-self-accusing-xattrs.t
@@ -0,0 +1,46 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+NEW_USER=bug1438255
+NEW_UID=1438255
+NEW_GID=1438255
+
+TEST groupadd -o -g ${NEW_GID} ${NEW_USER}-${NEW_GID}
+TEST useradd -o -M -u ${NEW_UID} -g ${NEW_GID} -K MAIL_DIR=/dev/null ${NEW_USER}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+TEST touch $M0/FILE
+TEST kill_brick $V0 $H0 $B0/${V0}2
+chown $NEW_UID:$NEW_GID $M0/FILE
+EXPECT "000000000000000100000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+EXPECT "000000000000000100000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# setfattr done as NEW_USER fails on 3rd brick with EPERM but succeeds on
+# the first 2 bricks and hence on the mount.
+su -m bug1438255 -c "setfattr -n user.myattr -v myvalue $M0/FILE"
+TEST [ $? -eq 0 ]
+EXPECT "000000000000000200000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+EXPECT "000000000000000200000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+# Brick 3 does not have any self-blaming pending xattr.
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+TEST userdel --force ${NEW_USER}
+TEST groupdel ${NEW_USER}-${NEW_GID}
+cleanup
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
diff --git a/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t b/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t
new file mode 100644
index 00000000000..5bacf3edcfe
--- /dev/null
+++ b/tests/bugs/replicate/bug-1448804-check-quorum-type-values.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0..1}
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Default quorum-type for replica 2 is none. quorum-count is zero but it is not displayed.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "none" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+# Convert to replica-3.
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Default quorum-type for replica 3 is auto. quorum-count is INT_MAX but it is not displayed.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "auto" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+# Change the type to fixed.
+TEST $CLI volume set $V0 cluster.quorum-type fixed
+# We haven't set quorum-count yet, so it takes the default value of zero in reconfigure() and hence the quorum-type is displayed as none.
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "none" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+# set quorum-count and check.
+TEST $CLI volume set $V0 cluster.quorum-count 1
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "fixed" get_quorum_type $M0 $V0 0
+EXPECT "1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count|awk '{print $3}'`
+
+# reset to default values.
+TEST $CLI volume reset $V0 cluster.quorum-type
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "auto" get_quorum_type $M0 $V0 0
+cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep quorum-count
+TEST [ $? -ne 0 ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1473026.t b/tests/bugs/replicate/bug-1473026.t
new file mode 100644
index 00000000000..efb3ffa0d39
--- /dev/null
+++ b/tests/bugs/replicate/bug-1473026.t
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+#G_TESTDEF_TEST_STATUS_CENTOS6=KNOWN_ISSUE,BUG=1473026
+#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=1473026
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0;
+
+#Kill one brick (this brick has some issue)
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+#kill the brick to be replaced
+TEST kill_brick $V0 $H0 $B0/${V0}0
+
+# We know this command will fail because the file system is read-only now
+TEST ! $CLI volume replace-brick $V0 $H0:$B0/${V0}0 $H0:$B0/${V0}4 commit force
+
+TEST pkill glusterd
+
+# Glusterd should start even though the volume info and brick volfiles don't match
+TEST glusterd
+TEST pidof glusterd
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t b/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t
new file mode 100644
index 00000000000..bb858a8a63d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1477169-entry-selfheal-rename.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST mkdir -p $M0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 $M0/d2
+gfid_d1=$(gf_get_gfid_xattr $B0/${V0}0/d1)
+gfid_d2=$(gf_get_gfid_xattr $B0/${V0}0/d2)
+gfid_dir=$(gf_get_gfid_xattr $B0/${V0}0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789)
+
+gfid_str_d1=$(gf_gfid_xattr_to_str $gfid_d1)
+gfid_str_d2=$(gf_gfid_xattr_to_str $gfid_d2)
+gfid_str_d3=$(gf_gfid_xattr_to_str $gfid_dir)
+
+# Kill 3rd brick and rename the dir from mount.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+TEST mv $M0/d1/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789 $M0/d2
+
+# Bring it back and trigger heal.
+TEST $CLI volume start $V0 force
+
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check that .glusterfs symlink for dir exists and points to d2/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
+TEST linkname=$(readlink $B0/${V0}2/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3)
+EXPECT "dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" basename $linkname
+TEST parent_dir_gfid_str=$(echo $linkname|cut -d / -f5)
+EXPECT $gfid_str_d2 echo $parent_dir_gfid_str
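+# (Note) Directory gfid entries in .glusterfs are symlinks of the form
+# ../../<pargfid[0:2]>/<pargfid[2:4]>/<parent-gfid>/<basename>, which is why
+# field 5 of the link target above is the parent directory's gfid string.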
+
+TEST rmdir $M0/d2/dir012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
+
+TEST ! stat $B0/${V0}0/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+TEST ! stat $B0/${V0}1/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+TEST ! stat $B0/${V0}2/.glusterfs/${gfid_str_d3:0:2}/${gfid_str_d3:2:2}/$gfid_str_d3
+cleanup;
diff --git a/tests/bugs/replicate/bug-1480525.t b/tests/bugs/replicate/bug-1480525.t
new file mode 100644
index 00000000000..7c63bb2e4ea
--- /dev/null
+++ b/tests/bugs/replicate/bug-1480525.t
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
+
+EXPECT_NOT "-1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep read_child |awk '{print $3}'`
+TEST $CLI volume set $V0 choose-local off
+EXPECT_WITHIN $CONFIG_UPDATE_TIMEOUT "-1" echo `cat $M0/.meta/graphs/active/$V0-replicate-0/private|grep read_child |awk '{print $3}'`
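+# (Note) read_child in the .meta private dump is the replica child pinned for
+# local reads; -1 means no child is pinned, which is the expected state once
+# choose-local is turned off.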
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1493415-gfid-heal.t b/tests/bugs/replicate/bug-1493415-gfid-heal.t
new file mode 100644
index 00000000000..8a79febf4b4
--- /dev/null
+++ b/tests/bugs/replicate/bug-1493415-gfid-heal.t
@@ -0,0 +1,78 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume start $V0
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST $CLI volume set $V0 self-heal-daemon off
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+
+#------------------------------------------------------------------------------#
+TEST touch $M0/f1
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/f1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+
+# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
+# brick crash at the point where file got created but no xattrs were set.
+TEST setfattr -x trusted.gfid $B0/${V0}1/f1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+# storage/posix treats a file without a gfid that changed less than a second
+# ago as non-existent, so we need to wait to force posix to consider this a
+# valid file that merely lacks a gfid.
+sleep 2
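+# (Note) The .glusterfs hard link removed above lives at
+# .glusterfs/<gfid[0:2]>/<gfid[2:4]>/<full-gfid>; recreating it is part of
+# what the gfid heal below is expected to do.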
+
+# Assume there were no pending xattrs on parent dir due to 1st brick crashing
+# too. Then name heal from client must heal the gfid.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0;
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST stat $M0/f1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/f1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+#------------------------------------------------------------------------------#
+TEST mkdir $M0/dir
+TEST touch $M0/dir/f2
+gfid_f2=$(gf_get_gfid_xattr $B0/${V0}0/dir/f2)
+gfid_str_f2=$(gf_gfid_xattr_to_str $gfid_f2)
+
+# Remove gfid xattr and .glusterfs hard link from 2nd brick. This simulates a
+# brick crash at the point where file got created but no xattrs were set.
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/f2
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+#Now simulate setting of pending entry xattr on parent dir of 1st brick.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir
+create_brick_xattrop_entry $B0/${V0}0 dir
+
+# storage/posix treats a file without a gfid that changed less than a second
+# ago as non-existent, so we need to wait to force posix to consider this a
+# valid file that merely lacks a gfid.
+sleep 2
+
+#Trigger entry-heal via shd
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_f2" gf_get_gfid_xattr $B0/${V0}1/dir/f2
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+#------------------------------------------------------------------------------#
+cleanup;
diff --git a/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t b/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t
new file mode 100644
index 00000000000..2b3b3040228
--- /dev/null
+++ b/tests/bugs/replicate/bug-1498570-client-iot-graph-check.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+TESTS_EXPECTED_IN_LOOP=21
+function reset_cluster
+{
+ cleanup
+ TEST glusterd
+ TEST pidof glusterd
+
+}
+function check_iot_option
+{
+ local enabled=$1
+ local is_loaded_in_graph=$2
+
+ EXPECT "$enabled" volume_get_field $V0 client-io-threads
+ IOT_STRING="volume\ $V0-io-threads"
+ grep "$IOT_STRING" $GLUSTERD_WORKDIR/vols/$V0/trusted-$V0.tcp-fuse.vol
+ TEST ret=$?
+ EXPECT_NOT "$is_loaded_in_graph" echo $ret
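+ # grep exits 0 when the io-threads xlator is present in the fuse volfile,
+ # so asserting ret != $is_loaded_in_graph means: pass 1 to require it
+ # loaded (ret must be 0) and pass 0 to require it absent (ret must be 1).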
+}
+
+reset_cluster
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+check_iot_option on 1
+
+reset_cluster
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+check_iot_option off 0
+
+reset_cluster
+TEST $CLI volume create $V0 $H0:$B0/${V0}0
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}1
+check_iot_option off 0
+TEST $CLI volume remove-brick $V0 replica 1 $H0:$B0/${V0}1 force
+check_iot_option on 1
+
+reset_cluster
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..5}
+TEST $CLI volume set $V0 client-io-threads on
+check_iot_option on 1
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}2 $H0:$B0/${V0}5 force
+check_iot_option on 1
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1539358-split-brain-detection.t b/tests/bugs/replicate/bug-1539358-split-brain-detection.t
new file mode 100755
index 00000000000..7b71a7a9e7d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1539358-split-brain-detection.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+
+###############################################################################
+# Case of 2 bricks blaming the third and the third blaming the other two.
+
+TEST `echo "hello" >> $M0/file`
+
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST `echo "append" >> $M0/file`
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/file trusted.afr.$V0-client-1 data
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/file trusted.afr.$V0-client-1 data
+CLIENT_MD5=$(md5sum $M0/file | cut -d\ -f1)
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$CLIENT_MD5" == "$B0_MD5" ]
+TEST [ "$CLIENT_MD5" == "$B1_MD5" ]
+TEST [ "$CLIENT_MD5" == "$B2_MD5" ]
+
+TEST rm $M0/file
+
+###############################################################################
+# Case of each brick blaming the next one in a cyclic manner
+
+TEST `echo "hello" >> $M0/file`
+# Mark cyclic xattrs and modify file content directly on the bricks.
+TEST $CLI volume set $V0 self-heal-daemon off
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/$V0"0"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"0"/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"1"/file
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/$V0"2"/file
+setfattr -n trusted.afr.dirty -v 0x000000010000000000000000 $B0/$V0"2"/file
+
+TEST `echo "ab" >> $B0/$V0"0"/file`
+TEST `echo "cdef" >> $B0/$V0"1"/file`
+TEST `echo "ghi" >> $B0/$V0"2"/file`
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
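+# (Sketch) The xattrop index keeps one base entry named xattrop-<uuid>; the
+# index xlator marks a file as needing heal by hard-linking that base entry
+# to the file's gfid, which is what the ln above emulates by hand.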
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume set $V0 self-heal-daemon on
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+B0_MD5=$(md5sum $B0/${V0}0/file | cut -d\ -f1)
+B1_MD5=$(md5sum $B0/${V0}1/file | cut -d\ -f1)
+B2_MD5=$(md5sum $B0/${V0}2/file | cut -d\ -f1)
+TEST [ "$B0_MD5" == "$B1_MD5" ]
+TEST [ "$B0_MD5" == "$B2_MD5" ]
+###############################################################################
+cleanup
diff --git a/tests/bugs/replicate/bug-1561129-enospc.t b/tests/bugs/replicate/bug-1561129-enospc.t
new file mode 100644
index 00000000000..1b402fcc781
--- /dev/null
+++ b/tests/bugs/replicate/bug-1561129-enospc.t
@@ -0,0 +1,24 @@
+#!/bin/bash
+#Tests that sequential write workload doesn't lead to FSYNCs
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST truncate -s 128M $B0/xfs_image
+TEST mkfs.xfs -f $B0/xfs_image
+TEST mkdir $B0/bricks
+TEST mount -t xfs -o loop $B0/xfs_image $B0/bricks
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/bricks/brick{0,1,3}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+# Write 50MB of data, which would try to consume 50x3=150MB on $B0/bricks.
+# Before it gets that far, we hit ENOSPC in the pre-op cbk, which must not
+# crash the mount.
+TEST ! dd if=/dev/zero of=$M0/a bs=1M count=50
+TEST stat $M0/a
+cleanup;
diff --git a/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
new file mode 100644
index 00000000000..49c4dea4e9c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1586020-mark-dirty-for-entry-txn-on-quorum-failure.t
@@ -0,0 +1,72 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+function create_files {
+ local i=1
+ while (true)
+ do
+ dd if=/dev/zero of=$M0/file$i bs=1M count=10
+ if [ -e $B0/${V0}0/file$i ] || [ -e $B0/${V0}1/file$i ]; then
+ ((i++))
+ else
+ break
+ fi
+ done
+ echo $i
+}
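+# create_files keeps writing 10MB files until one of them fails to appear on
+# both of the two smaller (100MB) bricks, i.e. until those bricks are full,
+# and echoes that file's index.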
+
+TEST glusterd
+
+#Create brick partitions
+TEST truncate -s 100M $B0/brick0
+TEST truncate -s 100M $B0/brick1
+#Make the 3rd brick larger to test the scenario where the entry transaction
+#succeeds on only one brick and fails on the others.
+TEST truncate -s 110M $B0/brick2
+LO1=`SETUP_LOOP $B0/brick0`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO1
+LO2=`SETUP_LOOP $B0/brick1`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO2
+LO3=`SETUP_LOOP $B0/brick2`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO3
+TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+TEST MOUNT_LOOP $LO1 $B0/${V0}0
+TEST MOUNT_LOOP $LO2 $B0/${V0}1
+TEST MOUNT_LOOP $LO3 $B0/${V0}2
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+i=$(create_files)
+TEST ! ls $B0/${V0}0/file$i
+TEST ! ls $B0/${V0}1/file$i
+TEST ls $B0/${V0}2/file$i
+dirty=$(get_hex_xattr trusted.afr.dirty $B0/${V0}2)
+TEST [ "$dirty" != "000000000000000000000000" ]
+
+TEST $CLI volume set $V0 self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST rm -f $M0/file1
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+TEST $CLI volume delete $V0;
+UMOUNT_LOOP ${B0}/${V0}{0,1,2}
+rm -f ${B0}/brick{0,1,2}
+cleanup;
diff --git a/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t b/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
new file mode 100644
index 00000000000..c6e5459e9a8
--- /dev/null
+++ b/tests/bugs/replicate/bug-1591193-assign-gfid-and-heal.t
@@ -0,0 +1,128 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function check_gfid_and_link_count
+{
+ local file=$1
+
+ file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
+ TEST [ ! -z $file_gfid_b0 ]
+ file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
+ file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
+ EXPECT $file_gfid_b0 echo $file_gfid_b1
+ EXPECT $file_gfid_b0 echo $file_gfid_b2
+
+ EXPECT "2" stat -c %h $B0/${V0}0/$file
+ EXPECT "2" stat -c %h $B0/${V0}1/$file
+ EXPECT "2" stat -c %h $B0/${V0}2/$file
+}
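+# The helper asserts all three bricks agree on the gfid and that each backend
+# file has link count 2: the file itself plus its .glusterfs gfid hard link.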
+TESTS_EXPECTED_IN_LOOP=30
+
+##############################################################################
+# Test on 1x3 volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+
+# Create files directly in the backend on different bricks
+echo $RANDOM >> $B0/${V0}0/file1
+echo $RANDOM >> $B0/${V0}1/file2
+echo $RANDOM >> $B0/${V0}2/file3
+
+# Sleep to avoid hitting the is_fresh_file code path in posix lookup
+sleep 2
+
+# Access them from mount to trigger name + gfid heal.
+TEST stat $M0/file1
+TEST stat $M0/file2
+TEST stat $M0/file3
+
+# Launch index heal to complete any pending data/metadata heals.
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check each file has a gfid and the .glusterfs hardlink
+check_gfid_and_link_count file1
+check_gfid_and_link_count file2
+check_gfid_and_link_count file3
+
+TEST rm $M0/file1
+TEST rm $M0/file2
+TEST rm $M0/file3
+cleanup;
+
+##############################################################################
+# Test on 1x (2+1) volume
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+
+# Create files directly in the backend on different bricks
+echo $RANDOM >> $B0/${V0}0/file1
+echo $RANDOM >> $B0/${V0}1/file2
+touch $B0/${V0}2/file3
+
+# Sleep to avoid hitting the is_fresh_file code path in posix lookup
+sleep 2
+
+# Access them from mount to trigger name + gfid heal.
+TEST stat $M0/file1
+TEST stat $M0/file2
+
+# Though the file is created on all 3 bricks, lookup will fail as the arbiter
+# blames the other 2 bricks and the arbiter is not 'readable'.
+TEST ! stat $M0/file3
+
+# Launch index heal to complete any pending data/metadata heals.
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Now file3 should be accessible from the mount.
+TEST stat $M0/file3
+
+# Check each file has a gfid and the .glusterfs hardlink
+check_gfid_and_link_count file1
+check_gfid_and_link_count file2
+check_gfid_and_link_count file3
+
+TEST rm $M0/file1
+TEST rm $M0/file2
+TEST rm $M0/file3
+cleanup;
diff --git a/tests/bugs/replicate/bug-1626994-info-split-brain.t b/tests/bugs/replicate/bug-1626994-info-split-brain.t
new file mode 100644
index 00000000000..86bfecb1a9e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1626994-info-split-brain.t
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+# Test that dirs having dirty xattrs do not show up in info split-brain.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+rm -f $M0/FILE
+EXPECT "1" count_index_entries $B0/${V0}0
+EXPECT "1" count_index_entries $B0/${V0}1
+EXPECT "1" count_index_entries $B0/${V0}2
+
+TEST mkdir $M0/dirty_dir
+TEST mkdir $M0/pending_dir
+
+# Set dirty xattrs on all bricks to simulate the case where the entry
+# transaction succeeded only in the pre-op phase.
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}0/dirty_dir
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/dirty_dir
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}2/dirty_dir
+create_brick_xattrop_entry $B0/${V0}0 dirty_dir
+# Should not show up as split-brain.
+EXPECT "0" afr_get_split_brain_count $V0
+
+# The replace/reset-brick case, where the new brick has a dirty xattr and the
+# other 2 bricks blame it, should not be reported as split-brain.
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}0
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}1
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}2
+create_brick_xattrop_entry $B0/${V0}0 "/"
+# Should not show up as split-brain.
+EXPECT "0" afr_get_split_brain_count $V0
+
+# Set pending xattrs on all bricks blaming each other to simulate the case of
+# entry split-brain.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/pending_dir
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/${V0}1/pending_dir
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}2/pending_dir
+create_brick_xattrop_entry $B0/${V0}0 pending_dir
+# Should show up as split-brain.
+EXPECT "1" afr_get_split_brain_count $V0
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1637249-gfid-heal.t b/tests/bugs/replicate/bug-1637249-gfid-heal.t
new file mode 100644
index 00000000000..e824f14531e
--- /dev/null
+++ b/tests/bugs/replicate/bug-1637249-gfid-heal.t
@@ -0,0 +1,149 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1};
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume start $V0;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+###############################################################################
+
+# Test for gfid + name heal when there is no 'source' brick, i.e. parent dir
+# xattrs are in split-brain or have dirty xattrs.
+
+TEST mkdir $M0/dir_pending
+TEST dd if=/dev/urandom of=$M0/dir_pending/file1 bs=1024 count=1024
+TEST mkdir $M0/dir_pending/dir11
+TEST mkdir $M0/dir_dirty
+TEST touch $M0/dir_dirty/file2
+
+# Set pending entry xattrs on dir_pending and remove gfid of entries under it on one brick.
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/${V0}0/dir_pending
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/${V0}1/dir_pending
+
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/file1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_pending/file1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+gfid_d11=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/dir11)
+gfid_str_d11=$(gf_gfid_xattr_to_str $gfid_d11)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_pending/dir11
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+
+# Set dirty entry xattrs on dir_dirty and remove gfid of entries under it on one brick.
+TEST setfattr -n trusted.afr.dirty -v 0x000000000000000000000001 $B0/${V0}1/dir_dirty
+gfid_f2=$(gf_get_gfid_xattr $B0/${V0}0/dir_dirty/file2)
+gfid_str_f2=$(gf_gfid_xattr_to_str $gfid_f2)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_dirty/file2
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+# Create a file under dir_pending directly on the backend only on 1 brick
+TEST touch $B0/${V0}1/dir_pending/file3
+
+# Create a file under dir_pending directly on the backend on all bricks
+TEST touch $B0/${V0}0/dir_pending/file4
+TEST touch $B0/${V0}1/dir_pending/file4
+
+# Stop & start the volume and mount client again.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/dir_pending/file1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/dir_pending/file1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+TEST stat $M0/dir_pending/dir11
+EXPECT "$gfid_d11" gf_get_gfid_xattr $B0/${V0}1/dir_pending/dir11
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+
+TEST stat $M0/dir_dirty/file2
+EXPECT "$gfid_f2" gf_get_gfid_xattr $B0/${V0}1/dir_dirty/file2
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f2:0:2}/${gfid_str_f2:2:2}/$gfid_str_f2
+
+TEST stat $M0/dir_pending/file3 # This assigns the gfid on the 2nd brick and heals the entry onto the 1st brick.
+gfid_f3=$(gf_get_gfid_xattr $B0/${V0}1/dir_pending/file3)
+TEST [ ! -z "$gfid_f3" ]
+EXPECT "$gfid_f3" gf_get_gfid_xattr $B0/${V0}0/dir_pending/file3
+
+TEST stat $M0/dir_pending/file4
+gfid_f4=$(gf_get_gfid_xattr $B0/${V0}0/dir_pending/file4)
+TEST [ ! -z "$gfid_f4" ]
+EXPECT "$gfid_f4" gf_get_gfid_xattr $B0/${V0}1/dir_pending/file4
+###############################################################################
+
+# Test for gfid + name heal when all bricks are 'source', i.e. parent dir
+# does not have any pending or dirty xattrs.
+
+TEST mkdir $M0/dir_clean
+TEST dd if=/dev/urandom of=$M0/dir_clean/file1 bs=1024 count=1024
+TEST mkdir $M0/dir_clean/dir11
+
+gfid_f1=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/file1)
+gfid_str_f1=$(gf_gfid_xattr_to_str $gfid_f1)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_clean/file1
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+gfid_d11=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/dir11)
+gfid_str_d11=$(gf_gfid_xattr_to_str $gfid_d11)
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir_clean/dir11
+TEST rm $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+# Create a file under dir_clean directly on the backend only on 1 brick
+TEST touch $B0/${V0}1/dir_clean/file3
+
+# Create a file under dir_clean directly on the backend on all bricks
+TEST touch $B0/${V0}0/dir_clean/file4
+TEST touch $B0/${V0}1/dir_clean/file4
+
+# Stop & start the volume and mount client again.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+TEST stat $M0/dir_clean/file1
+EXPECT "$gfid_f1" gf_get_gfid_xattr $B0/${V0}1/dir_clean/file1
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_f1:0:2}/${gfid_str_f1:2:2}/$gfid_str_f1
+
+TEST stat $M0/dir_clean/dir11
+EXPECT "$gfid_d11" gf_get_gfid_xattr $B0/${V0}1/dir_clean/dir11
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_d11:0:2}/${gfid_str_d11:2:2}/$gfid_str_d11
+
+TEST stat $M0/dir_clean/file3 # This assigns the gfid on the 2nd brick and heals the entry onto the 1st brick.
+gfid_f3=$(gf_get_gfid_xattr $B0/${V0}1/dir_clean/file3)
+TEST [ ! -z "$gfid_f3" ]
+EXPECT "$gfid_f3" gf_get_gfid_xattr $B0/${V0}0/dir_clean/file3
+
+TEST stat $M0/dir_clean/file4
+gfid_f4=$(gf_get_gfid_xattr $B0/${V0}0/dir_clean/file4)
+TEST [ ! -z "$gfid_f4" ]
+EXPECT "$gfid_f4" gf_get_gfid_xattr $B0/${V0}1/dir_clean/file4
+###############################################################################
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
new file mode 100644
index 00000000000..d7d1f285e01
--- /dev/null
+++ b/tests/bugs/replicate/bug-1637802-arbiter-stale-data-heal-lock.t
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+# Test to check that data self-heal does not leave any stale lock.
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+# Create base entry in indices/xattrop
+echo "Data" > $M0/FILE
+
+# Kill arbiter brick and write to FILE.
+TEST kill_brick $V0 $H0 $B0/${V0}2
+echo "arbiter down" >> $M0/FILE
+EXPECT 2 get_pending_heal_count $V0
+
+# Bring it back up and let heal complete.
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Writing to FILE must now succeed, i.e. the heal left no stale lock behind.
+echo "this must succeed" >> $M0/FILE
+TEST [ $? -eq 0 ]
+cleanup;
diff --git a/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t b/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t
new file mode 100644
index 00000000000..63f72e86bf6
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655050-dir-sbrain-size-policy.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 data-self-heal off
+TEST $CLI volume set $V0 entry-self-heal off
+TEST $CLI volume set $V0 metadata-self-heal off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+cd $M0
+TEST mkdir dir
+
+#Create metadata split-brain
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST chmod 757 dir
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST chmod 747 dir
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+
+#Use size as fav-child policy.
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+
+#Enable shd and attempt the heal; the size policy must not resolve a
+#directory split-brain, so the pending count and xattrs below must remain.
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+
+b1c1dir=$(afr_get_specific_changelog_xattr $B0/${V0}0/dir \
+ trusted.afr.$V0-client-1 "metadata")
+b2c0dir=$(afr_get_specific_changelog_xattr $B0/${V0}1/dir \
+ trusted.afr.$V0-client-0 "metadata")
+
+EXPECT "00000001" echo $b1c1dir
+EXPECT "00000001" echo $b2c0dir
+
+#Finish up
+TEST force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t b/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t
new file mode 100755
index 00000000000..319736e1157
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655052-sbrain-policy-same-size.t
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+#Test that favorite-child-policy 'size' cannot resolve a split-brain when
+#both copies of the file have the same size.
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+TEST touch $M0/file
+
+############ Healing using favorite-child-policy = size and size of bricks is same #################
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+
+#The file is still in split-brain
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT_NOT "^0$" echo $?
+
+#Both bricks hold a copy of the same size, so the size policy cannot pick a source
+TEST $CLI volume set $V0 cluster.favorite-child-policy size
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "2" get_pending_heal_count $V0
+cat $M0/file > /dev/null
+EXPECT_NOT "^0$" echo $?
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup
+
diff --git a/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t b/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t
new file mode 100644
index 00000000000..783016dc3c0
--- /dev/null
+++ b/tests/bugs/replicate/bug-1655854-support-dist-to-rep3-arb-conversion.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+# Conversion from 2x1 to 2x3
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}{0,1}
+EXPECT 'Created' volinfo_field $V0 'Status';
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status';
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST mkdir $M0/dir
+TEST dd if=/dev/urandom of=$M0/dir/file bs=100K count=5
+file_md5sum=$(md5sum $M0/dir/file | awk '{print $1}')
+
+TEST $CLI volume add-brick $V0 replica 3 $H0:$B0/${V0}{2..5}
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}5
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 5
+
+# Trigger heal and wait for it to complete
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check whether the directory & file are healed to the newly added bricks
+TEST ls $B0/${V0}2/dir
+TEST ls $B0/${V0}3/dir
+TEST ls $B0/${V0}4/dir
+TEST ls $B0/${V0}5/dir
+
+TEST [ $file_md5sum == $(md5sum $B0/${V0}4/dir/file | awk '{print $1}') ]
+TEST [ $file_md5sum == $(md5sum $B0/${V0}5/dir/file | awk '{print $1}') ]
+
+
+# Conversion from 2x1 to 2x(2+1)
+
+TEST $CLI volume create $V1 $H0:$B0/${V1}{0,1}
+EXPECT 'Created' volinfo_field $V1 'Status';
+TEST $CLI volume start $V1
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}1
+
+TEST $GFS --volfile-id=$V1 --volfile-server=$H0 $M1;
+TEST mkdir $M1/dir
+TEST dd if=/dev/urandom of=$M1/dir/file bs=100K count=5
+file_md5sum=$(md5sum $M1/dir/file | awk '{print $1}')
+
+TEST $CLI volume add-brick $V1 replica 3 arbiter 1 $H0:$B0/${V1}{2..5}
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}3
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}4
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V1 $H0 $B0/${V1}5
+
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 2
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 3
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 4
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V1 5
+
+# Trigger heal and wait for it to complete
+TEST $CLI volume heal $V1
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V1
+
+# Check whether the directory & file are healed to the newly added bricks
+TEST ls $B0/${V1}2/dir
+TEST ls $B0/${V1}3/dir
+TEST ls $B0/${V1}4/dir
+TEST ls $B0/${V1}5/dir
+
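+# Brick 5 is the arbiter of its replica set, and arbiter bricks store only
+# metadata, so the file there must be 0 bytes while brick 4 holds full data.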
+EXPECT "0" stat -c %s $B0/${V1}5/dir/file
+TEST [ $file_md5sum == $(md5sum $B0/${V1}4/dir/file | awk '{print $1}') ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t b/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t
new file mode 100644
index 00000000000..b180f0e1239
--- /dev/null
+++ b/tests/bugs/replicate/bug-1657783-do-not-update-read-subvol-on-rename-link.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0..2}
+TEST $CLI volume set $V0 self-heal-daemon off
+TEST $CLI volume set $V0 cluster.data-self-heal off
+TEST $CLI volume set $V0 cluster.metadata-self-heal off
+TEST $CLI volume set $V0 cluster.entry-self-heal off
+TEST $CLI volume set $V0 performance.write-behind off
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST mkdir $M0/dir
+TEST "echo abc > $M0/file1"
+TEST "echo uvw > $M0/file2"
+
+TEST kill_brick $V0 $H0 $B0/${V0}0
+TEST "echo def > $M0/file1"
+TEST "echo xyz > $M0/file2"
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+
+TEST kill_brick $V0 $H0 $B0/${V0}1
+
+# Rename file1 and read it. Read should be served from the 3rd brick
+TEST mv $M0/file1 $M0/file3
+EXPECT "def" cat $M0/file3
+
+# Create a link to file2 and read it. Read should be served from the 3rd brick
+TEST ln $M0/file2 $M0/dir/file4
+EXPECT "xyz" cat $M0/dir/file4
+EXPECT "xyz" cat $M0/file2
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
new file mode 100644
index 00000000000..78581e99614
--- /dev/null
+++ b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+CHANGELOG_PATH_0="$B0/${V0}2/.glusterfs/changelogs"
+ROLLOVER_TIME=100
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST dd if=/dev/zero of=$M0/file1 bs=128K count=5
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST $CLI volume profile $V0 info
+truncate_count=$($CLI volume profile $V0 info | grep TRUNCATE | awk '{count += $8} END {print count}')
+
+EXPECT "1" echo $truncate_count
+EXPECT "1" check_changelog_op ${CHANGELOG_PATH_0} "^ D "
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1696599-io-hang.t b/tests/bugs/replicate/bug-1696599-io-hang.t
new file mode 100755
index 00000000000..869cdb94bda
--- /dev/null
+++ b/tests/bugs/replicate/bug-1696599-io-hang.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../fileio.rc
+
+#Tests that local structures in afr are removed from granted/blocked list of
+#locks when inodelk fails on all bricks
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3}
+TEST $CLI volume set $V0 performance.quick-read off
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $CLI volume set $V0 performance.io-cache off
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume set $V0 performance.client-io-threads off
+TEST $CLI volume set $V0 delay-gen locks
+TEST $CLI volume set $V0 delay-gen.delay-duration 5000000
+TEST $CLI volume set $V0 delay-gen.delay-percentage 100
+TEST $CLI volume set $V0 delay-gen.enable finodelk
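+# (Note) delay-gen.delay-duration is in microseconds, so the settings above
+# delay 100% of finodelk fops by ~5 seconds.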
+
+TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+
+TEST $GFS -s $H0 --volfile-id $V0 $M0
+TEST touch $M0/file
+#Trigger a write and stop the bricks so that inodelks fail on all bricks,
+#leading to the lock-failure condition
+echo abc >> $M0/file &
+
+TEST $CLI volume stop $V0
+TEST $CLI volume reset $V0 delay-gen
+wait
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_meta $M0 $V0-replicate-0 2
+#Test that only one write succeeded; this verifies that delay-gen worked as
+#expected
+echo abc >> $M0/file
+EXPECT "abc" cat $M0/file
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
new file mode 100644
index 00000000000..76d1f2170f2
--- /dev/null
+++ b/tests/bugs/replicate/bug-1717819-metadata-split-brain-detection.t
@@ -0,0 +1,136 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $CLI volume heal $V0 disable
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+###############################################################################
+# Case of 2 bricks blaming the third and the third blaming the other two.
+
+TEST mkdir $M0/dir
+
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST setfattr -n user.metadata -v 1 $M0/dir
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/dir trusted.afr.$V0-client-1 metadata
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/dir trusted.afr.$V0-client-1 metadata
+CLIENT_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $M0/dir)
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"1"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir
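+# (A trusted.afr.* value is 12 bytes: 4 bytes each of pending data, metadata
+# and entry counts. 0x000000000000000100000000 therefore records exactly one
+# pending metadata operation against the named client.)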
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir)
+
+TEST [ "$CLIENT_XATTR" == "$B0_XATTR" ]
+TEST [ "$CLIENT_XATTR" == "$B1_XATTR" ]
+TEST [ "$CLIENT_XATTR" == "$B2_XATTR" ]
+TEST setfattr -x user.metadata $M0/dir
+
+###############################################################################
+# Case of each brick blaming the next one in a cyclic manner
+
+TEST $CLI volume heal $V0 disable
+TEST `echo "hello" >> $M0/dir/file`
+# Mark cyclic xattrs and modify metadata directly on the bricks.
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000100000000 $B0/$V0"0"/dir/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"2"/dir/file
+
+setfattr -n user.metadata -v 1 $B0/$V0"0"/dir/file
+setfattr -n user.metadata -v 2 $B0/$V0"1"/dir/file
+setfattr -n user.metadata -v 3 $B0/$V0"2"/dir/file
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/file))
+ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
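+# (The xattrop directory is the index xlator's registry of pending heals:
+# each entry is a hard link of a base file, named after the gfid needing
+# heal, so linking the base entry to this gfid queues the file for index
+# heal.)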
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir/file)
+
+TEST [ "$B0_XATTR" == "$B1_XATTR" ]
+TEST [ "$B0_XATTR" == "$B2_XATTR" ]
+TEST rm -f $M0/dir/file
+
+###############################################################################
+# Case where 2 bricks are blamed with quorum and the other brick is blamed
+# by only one.
+
+TEST $CLI volume heal $V0 disable
+TEST `echo "hello" >> $M0/dir/file`
+# B0 and B2 must blame B1
+TEST kill_brick $V0 $H0 $B0/$V0"1"
+TEST setfattr -n user.metadata -v 1 $M0/dir/file
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}0/dir/file trusted.afr.$V0-client-1 metadata
+EXPECT "00000001" afr_get_specific_changelog_xattr $B0/${V0}2/dir/file trusted.afr.$V0-client-1 metadata
+
+# B1 must blame B0 and B2
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"1"/dir/file
+
+# B0 must blame B2
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000100000000 $B0/$V0"0"/dir/file
+
+# Modify the metadata directly on the bricks B1 & B2.
+setfattr -n user.metadata -v 2 $B0/$V0"1"/dir/file
+setfattr -n user.metadata -v 3 $B0/$V0"2"/dir/file
+
+# Launch heal
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^1$" brick_up_status $V0 $H0 $B0/${V0}1
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+B0_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}0/dir/file)
+B1_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}1/dir/file)
+B2_XATTR=$(getfattr -n 'user.metadata' --absolute-names --only-values $B0/${V0}2/dir/file)
+
+TEST [ "$B0_XATTR" == "$B1_XATTR" ]
+TEST [ "$B0_XATTR" == "$B2_XATTR" ]
+
+###############################################################################
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
new file mode 100644
index 00000000000..0aeaaafc84c
--- /dev/null
+++ b/tests/bugs/replicate/bug-1722507-type-mismatch-error-handling.t
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+
+##########################################################################################
+# The gfid xattr and its .glusterfs link file are missing on one brick, and
+# all the bricks are being blamed.
+
+TEST touch $M0/dir/file
+
+#Set blame xattrs: B2 blames B0, and B0 blames both B1 and B2, so every
+#brick is blamed.
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on one brick.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
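+# (Each file on a brick normally carries a trusted.gfid xattr plus a hard
+# link at .glusterfs/<first 2 chars>/<next 2 chars>/<full gfid>; those are
+# exactly the two pieces removed above.)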
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 seconds to force posix to consider that this is a valid file,
+# just one without a gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail: the file is only missing its gfid xattr and the
+# .glusterfs link file, which is not an actual gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+rm -f $M0/dir/file
+
+
+###########################################################################################
+# The gfid xattr and its .glusterfs link file are missing on two bricks, and
+# all the bricks are being blamed.
+
+TEST $CLI volume heal $V0 disable
+TEST touch $M0/dir/file
+
+#Set blame xattrs: B2 blames B0, and B0 blames both B1 and B2, so every
+#brick is blamed.
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-1 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir to trigger index heal.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+ln -s $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Remove the gfid xattr and the link file on two bricks.
+gfid_file=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file)
+gfid_str_file=$(gf_gfid_xattr_to_str $gfid_file)
+TEST setfattr -x trusted.gfid $B0/${V0}0/dir/file
+TEST rm -f $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+TEST setfattr -x trusted.gfid $B0/${V0}1/dir/file
+TEST rm -f $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+
+# Wait for 2 seconds to force posix to consider that this is a valid file,
+# just one without a gfid.
+sleep 2
+TEST $CLI volume heal $V0
+
+# Heal should not fail: the file is only missing its gfid xattr and the
+# .glusterfs link file, which is not an actual gfid or type mismatch.
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}0/dir/file
+TEST stat $B0/${V0}0/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+EXPECT "$gfid_file" gf_get_gfid_xattr $B0/${V0}1/dir/file
+TEST stat $B0/${V0}1/.glusterfs/${gfid_str_file:0:2}/${gfid_str_file:2:2}/$gfid_str_file
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1728770-pass-xattrs.t b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
new file mode 100644
index 00000000000..159c4fcc6a1
--- /dev/null
+++ b/tests/bugs/replicate/bug-1728770-pass-xattrs.t
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup;
+
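+# Helpers polled via EXPECT_WITHIN below: each echoes the exit status of its
+# last fop so the test can wait until the corrupted disk starts failing
+# (touch returning 1, ls returning 2).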
+function fop_on_bad_disk {
+ local path=$1
+ mkdir $path/dir{1..1000} 2>/dev/null
+ mv $path/dir1 $path/newdir
+ touch $path/foo.txt
+ echo $?
+}
+
+function ls_fop_on_bad_disk {
+ local path=$1
+ ls $path
+ echo $?
+}
+
+TEST init_n_bricks 6;
+TEST setup_lvm 6;
+
+TEST glusterd;
+TEST pidof glusterd;
+
+TEST $CLI volume create $V0 replica 3 $H0:$L1 $H0:$L2 $H0:$L3 $H0:$L4 $H0:$L5 $H0:$L6;
+TEST $CLI volume set $V0 health-check-interval 1000;
+
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
+#corrupt last disk
+dd if=/dev/urandom of=/dev/mapper/patchy_snap_vg_6-brick_lvm bs=512K count=200 status=progress && sync
+
+
+# Test that the disk is now returning EIO for touch and ls
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^1$" fop_on_bad_disk "$L6"
+EXPECT_WITHIN $DISK_FAIL_TIMEOUT "^2$" ls_fop_on_bad_disk "$L6"
+
+TEST touch $M0/foo{1..100}
+TEST $CLI volume remove-brick $V0 replica 3 $H0:$L4 $H0:$L5 $H0:$L6 start
+EXPECT_WITHIN $REBALANCE_TIMEOUT "completed" remove_brick_status_completed_field "$V0" "$H0:$L4 $H0:$L5 $H0:$L6";
+
+#Check that the remove-brick status does not report any failed or skipped files
+var=`$CLI volume remove-brick $V0 $H0:$L4 $H0:$L5 $H0:$L6 status | grep completed`
+TEST [ `echo $var | awk '{print $5}'` = "0" ]
+TEST [ `echo $var | awk '{print $6}'` = "0" ]
+
+cleanup;
diff --git a/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
new file mode 100644
index 00000000000..14dfae89135
--- /dev/null
+++ b/tests/bugs/replicate/bug-1734370-entry-heal-restore-time.t
@@ -0,0 +1,102 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+function time_stamps_match {
+ path=$1
+ mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
+ atime_source_b0=$(get_atime $B0/${V0}0/$path)
+ mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
+ atime_source_b2=$(get_atime $B0/${V0}2/$path)
+ mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
+ atime_sink_b1=$(get_atime $B0/${V0}1/$path)
+
+ #The same brick must be the source of heal for both atime and mtime.
+ if [[ ( $mtime_source_b0 -eq $mtime_sink_b1 && $atime_source_b0 -eq $atime_sink_b1 ) || \
+ ( $mtime_source_b2 -eq $mtime_sink_b1 && $atime_source_b2 -eq $atime_sink_b1 ) ]]
+ then
+ echo "Y"
+ else
+ echo "Mtimes: $mtime_source_b0:$mtime_sink_b1:$mtime_source_b2 Atimes: $atime_source_b0:$atime_sink_b1:$atime_source_b2"
+ fi
+
+}
+
+function mtimes_match {
+ path=$1
+ mtime_source_b0=$(get_mtime $B0/${V0}0/$path)
+ mtime_source_b2=$(get_mtime $B0/${V0}2/$path)
+ mtime_sink_b1=$(get_mtime $B0/${V0}1/$path)
+
+    if [[ ( $mtime_source_b0 -eq $mtime_sink_b1 ) || \
+          ( $mtime_source_b2 -eq $mtime_sink_b1 ) ]]
+ then
+ echo "Y"
+ else
+ echo "Mtimes: $mtime_source_b0:$mtime_sink_b1:$mtime_source_b2"
+ fi
+
+}
+
+# Test that the parent dir's timestamps are restored during entry-heal.
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 --attribute-timeout=0 --entry-timeout=0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+###############################################################################
+TEST mkdir $M0/DIR
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/DIR/FILE
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+EXPECT "Y" time_stamps_match DIR
+ctime_source1=$(get_ctime $B0/${V0}0/DIR)
+ctime_source2=$(get_ctime $B0/${V0}2/DIR)
+ctime_sink=$(get_ctime $B0/${V0}1/DIR)
+TEST [ $ctime_source1 -eq $ctime_sink ]
+TEST [ $ctime_source2 -eq $ctime_sink ]
+
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+TEST mkdir $M0/DIR2
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST touch $M0/DIR2/FILE
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+#Heals executing in parallel may change the atime after the heal completes,
+#so test only the mtime
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+EXPECT "Y" mtimes_match DIR2
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+cleanup;
diff --git a/tests/bugs/replicate/bug-1744548-heal-timeout.t b/tests/bugs/replicate/bug-1744548-heal-timeout.t
new file mode 100644
index 00000000000..011535066f9
--- /dev/null
+++ b/tests/bugs/replicate/bug-1744548-heal-timeout.t
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+function get_cumulative_opendir_count {
+#The sed command prints the content between Cumulative and Interval, keeping only the Cumulative stats
+ $CLI volume profile $V0 info |sed -n '/^Cumulative/,/^Interval/p'|grep OPENDIR| awk '{print $8}'|tr -d '\n'
+}
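+# (Sketch of the assumed per-brick "profile info" OPENDIR row:
+#    %-latency  Avg-latency  Min-Latency  Max-Latency  No. of calls  Fop
+#      0.00     0.00 us      0.00 us      0.00 us          4       OPENDIR
+# with the "us" units as separate awk fields, $8 lands on the call count;
+# one count per brick concatenates to strings like "444".)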
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume heal $V0 disable
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST ! $CLI volume heal $V0
+
+# Enable shd and verify that index crawl is triggered immediately.
+TEST $CLI volume profile $V0 start
+TEST $CLI volume profile $V0 info clear
+TEST $CLI volume heal $V0 enable
+# Each brick does 4 opendirs, one each for the dirty, xattrop, entry-changes
+# and anonymous-inode index directories.
+EXPECT_WITHIN 4 "^444$" get_cumulative_opendir_count
+
+# Check that a change in heal-timeout is honoured immediately.
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+sleep 10
+# Two crawls must have happened.
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^121212$" get_cumulative_opendir_count
+
+# shd must not start a heal crawl when it is disabled, even if heal-timeout
+# is changed.
+TEST $CLI volume heal $V0 disable
+#Wait for configuration update and any opendir fops to complete
+sleep 10
+TEST $CLI volume profile $V0 info clear
+TEST $CLI volume set $V0 cluster.heal-timeout 6
+#Better to wait for more than 6 seconds to account for configuration updates
+sleep 10
+COUNT=`$CLI volume profile $V0 info incremental |grep OPENDIR|awk '{print $8}'|tr -d '\n'`
+TEST [ -z "$COUNT" ]
+cleanup;
diff --git a/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
new file mode 100644
index 00000000000..96279084065
--- /dev/null
+++ b/tests/bugs/replicate/bug-1749322-entry-heal-not-happening.t
@@ -0,0 +1,89 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup
+
+function check_gfid_and_link_count
+{
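+    # The file must carry the same gfid on all 3 bricks and have a link
+    # count of 2: the name itself plus its .glusterfs gfid hard link.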
+ local file=$1
+
+ file_gfid_b0=$(gf_get_gfid_xattr $B0/${V0}0/$file)
+ TEST [ ! -z $file_gfid_b0 ]
+ file_gfid_b1=$(gf_get_gfid_xattr $B0/${V0}1/$file)
+ file_gfid_b2=$(gf_get_gfid_xattr $B0/${V0}2/$file)
+ EXPECT $file_gfid_b0 echo $file_gfid_b1
+ EXPECT $file_gfid_b0 echo $file_gfid_b2
+
+ EXPECT "2" stat -c %h $B0/${V0}0/$file
+ EXPECT "2" stat -c %h $B0/${V0}1/$file
+ EXPECT "2" stat -c %h $B0/${V0}2/$file
+}
+TESTS_EXPECTED_IN_LOOP=18
+
+################################################################################
+## Start and create a volume
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume info;
+
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume start $V0;
+TEST $CLI volume set $V0 cluster.heal-timeout 5
+TEST $CLI volume heal $V0 disable
+EXPECT 'Started' volinfo_field $V0 'Status';
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST mkdir $M0/dir
+TEST `echo "File 1 " > $M0/dir/file1`
+TEST touch $M0/dir/file{2..4}
+
+# Remove file2 from 1st & 3rd bricks
+TEST rm -f $B0/$V0"0"/dir/file2
+TEST rm -f $B0/$V0"2"/dir/file2
+
+# Remove file3 and the .glusterfs hardlink from 1st & 2nd bricks
+gfid_file3=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file3)
+gfid_str_file3=$(gf_gfid_xattr_to_str $gfid_file3)
+TEST rm $B0/$V0"0"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm $B0/$V0"1"/.glusterfs/${gfid_str_file3:0:2}/${gfid_str_file3:2:2}/$gfid_str_file3
+TEST rm -f $B0/$V0"0"/dir/file3
+TEST rm -f $B0/$V0"1"/dir/file3
+
+# Remove the .glusterfs hardlink and the gfid xattr of file4 on 3rd brick
+gfid_file4=$(gf_get_gfid_xattr $B0/$V0"0"/dir/file4)
+gfid_str_file4=$(gf_gfid_xattr_to_str $gfid_file4)
+TEST rm $B0/$V0"2"/.glusterfs/${gfid_str_file4:0:2}/${gfid_str_file4:2:2}/$gfid_str_file4
+TEST setfattr -x trusted.gfid $B0/$V0"2"/dir/file4
+
+# B0 and B2 blame each other
+setfattr -n trusted.afr.$V0-client-0 -v 0x000000000000000000000001 $B0/$V0"2"/dir
+setfattr -n trusted.afr.$V0-client-2 -v 0x000000000000000000000001 $B0/$V0"0"/dir
+
+# Add entry to xattrop dir on first brick.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/dir/))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_str
+
+EXPECT "^1$" get_pending_heal_count $V0
+
+# Launch heal
+TEST $CLI volume heal $V0 enable
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "^Y$" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "^1$" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# All the files must be present on all the bricks after conservative merge and
+# should have the gfid xattr and the .glusterfs hardlink.
+check_gfid_and_link_count dir/file1
+check_gfid_and_link_count dir/file2
+check_gfid_and_link_count dir/file3
+check_gfid_and_link_count dir/file4
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
new file mode 100644
index 00000000000..c1bdf34ee6d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1756938-replica-3-sbrain-cli.t
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+
+cleanup;
+
+TEST glusterd;
+TEST pidof glusterd;
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume set $V0 features.shard enable
+TEST $CLI volume set $V0 features.shard-block-size 4MB
+
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+TEST glusterfs --volfile-server=$H0 --volfile-id=/$V0 $M0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+#Create split-brain by setting afr xattrs/gfids manually.
+#file1 is non-sharded and will be in data split-brain.
+#file2 will have one shard which will be in data split-brain.
+#file3 will have one shard which will be in gfid split-brain.
+#file4 will have one shard which will be in data & metadata split-brain.
+TEST dd if=/dev/zero of=$M0/file1 bs=1024 count=1024 oflag=direct
+TEST dd if=/dev/zero of=$M0/file2 bs=1M count=6 oflag=direct
+TEST dd if=/dev/zero of=$M0/file3 bs=1M count=6 oflag=direct
+TEST dd if=/dev/zero of=$M0/file4 bs=1M count=6 oflag=direct
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+
+#-------------------------------------------------------------------------------
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/file1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/file1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/file1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/file1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/file1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/file1
+
+#-------------------------------------------------------------------------------
+gfid_f2=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file2))
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}0/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000000000000 $B0/${V0}1/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000000000000 $B0/${V0}2/.shard/$gfid_f2.1
+
+#-------------------------------------------------------------------------------
+TESTS_EXPECTED_IN_LOOP=5
+function assign_new_gfid {
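+    # Give the file's first shard on this brick a brand-new gfid: drop the
+    # old .glusterfs hard link and trusted.gfid xattr, then set a random
+    # gfid and re-create the matching .glusterfs hard link, producing a
+    # gfid mismatch across bricks.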
+ brickpath=$1
+ filename=$2
+ gfid=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/$filename))
+ gfid_shard=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brickpath/.shard/$gfid.1))
+
+ TEST rm $brickpath/.glusterfs/${gfid_shard:0:2}/${gfid_shard:2:2}/$gfid_shard
+ TEST setfattr -x trusted.gfid $brickpath/.shard/$gfid.1
+ new_gfid=$(get_random_gfid)
+ new_gfid_str=$(gf_gfid_xattr_to_str $new_gfid)
+ TEST setfattr -n trusted.gfid -v $new_gfid $brickpath/.shard/$gfid.1
+ TEST mkdir -p $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}
+ TEST ln $brickpath/.shard/$gfid.1 $brickpath/.glusterfs/${new_gfid_str:0:2}/${new_gfid_str:2:2}/$new_gfid_str
+}
+assign_new_gfid $B0/$V0"1" file3
+assign_new_gfid $B0/$V0"2" file3
+
+#-------------------------------------------------------------------------------
+gfid_f4=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file4))
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}0/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-2 -v 0x000000010000000100000000 $B0/${V0}1/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-0 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
+TEST setfattr -n trusted.afr.$V0-client-1 -v 0x000000010000000100000000 $B0/${V0}2/.shard/$gfid_f4.1
+
+#-------------------------------------------------------------------------------
+#Add entry to xattrop dir on first brick and check for split-brain.
+xattrop_dir0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_dir0`
+
+gfid_f1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/file1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f1
+
+gfid_f2_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f2.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f2_shard1
+
+gfid_f3=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/${V0}0/file3))
+gfid_f3_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f3.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f3_shard1
+
+gfid_f4_shard1=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/.shard/$gfid_f4.1))
+TEST ln $xattrop_dir0/$base_entry_b0 $xattrop_dir0/$gfid_f4_shard1
+
+#-------------------------------------------------------------------------------
+#Gfid split-brain (file3's shard) won't show up in the split-brain count,
+#hence 3 and not 4.
+EXPECT "3" afr_get_split_brain_count $V0
+EXPECT_NOT "^0$" get_pending_heal_count $V0
+
+#Resolve split-brains
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /file1
+GFIDSTR="gfid:$gfid_f2_shard1"
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 $GFIDSTR
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f3.1
+TEST $CLI volume heal $V0 split-brain source-brick $H0:$B0/${V0}1 /.shard/$gfid_f4.1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+cleanup;
diff --git a/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
new file mode 100644
index 00000000000..7e24eaec03d
--- /dev/null
+++ b/tests/bugs/replicate/bug-1761531-metadata-heal-restore-time.t
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../afr.rc
+cleanup
+
+GET_MDATA_PATH=$(dirname $0)/../../utils
+build_tester $GET_MDATA_PATH/get-mdata-xattr.c
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0..2}
+TEST $CLI volume start $V0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+
+TEST touch $M0/a
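+# Sleep so that the post-outage touch lands in a later second and the time
+# stamps actually differ between the up and down bricks.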
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/a
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime0=$(get_mtime $B0/brick0/a)
+mtime1=$(get_mtime $B0/brick1/a)
+TEST [ $mtime0 -eq $mtime1 ]
+
+ctime0=$(get_ctime $B0/brick0/a)
+ctime1=$(get_ctime $B0/brick1/a)
+TEST [ $ctime0 -eq $ctime1 ]
+
+###############################################################################
+# Repeat the test with ctime feature disabled.
+TEST $CLI volume set $V0 features.ctime off
+
+TEST touch $M0/b
+sleep 1
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST touch $M0/b
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^2$" get_pending_heal_count $V0
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+mtime2=$(get_mtime $B0/brick0/b)
+mtime3=$(get_mtime $B0/brick1/b)
+TEST [ $mtime2 -eq $mtime3 ]
+
+TEST rm $GET_MDATA_PATH/get-mdata-xattr
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/tests/bugs/replicate/bug-1801624-entry-heal.t b/tests/bugs/replicate/bug-1801624-entry-heal.t
new file mode 100644
index 00000000000..94b465181fa
--- /dev/null
+++ b/tests/bugs/replicate/bug-1801624-entry-heal.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/brick{0,1,2}
+TEST $CLI volume set $V0 heal-timeout 5
+TEST $CLI volume start $V0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0 granular-entry-heal enable
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Re-create the file when a brick is down.
+TEST kill_brick $V0 $H0 $B0/brick1
+TEST rm $M0/FILE
+echo "New Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+EXPECT_WITHIN $HEAL_TIMEOUT "4" get_pending_heal_count $V0
+
+# Launching index heal must not reset parent dir afr xattrs or remove granular entry indices.
+$CLI volume heal $V0 # CLI will fail but heal is launched anyway.
+TEST sleep 5 # give index heal a chance to do one run.
+brick0_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick0/)
+brick2_pending=$(get_hex_xattr trusted.afr.$V0-client-1 $B0/brick2/)
+TEST [ $brick0_pending -eq "000000000000000000000002" ]
+TEST [ $brick2_pending -eq "000000000000000000000002" ]
+EXPECT "FILE" ls $B0/brick0/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+EXPECT "FILE" ls $B0/brick2/.glusterfs/indices/entry-changes/00000000-0000-0000-0000-000000000001/
+
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+$CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+# No gfid split-brain (i.e. EIO) must be seen. Try on a fresh mount to avoid cached values.
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST cat $M0/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/bug-802417.t b/tests/bugs/replicate/bug-802417.t
index df989b1470b..f213439401e 100755
--- a/tests/bugs/replicate/bug-802417.t
+++ b/tests/bugs/replicate/bug-802417.t
@@ -10,6 +10,18 @@ function write_file()
}
cleanup;
+
+#####################################################
+# We currently do not trigger data heal unless all bricks of the replica are
+# up. This .t will need modification once the fix for preventing stale reads
+# from being served to clients for files in spurious split-brains is done.
+# A spurious split-brain here means the afr xattrs indicate split-brain but
+# the file actually is not in one. Self-heal heals such files automatically,
+# but reads can be served before the heal completes, which needs fixing.
+#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
+#G_TESTDEF_TEST_STATUS_CENTOS6=BAD_TEST,BUG=000000
+######################################################
+
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
@@ -32,7 +44,6 @@ TEST $CLI volume set $V0 performance.stat-prefetch off
## Make sure automatic self-heal doesn't perturb our results.
TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume set $V0 cluster.data-self-heal on
-TEST $CLI volume set $V0 cluster.background-self-heal-count 0
## Start volume and verify
TEST $CLI volume start $V0;
@@ -70,8 +81,8 @@ tgt_xattr_2="trusted.afr.${V0}-client-2"
actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_0)
EXPECT "0x000000000000000000000000|^\$" echo $actual
-actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_1)
-EXPECT "0x000000000000000000000000|^\$" echo $actual
+EXPECT_WITHIN $HEAL_TIMEOUT "0x000000000000000000000000" \
+afr_get_changelog_xattr $obs_path_0 $tgt_xattr_1
actual=$(afr_get_changelog_xattr $obs_path_0 $tgt_xattr_2)
EXPECT "0x000000030000000000000000" echo $actual
diff --git a/tests/bugs/replicate/bug-821056.t b/tests/bugs/replicate/bug-821056.t
index 02a9c78b6f0..81186d86309 100644
--- a/tests/bugs/replicate/bug-821056.t
+++ b/tests/bugs/replicate/bug-821056.t
@@ -8,7 +8,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST $CLI volume set $V0 performance.quick-read off
TEST $CLI volume set $V0 performance.open-behind off
@@ -35,7 +35,7 @@ kill_brick $V0 $H0 $B0/${V0}0
TEST gf_rm_file_and_gfid_link $B0/${V0}0 "a"
TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-ls -l $M0/a 2>&1 > /dev/null #Make sure the file is re-created
+ls -l $M0/a > /dev/null 2>&1 #Make sure the file is re-created
EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpath"
EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/a
diff --git a/tests/bugs/replicate/bug-830665.t b/tests/bugs/replicate/bug-830665.t
index 3d2ec1145da..68180424803 100755
--- a/tests/bugs/replicate/bug-830665.t
+++ b/tests/bugs/replicate/bug-830665.t
@@ -4,6 +4,8 @@
. $(dirname $0)/../../nfs.rc
. $(dirname $0)/../../volume.rc
+#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST
+
cleanup;
function recreate {
@@ -18,6 +20,7 @@ TEST $CLI volume info;
recreate ${B0}/${V0}-0
recreate ${B0}/${V0}-1
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}-{0,1}
+TEST $CLI volume set $V0 nfs.disable false
function volinfo_field()
{
@@ -75,6 +78,10 @@ volid=$(getfattr -e hex -n trusted.glusterfs.volume-id $B0/${V0}-0 2> /dev/null
| grep = | cut -d= -f2)
rm -rf $B0/${V0}-0;
mkdir $B0/${V0}-0;
+#Ideally, disk replacement is done using the reset-brick or replace-brick
+#gluster CLI, which creates the .glusterfs folder.
+mkdir $B0/${V0}-0/.glusterfs && chmod 600 $B0/${V0}-0/.glusterfs
+
setfattr -n trusted.glusterfs.volume-id -v $volid $B0/${V0}-0
## Restart and remount. Note that we use actimeo=0 so that the stat calls
diff --git a/tests/bugs/replicate/bug-853680.t b/tests/bugs/replicate/bug-853680.t
deleted file mode 100755
index 806c3d142a1..00000000000
--- a/tests/bugs/replicate/bug-853680.t
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/bin/bash
-#
-# Bug 853680
-#
-# Test that io-threads least-rate-limit throttling functions as expected. Set
-# a limit, perform a few operations with a least-priority mount and verify
-# said operations take a minimum amount of time according to the limit.
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-
-cleanup;
-
-TEST glusterd
-
-TEST $CLI volume create $V0 $H0:$B0/${V0}1
-TEST $CLI volume start $V0
-
-#Accept min val
-TEST $CLI volume set $V0 performance.least-rate-limit 0
-#Accept some value in between
-TEST $CLI volume set $V0 performance.least-rate-limit 1035
-#Accept max val INT_MAX
-TEST $CLI volume set $V0 performance.least-rate-limit 2147483647
-
-#Reject other values
-TEST ! $CLI volume set $V0 performance.least-rate-limit 2147483648
-TEST ! $CLI volume set $V0 performace.least-rate-limit -8
-TEST ! $CLI volume set $V0 performance.least-rate-limit abc
-TEST ! $CLI volume set $V0 performance.least-rate-limit 0.0
-TEST ! $CLI volume set $V0 performance.least-rate-limit -10.0
-TEST ! $CLI volume set $V0 performance.least-rate-limit 1%
-
-# set rate limit to 1 operation/sec
-TEST $CLI volume set $V0 performance.least-rate-limit 1
-
-# use client-pid=-1 for least priority mount
-TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0 --client-pid=-1
-
-# create a few files and verify this takes more than a few seconds
-date1=`date +%s`
-TEST touch $M0/file{0..2}
-date2=`date +%s`
-
-optime=$(($date2 - $date1))
-TEST [ $optime -ge 3 ]
-
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
-
-TEST $CLI volume stop $V0
-TEST $CLI volume delete $V0
-
-cleanup;
diff --git a/tests/bugs/replicate/bug-880898.t b/tests/bugs/replicate/bug-880898.t
index 123e7e16425..660d34ca25f 100644
--- a/tests/bugs/replicate/bug-880898.t
+++ b/tests/bugs/replicate/bug-880898.t
@@ -1,12 +1,19 @@
#!/bin/bash
. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
cleanup;
TEST glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/brick1 $H0:$B0/brick2
TEST $CLI volume start $V0
+EXPECT 'Started' volinfo_field $V0 'Status'
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/brick2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
pkill glusterfs
uuid=""
for line in $(cat $GLUSTERD_WORKDIR/glusterd.info)
diff --git a/tests/bugs/replicate/bug-913051.t b/tests/bugs/replicate/bug-913051.t
index 1c218397276..6794995e6fe 100644
--- a/tests/bugs/replicate/bug-913051.t
+++ b/tests/bugs/replicate/bug-913051.t
@@ -21,7 +21,7 @@ TEST $CLI volume set $V0 performance.stat-prefetch off
TEST $CLI volume set $V0 performance.read-ahead off
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
TEST $CLI volume start $V0
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0 --direct-io-mode=enable
+TEST $GFS --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 --direct-io-mode=enable $M0
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST mkdir $M0/dir
@@ -37,17 +37,6 @@ TEST fd_open $rfd "r" $M0/dir/b
TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 0
-#check that the files are not opned on brick-0
-TEST stat $M0/dir/a
-realpatha=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/a")
-EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpatha"
-EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/a
-
-TEST stat $M0/dir/b
-realpathb=$(gf_get_gfid_backend_file_path $B0/${V0}0 "dir/b")
-EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 "$realpathb"
-EXPECT "N" gf_check_file_opened_in_brick $V0 $H0 $B0/${V0}0 $B0/${V0}0/dir/b
-
#attempt self-heal so that the files are created on brick-0
TEST dd if=$M0/dir/a of=/dev/null bs=1024k
diff --git a/tests/bugs/replicate/bug-918437-sh-mtime.t b/tests/bugs/replicate/bug-918437-sh-mtime.t
index 04ac02f6337..6a194b14a9b 100644
--- a/tests/bugs/replicate/bug-918437-sh-mtime.t
+++ b/tests/bugs/replicate/bug-918437-sh-mtime.t
@@ -54,7 +54,7 @@ TEST stat $M0/b
TEST gluster volume heal $V0 full
EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick01/a
EXPECT_WITHIN $HEAL_TIMEOUT "Y" file_exists $B0/gfs0/brick02/b
-EXPECT_WITHIN $HEAL_TIMEOUT 0 afr_get_pending_heal_count $V0
+EXPECT_WITHIN $HEAL_TIMEOUT 0 get_pending_heal_count $V0
size=`stat -c '%s' /etc/passwd`
EXPECT $size stat -c '%s' $B0/gfs0/brick01/a
diff --git a/tests/bugs/replicate/bug-921231.t b/tests/bugs/replicate/bug-921231.t
index 93c642beb1e..81504612f63 100644..100755
--- a/tests/bugs/replicate/bug-921231.t
+++ b/tests/bugs/replicate/bug-921231.t
@@ -3,7 +3,7 @@
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
-# This test writes to same file with 2 fds and tests that eager-lock is not
+# This test writes to the same file with 2 fds and tests that cluster.eager-lock is not
# causing extra delay because of post-op-delay-secs
cleanup;
@@ -14,7 +14,7 @@ function write_to_file {
TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}0 $H0:$B0/${V0}1
-TEST $CLI volume set $V0 eager-lock on
+TEST $CLI volume set $V0 cluster.eager-lock on
TEST $CLI volume set $V0 post-op-delay-secs 3
TEST $CLI volume set $V0 client-log-level DEBUG
TEST $CLI volume start $V0
diff --git a/tests/bugs/replicate/bug-957877.t b/tests/bugs/replicate/bug-957877.t
index 12901723880..bcce7e3c9e7 100644
--- a/tests/bugs/replicate/bug-957877.t
+++ b/tests/bugs/replicate/bug-957877.t
@@ -23,7 +23,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
TEST $CLI volume heal $V0
# Wait for self-heal to complete
-EXPECT_WITHIN $HEAL_TIMEOUT '1' count_sh_entries $BRICK;
+EXPECT_WITHIN $HEAL_TIMEOUT '0' count_sh_entries $BRICK;
TEST getfattr -n "user.foo" $B0/${V0}0/f1;
diff --git a/tests/bugs/replicate/bug-966018.t b/tests/bugs/replicate/bug-966018.t
deleted file mode 100644
index be4d0b97b88..00000000000
--- a/tests/bugs/replicate/bug-966018.t
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../../include.rc
-. $(dirname $0)/../../volume.rc
-. $(dirname $0)/../../nfs.rc
-
-#This tests if eager-lock blocks metadata operations on nfs/fuse mounts.
-#If it is not woken up, INODELK from the next command waits
-#for post-op-delay secs.
-
-cleanup;
-TEST glusterd
-TEST pidof glusterd
-
-TEST $CLI volume create $V0 replica 2 $H0:$B0/r2_0 $H0:$B0/r2_1
-TEST $CLI volume set $V0 ensure-durability off
-TEST $CLI volume set $V0 cluster.eager-lock on
-TEST $CLI volume set $V0 cluster.post-op-delay-secs 3
-
-TEST $CLI volume start $V0
-TEST $CLI volume profile $V0 start
-EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
-TEST mount_nfs $H0:/$V0 $N0 nolock;
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id=$V0 $M0
-echo 1 > $N0/1 && chmod +x $N0/1
-echo 1 > $M0/1 && chmod +x $M0/1
-
-#Check that INODELK MAX latency is not in the order of seconds
-#Test if the MAX INODELK fop latency is of the order of seconds.
-inodelk_max_latency=$($CLI volume profile $V0 info | grep INODELK | awk 'BEGIN {max = 0} {if ($6 > max) max=$6;} END {print max}' | cut -d. -f 1 | egrep "[0-9]{7,}")
-
-TEST [ -z $inodelk_max_latency ]
-EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0
-
-cleanup;
diff --git a/tests/bugs/replicate/bug-976800.t b/tests/bugs/replicate/bug-976800.t
index 35a40a3c72e..27f8b27619e 100644
--- a/tests/bugs/replicate/bug-976800.t
+++ b/tests/bugs/replicate/bug-976800.t
@@ -20,7 +20,8 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 ensure-durability off
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
+TEST $CLI volume set $V0 flush-behind off
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
TEST dd of=$M0/1 if=/dev/zero bs=1k count=1 conv=fsync
diff --git a/tests/bugs/replicate/bug-977797.t b/tests/bugs/replicate/bug-977797.t
index 3ff14ecf3d5..9a8f36c956c 100755
--- a/tests/bugs/replicate/bug-977797.t
+++ b/tests/bugs/replicate/bug-977797.t
@@ -26,9 +26,11 @@ TEST $CLI volume set $V0 quick-read off
TEST $CLI volume set $V0 read-ahead off
TEST $CLI volume set $V0 write-behind off
TEST $CLI volume set $V0 io-cache off
-TEST $CLI volume set $V0 background-self-heal-count 0
+TEST $CLI volume set $V0 cluster.data-self-heal on
+TEST $CLI volume set $V0 cluster.metadata-self-heal on
+TEST $CLI volume set $V0 cluster.entry-self-heal on
-TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST $GFS --volfile-id=/$V0 --volfile-server=$H0 $M0
TEST mkdir -p $M0/a
@@ -54,36 +56,35 @@ TEST chmod 757 $M0/a/file
TEST $CLI volume start $V0 force
EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status $V0 1;
-TEST dd if=$M0/a/file of=/dev/null bs=1024k
-
-b1c0dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \
- trusted.afr.$V0-client-0 "entry")
-b1c1dir=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a \
- trusted.afr.$V0-client-1 "entry")
-b2c0dir=$(afr_get_specific_changelog_xattr \
- $B0/$V0"2"/a trusted.afr.$V0-client-0 "entry")
-b2c1dir=$(afr_get_specific_changelog_xattr \
- $B0/$V0"2"/a trusted.afr.$V0-client-1 "entry")
-
-
-b1c0f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \
- trusted.afr.$V0-client-0 "data")
-b1c1f=$(afr_get_specific_changelog_xattr $B0/$V0"1"/a/file \
- trusted.afr.$V0-client-1 "data")
-b2c0f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \
- trusted.afr.$V0-client-0 "data")
-b2c1f=$(afr_get_specific_changelog_xattr $B0/$V0"2"/a/file \
- trusted.afr.$V0-client-1 "data")
-
-EXPECT "00000000|^$" echo $b1c0f
-EXPECT "00000000|^$" echo $b1c1f
-EXPECT "00000000|^$" echo $b2c0f
-EXPECT "00000000|^$" echo $b2c1f
-
-EXPECT "00000000|^$" echo $b1c0dir
-EXPECT "00000000|^$" echo $b1c1dir
-EXPECT "00000000|^$" echo $b2c0dir
-EXPECT "00000000|^$" echo $b2c1dir
+#Trigger entry heal of $M0/a
+getfattr -n user.nosuchattr $M0/a
+dd if=$M0/a/file of=/dev/null bs=1024k
+#The read fails, but heal is triggered anyway.
+TEST [ $? -ne 0 ]
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a/file trusted.afr.$V0-client-0 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a/file trusted.afr.$V0-client-1 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a/file trusted.afr.$V0-client-0 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a/file trusted.afr.$V0-client-1 "data"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a trusted.afr.$V0-client-0 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"1"/a trusted.afr.$V0-client-1 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a trusted.afr.$V0-client-0 "entry"
+
+EXPECT_WITHIN $HEAL_TIMEOUT "00000000" \
+afr_get_specific_changelog_xattr $B0/$V0"2"/a trusted.afr.$V0-client-1 "entry"
## Finish up
TEST $CLI volume stop $V0;
diff --git a/tests/bugs/replicate/bug-979365.t b/tests/bugs/replicate/bug-979365.t
index b1396c23348..c09c7d51772 100755
--- a/tests/bugs/replicate/bug-979365.t
+++ b/tests/bugs/replicate/bug-979365.t
@@ -15,7 +15,7 @@ TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
TEST $CLI volume set $V0 ensure-durability on
-TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume set $V0 cluster.eager-lock off
TEST $CLI volume start $V0
TEST $CLI volume profile $V0 start
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
diff --git a/tests/bugs/replicate/issue-1254-prioritize-enospc.t b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
new file mode 100644
index 00000000000..fab94b71b27
--- /dev/null
+++ b/tests/bugs/replicate/issue-1254-prioritize-enospc.t
@@ -0,0 +1,80 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup
+
+function create_bricks {
+ TEST truncate -s 100M $B0/brick0
+ TEST truncate -s 100M $B0/brick1
+ TEST truncate -s 20M $B0/brick2
+ LO1=`SETUP_LOOP $B0/brick0`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO1
+ LO2=`SETUP_LOOP $B0/brick1`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO2
+ LO3=`SETUP_LOOP $B0/brick2`
+ TEST [ $? -eq 0 ]
+ TEST MKFS_LOOP $LO3
+ TEST mkdir -p $B0/${V0}0 $B0/${V0}1 $B0/${V0}2
+ TEST MOUNT_LOOP $LO1 $B0/${V0}0
+ TEST MOUNT_LOOP $LO2 $B0/${V0}1
+ TEST MOUNT_LOOP $LO3 $B0/${V0}2
+}
+
+function create_files {
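+    # Create files until one fails to appear on the small 20M brick, i.e.
+    # until that brick runs out of space.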
+ local i=1
+    while true
+ do
+ touch $M0/file$i
+ if [ -e $B0/${V0}2/file$i ];
+ then
+ ((i++))
+ else
+ break
+ fi
+ done
+}
+
+TESTS_EXPECTED_IN_LOOP=13
+
+#Arbiter volume: Check for ENOSPC when arbiter brick becomes full#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((echo "Test" > $M0/file-3) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
+
+#Replica-3 volume: Check for ENOSPC when one of the brick becomes full#
+#Keeping the third brick of lower size to simulate disk full scenario#
+TEST glusterd
+create_bricks
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
+TEST $CLI volume start $V0
+TEST $CLI volume set $V0 performance.write-behind off
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+
+create_files
+TEST kill_brick $V0 $H0 $B0/${V0}1
+error1=$(touch $M0/file-1 2>&1)
+EXPECT "No space left on device" echo $error1
+error2=$(mkdir $M0/dir-1 2>&1)
+EXPECT "No space left on device" echo $error2
+error3=$((cat /dev/zero > $M0/file1) 2>&1)
+EXPECT "No space left on device" echo $error3
+
+cleanup
diff --git a/tests/bugs/replicate/mdata-heal-no-xattrs.t b/tests/bugs/replicate/mdata-heal-no-xattrs.t
new file mode 100644
index 00000000000..d3b0c504c80
--- /dev/null
+++ b/tests/bugs/replicate/mdata-heal-no-xattrs.t
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2};
+TEST $CLI volume set $V0 cluster.self-heal-daemon off
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+echo "Data">$M0/FILE
+ret=$?
+TEST [ $ret -eq 0 ]
+
+# Change permissions on brick-0: this simulates a metadata mismatch with no
+# pending xattrs. This brick will become the source for heal.
+TEST chmod +x $B0/$V0"0"/FILE
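+# (With no pending afr xattrs anywhere, metadata heal must pick a source on
+# its own; the fix under test makes it mark pending xattrs on the chosen
+# source, brick-0 here, blaming the sinks, which the xattr checks after the
+# heal verify.)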
+
+# Add gfid to xattrop
+xattrop_b0=$(afr_get_index_path $B0/$V0"0")
+base_entry_b0=`ls $xattrop_b0`
+gfid_str_FILE=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $B0/$V0"0"/FILE))
+TEST ln $xattrop_b0/$base_entry_b0 $xattrop_b0/$gfid_str_FILE
+EXPECT_WITHIN $HEAL_TIMEOUT "^1$" get_pending_heal_count $V0
+
+TEST $CLI volume set $V0 cluster.self-heal-daemon on
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Brick-0 should contain afr xattrs blaming the other 2 bricks.
+# The values are all-zero because the heal has completed.
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-1 $B0/${V0}0/FILE
+EXPECT "000000000000000000000000" get_hex_xattr trusted.afr.$V0-client-2 $B0/${V0}0/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}0/FILE
+
+# Brick-1 and Brick-2 must not contain any afr xattrs.
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}1/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-0 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-1 $B0/${V0}2/FILE
+TEST ! getfattr -n trusted.afr.$V0-client-2 $B0/${V0}2/FILE
+
+# check permission bits.
+EXPECT '755' stat -c %a $B0/${V0}0/FILE
+EXPECT '755' stat -c %a $B0/${V0}1/FILE
+EXPECT '755' stat -c %a $B0/${V0}2/FILE
+
+EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
+cleanup;
diff --git a/tests/bugs/replicate/ta-inode-refresh-read.t b/tests/bugs/replicate/ta-inode-refresh-read.t
new file mode 100644
index 00000000000..6dd6ff7f163
--- /dev/null
+++ b/tests/bugs/replicate/ta-inode-refresh-read.t
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# Test read transaction inode refresh logic for thin-arbiter.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../thin-arbiter.rc
+cleanup;
+TEST ta_create_brick_and_volfile brick0
+TEST ta_create_brick_and_volfile brick1
+TEST ta_create_ta_and_volfile ta
+TEST ta_start_brick_process brick0
+TEST ta_start_brick_process brick1
+TEST ta_start_ta_process ta
+
+TEST ta_create_mount_volfile brick0 brick1 ta
+# Set afr xlator options to choose brick0 as read-subvol.
+sed -i '/iam-self-heal-daemon/a \ option read-subvolume-index 0' $B0/mount.vol
+TEST [ $? -eq 0 ]
+sed -i '/iam-self-heal-daemon/a \ option choose-local false' $B0/mount.vol
+TEST [ $? -eq 0 ]
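+# (read-subvolume-index 0 together with choose-local false pins reads to
+# brick0, guaranteeing the read transaction below first hits the brick whose
+# gfid link we remove.)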
+
+TEST ta_start_mount_process $M0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ta_up_status $V0 $M0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "trusted.afr.patchy-ta-2" ls $B0/ta
+
+TEST touch $M0/FILE
+TEST ls $B0/brick0/FILE
+TEST ls $B0/brick1/FILE
+TEST ! ls $B0/ta/FILE
+TEST setfattr -n user.name -v ravi $M0/FILE
+
+# Remove the gfid hardlink from brick0, which is the read-subvol for FILE.
+# This triggers an inode refresh upon getfattr and eventually calls
+# afr_ta_read_txn(). Without this patch, afr_ta_read_txn() would again query
+# brick0, causing the getfattr to fail.
+TEST rm -f $(gf_get_gfid_backend_file_path $B0/brick0 FILE)
+TEST getfattr -n user.name $M0/FILE
+
+cleanup;