From 48ca0c05df4cee66cf8d07e19ee2267fc9ba920b Mon Sep 17 00:00:00 2001
From: karthik-us
Date: Thu, 7 Mar 2019 22:26:49 +0530
Subject: cluster/afr: Send truncate on arbiter brick from SHD

Problem:
In an arbiter volume configuration, SHD will not send any writes to the
arbiter brick even if there is a data pending marker for it. If we have an
arbiter setup on the geo-rep master and there are data pending markers for
files on the arbiter brick, SHD will not record any data changelog during
healing. While syncing the data from master to slave, if the arbiter brick
is considered ACTIVE, there is a chance that the slave will miss some data.
If the arbiter brick is newly added or replaced, the slave could miss all
the data during sync.

Fix:
If there is a data pending marker for the arbiter brick, send a truncate on
the arbiter brick during heal, so that it records truncate as the data
transaction in the changelog.

Change-Id: I3242ba6cea6da495c418ef860d9c3359c5459dec
fixes: bz#1686568
Signed-off-by: karthik-us
---
 ...bug-1686568-send-truncate-on-arbiter-from-shd.t | 38 ++++++++++++++++++++++
 tests/volume.rc                                    |  2 +-
 xlators/cluster/afr/src/afr-self-heal-data.c       | 28 ++++++++--------
 3 files changed, 52 insertions(+), 16 deletions(-)
 create mode 100644 tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t

diff --git a/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
new file mode 100644
index 00000000000..78581e99614
--- /dev/null
+++ b/tests/bugs/replicate/bug-1686568-send-truncate-on-arbiter-from-shd.t
@@ -0,0 +1,38 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+CHANGELOG_PATH_0="$B0/${V0}2/.glusterfs/changelogs"
+ROLLOVER_TIME=100
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 changelog.changelog on
+TEST $CLI volume set $V0 changelog.rollover-time $ROLLOVER_TIME
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST dd if=/dev/zero of=$M0/file1 bs=128K count=5
+
+TEST $CLI volume profile $V0 start
+TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+TEST $CLI volume profile $V0 info
+truncate_count=$($CLI volume profile $V0 info | grep TRUNCATE | awk '{count += $8} END {print count}')
+
+EXPECT "1" echo $truncate_count
+EXPECT "1" check_changelog_op ${CHANGELOG_PATH_0} "^ D "
+
+cleanup;
diff --git a/tests/volume.rc b/tests/volume.rc
index 6fd7c3cdbcd..fe6074b3cd6 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -883,5 +883,5 @@ function check_changelog_op {
     local clog_path=$1
     local op=$2

-    $PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep $op | wc -l
+    $PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep "$op" | wc -l
 }
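
Note: the quoting fix to check_changelog_op above is what lets the new test
pass a pattern containing spaces. A minimal sketch of the difference, using
a synthetic changelog line (not part of the patch):

    # With op='^ D ', the unquoted `grep $op` word-splits into `grep ^ D`,
    # i.e. pattern "^" searched in a file named "D". Quoting keeps the
    # whole pattern as a single argument:
    op='^ D '
    printf '%s\n' ' D 7f1c0d29-aaaa-bbbb-cccc-000000000001' | grep "$op"
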
diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
index 6dd38ef633a..d9a0ee35f55 100644
--- a/xlators/cluster/afr/src/afr-self-heal-data.c
+++ b/xlators/cluster/afr/src/afr-self-heal-data.c
@@ -375,17 +375,18 @@ __afr_selfheal_truncate_sinks(call_frame_t *frame, xlator_t *this, fd_t *fd,
 {
     afr_local_t *local = NULL;
     afr_private_t *priv = NULL;
-    unsigned char arbiter_sink_status = 0;
     int i = 0;

     local = frame->local;
     priv = this->private;

-    if (priv->arbiter_count) {
-        arbiter_sink_status = healed_sinks[ARBITER_BRICK_INDEX];
-        healed_sinks[ARBITER_BRICK_INDEX] = 0;
-    }
-
+    /* This will send truncate on the arbiter brick as well if it is marked
+     * as a sink. If changelog is enabled on the volume, it captures truncate
+     * as a data transaction on the arbiter brick. This helps geo-rep to
+     * properly sync the data from master to slave when the arbiter is the
+     * ACTIVE brick during sync and has entries that were healed for data as
+     * part of self heal.
+     */
     AFR_ONLIST(healed_sinks, frame, afr_sh_generic_fop_cbk, ftruncate, fd,
                size, NULL);

@@ -396,8 +397,6 @@ __afr_selfheal_truncate_sinks(call_frame_t *frame, xlator_t *this, fd_t *fd,
          */
         healed_sinks[i] = 0;

-    if (arbiter_sink_status)
-        healed_sinks[ARBITER_BRICK_INDEX] = arbiter_sink_status;
     return 0;
 }

@@ -693,19 +692,18 @@ __afr_selfheal_data(call_frame_t *frame, xlator_t *this, fd_t *fd,
             goto unlock;
         }

-        if (priv->arbiter_count &&
-            AFR_COUNT(healed_sinks, priv->child_count) == 1 &&
-            healed_sinks[ARBITER_BRICK_INDEX]) {
-            is_arbiter_the_only_sink = _gf_true;
-            goto restore_time;
-        }
-
         ret = __afr_selfheal_truncate_sinks(
             frame, this, fd, healed_sinks,
             locked_replies[source].poststat.ia_size);
         if (ret < 0)
             goto unlock;

+        if (priv->arbiter_count &&
+            AFR_COUNT(healed_sinks, priv->child_count) == 1 &&
+            healed_sinks[ARBITER_BRICK_INDEX]) {
+            is_arbiter_the_only_sink = _gf_true;
+            goto restore_time;
+        }
         ret = 0;
     }
 unlock:
--
cgit
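
For manual verification outside the test harness, the flow of the .t above
can be sketched against a live setup. Hostnames, brick paths, and the
in-tree location of changelogparser.py below are illustrative assumptions,
not part of the patch:

    # Replica-2 volume with changelog enabled; write data, then add the
    # arbiter so the new brick comes up with pending data heals.
    # (Confirm the replica-2 split-brain warning if the CLI prompts.)
    gluster volume create testvol replica 2 host1:/bricks/b0 host2:/bricks/b0
    gluster volume set testvol changelog.changelog on
    gluster volume start testvol
    mount -t glusterfs host1:/testvol /mnt/testvol
    dd if=/dev/zero of=/mnt/testvol/file1 bs=128K count=5
    gluster volume profile testvol start
    gluster volume add-brick testvol replica 3 arbiter 1 host3:/bricks/arb
    gluster volume heal testvol

    # With this patch, SHD sends ftruncate to the arbiter sink, so profile
    # info shows TRUNCATE fops on the arbiter brick and its changelog
    # records a D (DATA) entry for the healed file.
    gluster volume profile testvol info | grep TRUNCATE
    python tests/utils/changelogparser.py \
        /bricks/arb/.glusterfs/changelogs/CHANGELOG | grep "^ D "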