From 87bb8d0400d4ed18dd3954b1d9e5ca6ee0fb9742 Mon Sep 17 00:00:00 2001
From: Mohit Agrawal
Date: Tue, 9 Aug 2016 15:53:27 +0530
Subject: glusterd: Convert volume to replica after adding brick self heal is
 not triggered

Problem: After adding a brick to a distribute volume in order to convert
it to a replica volume, self heal is not triggered.

Solution: Modify the condition in brick_graph_add_index so that the
trusted.afr.dirty attribute is set in the index xlator.

Test: To verify the patch, the following steps were followed:
1) Create a single-brick volume
       gluster volume create
2) Start the volume and mount it
       mount -t glusterfs :/DIS /mnt
3) Touch some files and write some data to them
4) Add another brick along with replica 2
       gluster volume add-brick DIS replica 2 :/dist2/brick2
5) Before applying the patch, the file size is 0 bytes at the mount point.

BUG: 1365455
Change-Id: Ief0ccbf98ea21b53d0e27edef177db6cabb3397f
Signed-off-by: Mohit Agrawal
Reviewed-on: http://review.gluster.org/15118
NetBSD-regression: NetBSD Build System
Reviewed-by: Ravishankar N
Reviewed-by: Anuradha Talur
Smoke: Gluster Build System
CentOS-regression: Gluster Build System
Reviewed-by: Atin Mukherjee
---
 tests/bugs/replicate/bug-1365455.t          | 54 +++++++++++++++++++++++++++++
 xlators/mgmt/glusterd/src/glusterd-volgen.c |  3 +-
 2 files changed, 56 insertions(+), 1 deletion(-)
 create mode 100644 tests/bugs/replicate/bug-1365455.t

diff --git a/tests/bugs/replicate/bug-1365455.t b/tests/bugs/replicate/bug-1365455.t
new file mode 100644
index 00000000000..1953e2a9327
--- /dev/null
+++ b/tests/bugs/replicate/bug-1365455.t
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+function check_size
+{
+        for i in {1..10}; do
+                size1=`stat -c %s $B0/${V0}0/tmp$i`
+                size2=`stat -c %s $B0/${V0}1/tmp$i`
+                if [[ $size1 -eq 0 ]] || [[ $size2 -eq 0 ]] || [[ $size1 -ne $size2 ]]; then
+                        return 1
+                fi
+        done
+
+        return 0
+}
+
+cleanup;
+TEST glusterd
+TEST pidof glusterd
+
+TEST $CLI volume create $V0 $H0:$B0/${V0}0;
+
+TEST $CLI volume start $V0;
+
+TEST glusterfs -s $H0 --volfile-id $V0 $M0
+
+for i in {1..10}
+do
+        echo abc > $M0/tmp$i
+done
+
+
+# Add Another brick
+TEST $CLI volume add-brick $V0 replica 2 $H0:$B0/${V0}1
+
+#Check if self heal daemon has come up
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+
+#Check if self heal daemon is able to see all bricks
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+
+# Wait for heal to complete
+EXPECT_WITHIN $HEAL_TIMEOUT "^0$" get_pending_heal_count $V0
+
+# Check if entry-heal has happened
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+
+#Check size of files on bricks
+TEST check_size
+
+cleanup;

diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 1f8a0e70add..52331dac302 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -1985,7 +1985,8 @@ brick_graph_add_index (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
                 goto out;
         }
         if ((volinfo->type == GF_CLUSTER_TYPE_STRIPE_REPLICATE ||
-            volinfo->type == GF_CLUSTER_TYPE_REPLICATE)) {
+            volinfo->type == GF_CLUSTER_TYPE_REPLICATE ||
+            volinfo->type == GF_CLUSTER_TYPE_NONE)) {
                 ret = xlator_set_option (xl, "xattrop-dirty-watchlist",
                                          "trusted.afr.dirty");
                 if (ret)
--
cgit
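
Note (not part of the patch): a condensed manual reproduction of the Test steps from
the commit message, as a sketch only. The commit message leaves the hostname and brick
paths blank, so "server1", "/bricks/brick1" and "/bricks/brick2" below are assumed,
illustrative values; only the volume name "DIS" is taken from the commit message.

    # Sketch: server1 and /bricks/brick* are assumed names, not values from the patch.
    gluster volume create DIS server1:/bricks/brick1          # 1) single-brick distribute volume
                                                              #    (may need 'force' on a root partition)
    gluster volume start DIS
    mount -t glusterfs server1:/DIS /mnt                      # 2) mount the volume
    for i in $(seq 1 10); do echo abc > /mnt/tmp$i; done      # 3) write some data
    gluster volume add-brick DIS replica 2 server1:/bricks/brick2   # 4) convert to replica 2
    gluster volume heal DIS info                              # with the fix, pending heals drain to 0
    stat -c %s /bricks/brick1/tmp1 /bricks/brick2/tmp1        # sizes should match once self heal completes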
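
Note (not part of the patch): the volgen change means brick_graph_add_index now sets the
xattrop-dirty-watchlist option on the features/index xlator for plain distribute
(GF_CLUSTER_TYPE_NONE) volumes as well, so trusted.afr.dirty is indexed once the volume is
converted to a replica. One way to confirm this on a test box is to grep the regenerated
brick volfile; the path below follows glusterd's usual <vol>.<host>.<brick-path-with-dashes>.vol
naming, but the host and brick path are again illustrative assumptions.

    # Sketch: the volfile path is assumed from glusterd's usual naming scheme.
    grep -B3 -A1 xattrop-dirty-watchlist \
        /var/lib/glusterd/vols/DIS/DIS.server1.bricks-brick1.vol
    # With the fix, the features/index section of the brick volfile should contain:
    #     option xattrop-dirty-watchlist trusted.afr.dirty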