From 5784a00f997212d34bd52b2303e20c097240d91c Mon Sep 17 00:00:00 2001
From: karthik-us
Date: Wed, 30 May 2018 15:27:52 +0530
Subject: cluster/afr: Use 2 domain locking in SHD for thin-arbiter

With this change, when SHD starts the index crawl it requests all the
clients to release the AFR_TA_DOM_NOTIFY lock, so that clients know
their in-memory state is no longer valid and any new operation needs to
query the thin-arbiter if required. When SHD completes healing all the
files without any failure, it takes the AFR_TA_DOM_NOTIFY lock again
and reads the xattrs on the TA to see whether any new failures happened
in the meantime. If new failures are marked on the TA, SHD starts the
crawl again immediately to heal those failures as well. If there are no
new failures, SHD takes the AFR_TA_DOM_MODIFY lock and unsets the
xattrs on the TA, so that both data bricks are considered good
thereafter.

Change-Id: I037b89a0823648f314580ba0716d877bd5ddb1f1
fixes: bz#1579788
Signed-off-by: karthik-us
---
 tests/thin-arbiter.rc | 181 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 181 insertions(+)

(limited to 'tests/thin-arbiter.rc')

diff --git a/tests/thin-arbiter.rc b/tests/thin-arbiter.rc
index 36d11cea61d..c5ac00baaaf 100644
--- a/tests/thin-arbiter.rc
+++ b/tests/thin-arbiter.rc
@@ -431,3 +431,184 @@ function ta_up_status()
         local replica_id=$3
         grep -E "^up = " $m/.meta/graphs/active/${v}-replicate-${replica_id}/private | cut -f2 -d'='
 }
+
+function ta_create_shd_volfile()
+{
+        local b0=$B0/$1
+        local b1=$B0/$2
+        local ta=$B0/$3
+        local b0_port=${PORTMAP[$1]}
+        local b1_port=${PORTMAP[$2]}
+        local ta_port=${PORTMAP[$3]}
+cat > $B0/glustershd.vol <
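
For reference, a minimal usage sketch of the new helper (not part of this
patch; the brick names and the exact daemon invocation below are
assumptions): ta_create_shd_volfile takes the two data-brick directory
names and the thin-arbiter brick name, looks up their listening ports in
the PORTMAP array populated by the existing thin-arbiter helpers, and
writes the self-heal daemon volfile to $B0/glustershd.vol, from which a
glustershd-style process can be started directly:

    # Assumed setup: brick0, brick1 and ta were already started by the
    # other thin-arbiter helpers, so PORTMAP[brick0], PORTMAP[brick1]
    # and PORTMAP[ta] hold their listening ports.
    TEST ta_create_shd_volfile brick0 brick1 ta
    TEST [ -f $B0/glustershd.vol ]
    # Run a self-heal daemon straight from the generated volfile.
    TEST glusterfs -f $B0/glustershd.vol -l $B0/glustershd.log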