From 4bba976e7a61d9961a133b2b61e56eb0871d3ba2 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K
Date: Fri, 22 Apr 2016 11:43:45 +0530
Subject: cluster/afr: Do not fsync when durability is off

>BUG: 1329501
>Change-Id: Id402c20f2fa19b22bc402295e03e7a0ea96b0c40
>Signed-off-by: Pranith Kumar K
>Reviewed-on: http://review.gluster.org/14048
>Reviewed-by: Ravishankar N
>Smoke: Gluster Build System
>NetBSD-regression: NetBSD Build System
>CentOS-regression: Gluster Build System
>Reviewed-by: Jeff Darcy
>(cherry picked from commit 302e218f68ef5edab6b369411d6f06cafea08ce1)

Change-Id: Ifbf693f8de6765fca90a9ef3c11c1912c2e9885f
BUG: 1331342
Signed-off-by: Pranith Kumar K
Reviewed-on: http://review.gluster.org/14104
Reviewed-by: Ravishankar N
Smoke: Gluster Build System
Reviewed-by: Krutika Dhananjay
CentOS-regression: Gluster Build System
NetBSD-regression: NetBSD Build System
---
 tests/basic/afr/durability-off.t             | 44 ++++++++++++++++++++++++++++
 xlators/cluster/afr/src/afr-self-heal-data.c |  3 ++
 2 files changed, 47 insertions(+)
 create mode 100644 tests/basic/afr/durability-off.t

diff --git a/tests/basic/afr/durability-off.t b/tests/basic/afr/durability-off.t
new file mode 100644
index 00000000000..155ffa09ef0
--- /dev/null
+++ b/tests/basic/afr/durability-off.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+#This test tests that self-heals don't perform fsync when durability is turned
+#off
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 cluster.ensure-durability off
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST dd of=$M0/a.txt if=/dev/zero bs=1024k count=1
+#NetBSD sends FSYNC so the counts go for a toss. Stop and start the volume.
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT "^0$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+
+#Test that fsyncs happen when durability is on
+TEST $CLI volume set $V0 cluster.ensure-durability on
+TEST $CLI volume set $V0 performance.strict-write-ordering on
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST dd of=$M0/a.txt if=/dev/zero bs=1024k count=1
+#NetBSD sends FSYNC so the counts go for a toss. Stop and start the volume.
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT "^2$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+
+cleanup;

diff --git a/xlators/cluster/afr/src/afr-self-heal-data.c b/xlators/cluster/afr/src/afr-self-heal-data.c
index 72f6b2ad945..8b6e846ae01 100644
--- a/xlators/cluster/afr/src/afr-self-heal-data.c
+++ b/xlators/cluster/afr/src/afr-self-heal-data.c
@@ -303,6 +303,9 @@ afr_selfheal_data_fsync (call_frame_t *frame, xlator_t *this, fd_t *fd,
         local = frame->local;
         priv = this->private;
 
+        if (!priv->ensure_durability)
+                return 0;
+
         AFR_ONLIST (healed_sinks, frame, attr_cbk, fsync, fd, 0, NULL);
 
         for (i = 0; i < priv->child_count; i++)
--
cgit
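
The functional change is the three-line guard in afr_selfheal_data_fsync(): when cluster.ensure-durability is off, self-heal skips the FSYNC it would otherwise issue on every healed sink, trading crash-durability of the just-healed data for heal speed. As a rough, standalone illustration of that pattern — none of this is AFR code, and every name except ensure_durability is made up for the example — here is a minimal C sketch:

    /*
     * Standalone sketch of the "skip fsync when durability is off" pattern.
     * Purely illustrative; only the ensure_durability name mirrors the patch.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static int ensure_durability = 1;   /* analogous to priv->ensure_durability */

    /* Write one block and flush it to disk only when durability is requested. */
    static int
    write_block (int fd, const char *buf, size_t len)
    {
            if (write (fd, buf, len) < 0)
                    return -1;

            if (!ensure_durability)
                    return 0;           /* the early return this patch adds */

            return fsync (fd);          /* pay the durability cost only when asked */
    }

    int
    main (int argc, char *argv[])
    {
            char buf[1024] = {0};
            int  fd = -1;

            if (argc > 1 && strcmp (argv[1], "--no-durability") == 0)
                    ensure_durability = 0;

            fd = open ("/tmp/durability-off-demo", O_WRONLY | O_CREAT | O_TRUNC, 0644);
            if (fd < 0) {
                    perror ("open");
                    return EXIT_FAILURE;
            }

            if (write_block (fd, buf, sizeof (buf)) < 0) {
                    perror ("write_block");
                    close (fd);
                    return EXIT_FAILURE;
            }

            close (fd);
            return EXIT_SUCCESS;
    }

Run with --no-durability the block is written without an fsync, which is the behaviour the first half of the test asserts by expecting zero FSYNC entries in the brick profile; without the flag the fsync happens, matching the second half of the test. The trade-off is that a brick crash immediately after a heal can lose the healed writes — that is the durability the option name refers to.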
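
Outside the test harness, the same check can be done by hand with the stock gluster CLI; the volume name below is a placeholder and the brick-down/write/brick-up step is only sketched in a comment:

    # Placeholder volume name "r2"; adjust to your setup.
    gluster volume profile r2 start
    gluster volume set r2 cluster.ensure-durability off

    # ...take one brick down, write through a client mount, bring the brick back...

    gluster volume heal r2
    # With durability off, no FSYNC row should appear in the brick profiles:
    gluster volume profile r2 info | grep -w FSYNC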