From 302e218f68ef5edab6b369411d6f06cafea08ce1 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K
Date: Fri, 22 Apr 2016 11:43:45 +0530
Subject: cluster/afr: Do not fsync when durability is off

BUG: 1329501
Change-Id: Id402c20f2fa19b22bc402295e03e7a0ea96b0c40
Signed-off-by: Pranith Kumar K
Reviewed-on: http://review.gluster.org/14048
Reviewed-by: Ravishankar N
Smoke: Gluster Build System
NetBSD-regression: NetBSD Build System
CentOS-regression: Gluster Build System
Reviewed-by: Jeff Darcy
---
 tests/basic/afr/durability-off.t | 44 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)
 create mode 100644 tests/basic/afr/durability-off.t

(limited to 'tests/basic')

diff --git a/tests/basic/afr/durability-off.t b/tests/basic/afr/durability-off.t
new file mode 100644
index 00000000000..155ffa09ef0
--- /dev/null
+++ b/tests/basic/afr/durability-off.t
@@ -0,0 +1,44 @@
+#!/bin/bash
+#Verify that self-heal issues no FSYNC when cluster.ensure-durability is
+#off (profile shows 0 FSYNC fops), and does issue FSYNC once turned back on.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/brick{0,1}
+TEST $CLI volume start $V0
+TEST $CLI volume profile $V0 start
+TEST $CLI volume set $V0 cluster.ensure-durability off
+TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST dd of=$M0/a.txt if=/dev/zero bs=1024k count=1
+#NetBSD sends FSYNC on its own, so the profile counts go for a toss; stop and start the volume to clear them.
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT "^0$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+
+#Now verify that self-heal does perform fsync when durability is on
+TEST $CLI volume set $V0 cluster.ensure-durability on
+TEST $CLI volume set $V0 performance.strict-write-ordering on
+TEST kill_brick $V0 $H0 $B0/brick0
+TEST dd of=$M0/a.txt if=/dev/zero bs=1024k count=1
+#NetBSD sends FSYNC on its own, so the profile counts go for a toss; stop and start the volume to clear them.
+TEST $CLI volume stop $V0
+TEST $CLI volume start $V0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+TEST $CLI volume heal $V0
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+EXPECT "^2$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+
+cleanup;
--
cgit