From 03f5172dd50b50988c65dd66e87a0d43e78a3810 Mon Sep 17 00:00:00 2001
From: Pranith Kumar K
Date: Mon, 24 Jun 2013 08:15:09 +0530
Subject: cluster/afr: Fix fd/memory leak on fsync

Change-Id: I764883811e30ca9d9c249ad00b6762101083a2fe
BUG: 976800
Signed-off-by: Pranith Kumar K
Reviewed-on: http://review.gluster.org/5248
Tested-by: Gluster Build System
Reviewed-by: Jeff Darcy
---
 tests/bugs/bug-976800.t | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)
 create mode 100644 tests/bugs/bug-976800.t

(limited to 'tests')

diff --git a/tests/bugs/bug-976800.t b/tests/bugs/bug-976800.t
new file mode 100644
index 000000000..9557630db
--- /dev/null
+++ b/tests/bugs/bug-976800.t
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+
+# This test checks if there are any open fds on the brick
+# even after the file is closed on the mount. This particular
+# test tests dd with "fsync" to check afr's fsync codepath
+cleanup;
+
+function is_fd_open {
+        local v=$1
+        local h=$2
+        local b=$3
+        local bpid=$(get_brick_pid $v $h $b)
+        ls -l /proc/$bpid/fd | grep -w "\-> $b/1"
+}
+
+TEST glusterd
+TEST pidof glusterd
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 eager-lock off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
+TEST dd of=$M0/1 if=/dev/zero bs=1k count=1 conv=fsync
+TEST ! is_fd_open $V0 $H0 $B0/${V0}0
+cleanup;
--
cgit
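
For context, the leak check in `is_fd_open` only relies on the brick process exposing its open descriptors as symlinks under /proc/<pid>/fd; the gluster-specific pieces ($V0, $B0, get_brick_pid) merely resolve the brick's pid and path. A minimal standalone sketch of the same idea, assuming a generic pid and an absolute file path are passed in (the script name and argument handling here are illustrative, not part of the gluster test harness):

    #!/bin/bash
    # check-fd.sh: report whether a process still holds an open fd on a file,
    # by scanning the symlinks under /proc/<pid>/fd (same trick as is_fd_open).
    # Usage: ./check-fd.sh <pid> <absolute-file-path>

    pid=$1
    path=$2

    # Each entry under /proc/<pid>/fd is a symlink whose target is the open file.
    # "ls -l" prints "... -> /target/path"; grep -w for the exact target, as the
    # test above does. "--" keeps the leading "-" of the pattern from being
    # parsed as a grep option.
    if ls -l /proc/"$pid"/fd 2>/dev/null | grep -wq -- "-> $path"; then
        echo "fd still open on $path"
        exit 0
    else
        echo "no open fd on $path"
        exit 1
    fi

In the test itself, a non-empty match means the brick still holds the fd after the mount closed the file, so the test asserts the negation (`TEST ! is_fd_open ...`) once dd with conv=fsync has completed.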