From febf5ed4848ad705a34413353559482417c61467 Mon Sep 17 00:00:00 2001
From: Amar Tumballi
Date: Fri, 23 Jun 2017 13:10:56 +0530
Subject: posix: option to handle the shared bricks for statvfs()

Currently the 'storage/posix' xlator has an option, `export-statfs-size no`,
which exports zero as the value of a few fields in `struct statvfs`. When the
backend filesystem is shared between multiple brick processes, the value of
each of these fields should instead be `field_value / number-of-bricks-at-node`.
This way, checks such as 'min-free-disk' at different layers are also handled
properly when the statfs() system call is made.

Fixes #241
Change-Id: I2e320e1fdcc819ab9173277ef3498201432c275f
Signed-off-by: Amar Tumballi
Reviewed-on: https://review.gluster.org/17618
CentOS-regression: Gluster Build System
Smoke: Gluster Build System
Reviewed-by: Jeff Darcy
Reviewed-by: Atin Mukherjee
---
 tests/basic/posix/shared-statfs.t | 53 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 53 insertions(+)
 create mode 100644 tests/basic/posix/shared-statfs.t

diff --git a/tests/basic/posix/shared-statfs.t b/tests/basic/posix/shared-statfs.t
new file mode 100644
index 00000000000..8caa9fa2110
--- /dev/null
+++ b/tests/basic/posix/shared-statfs.t
@@ -0,0 +1,53 @@
+#!/bin/bash
+# Test that statfs results are not served verbatim from the posix backend FS.
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+
+cleanup;
+TEST glusterd
+
+# Create brick partitions
+TEST truncate -s 100M $B0/brick1
+TEST truncate -s 100M $B0/brick2
+LO1=`SETUP_LOOP $B0/brick1`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO1
+LO2=`SETUP_LOOP $B0/brick2`
+TEST [ $? -eq 0 ]
+TEST MKFS_LOOP $LO2
+TEST mkdir -p $B0/${V0}1 $B0/${V0}2
+TEST MOUNT_LOOP $LO1 $B0/${V0}1
+TEST MOUNT_LOOP $LO2 $B0/${V0}2
+
+# Create a subdirectory in each mount point and use those for the volume.
+TEST $CLI volume create $V0 $H0:$B0/${V0}1/1 $H0:$B0/${V0}2/1;
+TEST $CLI volume start $V0
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+# Expect a little less than 200M because XFS uses some space
+# on each brick for its own metadata.
+TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
+
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+# From the same two mount points, add another two bricks each to the volume.
+TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1/2 $H0:$B0/${V0}2/2 $H0:$B0/${V0}1/3 $H0:$B0/${V0}2/3
+
+TEST $CLI volume start $V0
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0
+total_space=$(df -P $M0 | tail -1 | awk '{ print $2}')
+TEST [ $total_space -gt 194000 -a $total_space -lt 200000 ]
+
+TEST force_umount $M0
+TEST $CLI volume stop $V0
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+
+TEST $CLI volume delete $V0;
+
+UMOUNT_LOOP ${B0}/${V0}{1,2}
+rm -f ${B0}/brick{1,2}
+cleanup;
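
Note: to see what `field_value / number-of-bricks-at-node` means in practice, here is a
minimal standalone C sketch of the idea. It is not code from this patch; the function name
shared_statfs_adjust and the shared_brick_count parameter are hypothetical, standing in for
however the posix xlator determines how many local bricks share the backend filesystem.

    /*
     * Illustrative sketch only -- not the code from this change.
     * Scale the statvfs result so that each of the bricks sharing one
     * backend filesystem advertises only its share of the space.
     */
    #include <sys/statvfs.h>

    static void
    shared_statfs_adjust (struct statvfs *buf, int shared_brick_count)
    {
            if (shared_brick_count <= 1)
                    return;  /* backend is not shared, nothing to do */

            /* Block counts: total, free, and free-for-unprivileged. */
            buf->f_blocks /= shared_brick_count;
            buf->f_bfree  /= shared_brick_count;
            buf->f_bavail /= shared_brick_count;

            /* Inode counts follow the same rule. */
            buf->f_files  /= shared_brick_count;
            buf->f_ffree  /= shared_brick_count;
            buf->f_favail /= shared_brick_count;
    }

In the real translator the divisor would have to be computed per node (for example by
counting local bricks that resolve to the same backend device) and the adjustment applied
in the statfs callback before the result is passed up the graph, which is why the test
above expects roughly 200M of total space regardless of how many bricks share the two
loop-mounted filesystems.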