From 950726dfc8e3171bef625b563c0c6dbba1ec2928 Mon Sep 17 00:00:00 2001
From: Sheetal Pamecha
Date: Mon, 19 Nov 2018 22:15:25 +0530
Subject: posix: add storage.reserve-size option

The storage.reserve-size option takes a size as input instead of a
percentage. If set, storage.reserve-size is given priority over
storage.reserve. The default value of this option is 0.

fixes: bz#1651445
Change-Id: I7a7342c68e436e8bf65bd39c567512ee04abbcea
Signed-off-by: Sheetal Pamecha
---
 tests/bugs/posix/bug-1651445.t | 58 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)
 create mode 100644 tests/bugs/posix/bug-1651445.t

diff --git a/tests/bugs/posix/bug-1651445.t b/tests/bugs/posix/bug-1651445.t
new file mode 100644
index 00000000000..f6f1833f919
--- /dev/null
+++ b/tests/bugs/posix/bug-1651445.t
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup
+
+TEST verify_lvm_version
+TEST glusterd
+TEST pidof glusterd
+TEST init_n_bricks 3
+TEST setup_lvm 3
+
+TEST $CLI volume create $V0 replica 3 $H0:$L{1,2,3}
+TEST $CLI volume start $V0
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+TEST $CLI volume set $V0 storage.reserve-size 10MB
+
+#No effect, since reserve-size takes priority over reserve
+TEST $CLI volume set $V0 storage.reserve 20
+
+TEST dd if=/dev/zero of=$M0/a bs=100M count=1
+sleep 5
+
+#The dd below succeeding confirms that posix gives priority to reserve-size
+TEST dd if=/dev/zero of=$M0/b bs=40M count=1
+
+sleep 5
+TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
+
+rm -rf $M0/*
+#With reserve-size back to 0, the previously set reserve option = 20% applies
+TEST $CLI volume set $V0 storage.reserve-size 0
+
+#Override the reserve option with reserve-size again
+TEST $CLI volume set $V0 storage.reserve-size 40MB
+
+#Wait 5s for the disk_space_full flag to reset
+sleep 5
+
+TEST dd if=/dev/zero of=$M0/a bs=100M count=1
+TEST dd if=/dev/zero of=$M0/b bs=10M count=1
+
+# Wait 5s for the disk_space_full flag to update, since the thread checks
+# disk space every 5s
+
+sleep 5
+# setup_lvm creates an LVM partition of 150M and 40M is reserved, so after
+# consuming more than 110M the next dd should fail
+TEST ! dd if=/dev/zero of=$M0/c bs=5M count=1
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
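
For context, a minimal usage sketch of the behavior the commit message
describes, using the standard "gluster volume set" CLI form that the test
exercises via $CLI. The volume name "testvol" is a placeholder and not
part of this patch:

    # Reserve a fixed 100MB per brick instead of a percentage
    gluster volume set testvol storage.reserve-size 100MB

    # No effect while reserve-size is non-zero: reserve-size takes
    # priority over the percentage-based storage.reserve option
    gluster volume set testvol storage.reserve 20

    # Setting reserve-size back to its default of 0 makes the
    # percentage-based storage.reserve option apply again
    gluster volume set testvol storage.reserve-size 0

This mirrors the test flow above: writes succeed until free space on the
brick drops to the configured reserve, after which further writes fail.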