#!/bin/sh

# This script runs over the GlusterFS mountpoint (from just one client)
# to handle the distribution of 'data', after the distribute translator's
# subvolumes count changes.
#
# (c) 2009 Gluster Inc,
#
#
# Make sure the following variables are properly initialized

MOUNTPOINT=/tmp/testdir
directory_to_be_scaled="${MOUNTPOINT}/"

# Resolve the directory this script lives in: the defrag.sh helper and the
# per-directory log files are expected to sit next to this script.
# Quote every expansion so paths containing whitespace do not word-split,
# and bail out if any cd fails — running the loop below from the wrong
# working directory would defrag the wrong tree.
logdir=$(dirname "$0")
cd "$logdir" || exit 1
LOGDIR=$(pwd)
cd - || exit 1

# The below command is enough to make sure the new layout will be scaled
# across new nodes: removing the dht layout xattr from every directory
# forces glusterfs to recompute the layout over the current subvolume set.
find "${directory_to_be_scaled}" -type d -exec setfattr -x "trusted.glusterfs.dht" {} \;

# Now do a lookup on files so the scaling/re-hashing is done
find "${directory_to_be_scaled}" > /dev/null


# copy the defrag (to copy data across for new nodes (for linkfiles))
#

cd "${directory_to_be_scaled}" || exit 1
for dir in *; do
    # Only directories are defragmented; skip stray files at the top level
    # (the original loop would have passed them to defrag.sh as well).
    [ -d "$dir" ] || continue
    echo "Defragmenting directory ${directory_to_be_scaled}/$dir ($LOGDIR/defrag-store-$dir.log)"
    "$LOGDIR/defrag.sh" "$dir" >> "$LOGDIR/defrag-store-$dir.log" 2>&1
    echo "Completed directory ${directory_to_be_scaled}/$dir"
done