author    Pavan Sondur <pavan@gluster.com>    2010-01-19 08:11:05 +0000
committer Anand V. Avati <avati@dev.gluster.com>    2010-01-19 05:56:39 -0800
commit    a6a1f596a443b6a015dca6435f1d22fc582acc80 (patch)
tree      2a3f876cc0eab33a225978d306a96a37a3e8f9a4 /extras
parent    a23185f3a43ec95a56af9f0f543b67a1fcfb4852 (diff)
extras: Add defrag scripts to the repository
Signed-off-by: Pavan Vilas Sondur <pavan@gluster.com>
Signed-off-by: Anand V. Avati <avati@dev.gluster.com>
BUG: 478 (Add defrag scripts into glusterfs)
URL: http://bugs.gluster.com/cgi-bin/bugzilla3/show_bug.cgi?id=478
Diffstat (limited to 'extras')
-rw-r--r--  extras/Makefile.am         |  2
-rw-r--r--  extras/backend-cleanup.sh  | 28
-rw-r--r--  extras/defrag.sh           | 60
-rw-r--r--  extras/scale-n-defrag.sh   | 37
4 files changed, 126 insertions(+), 1 deletion(-)
diff --git a/extras/Makefile.am b/extras/Makefile.am
index 8ec2edf05cc..8ad2f6302ff 100644
--- a/extras/Makefile.am
+++ b/extras/Makefile.am
@@ -5,5 +5,5 @@ EditorMode_DATA = glusterfs-mode.el glusterfs.vim
SUBDIRS = init.d benchmarking volgen
-EXTRA_DIST = specgen.scm MacOSX/Portfile glusterfs-mode.el glusterfs.vim migrate-unify-to-distribute.sh backend-xattr-sanitize.sh
+EXTRA_DIST = specgen.scm MacOSX/Portfile glusterfs-mode.el glusterfs.vim migrate-unify-to-distribute.sh backend-xattr-sanitize.sh backend-cleanup.sh defrag.sh scale-n-defrag.sh
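Listing the scripts in EXTRA_DIST only packages them: automake includes EXTRA_DIST files in the release tarball produced by 'make dist', without building or installing anything. A quick way to confirm they get picked up (the tarball name pattern is an assumption that depends on the configured version):

    # From the top of the source tree, after ./autogen.sh && ./configure:
    make dist
    tar -tzf glusterfs-*.tar.gz | grep -E 'backend-cleanup|defrag'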
diff --git a/extras/backend-cleanup.sh b/extras/backend-cleanup.sh
new file mode 100644
index 00000000000..755161f18c9
--- /dev/null
+++ b/extras/backend-cleanup.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+# This script can be used to clean up the 'cluster/distribute' translator's
+# stale link files. One may choose to run it only when the number of
+# subvolumes of a distribute volume has been increased (or decreased).
+#
+# This script has to be run on the servers that export the data to
+# GlusterFS.
+#
+# (c) 2009 Gluster Inc <http://www.gluster.com/>
+
+set -e
+
+# Change the variable below to match the setup.
+export_directory="/export/glusterfs"
+
+clean_dir()
+{
+ # Remove the DHT 'link' files (files with the sticky bit set) from the backend
+ find "${export_directory}" -type f -perm +01000 -exec rm -v '{}' \;
+}
+
+main()
+{
+ clean_dir ;
+}
+
+main "$@"
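The sticky bit is how DHT marks its pointer files: a link file is a zero-byte file with mode 01000 (shown as 'T' by ls) and a trusted.glusterfs.dht.linkto xattr naming the subvolume that holds the real data. Before deleting anything, a dry-run variant along these lines can show what the cleanup would remove (the export path is an assumption taken from the script's default):

    #!/bin/sh
    # Dry run: list sticky-bit files and their linkto xattr instead of deleting.
    export_directory="/export/glusterfs"
    # Note: newer GNU find spells the permission test '-perm /01000'.
    find "${export_directory}" -type f -perm +01000 | while read -r f; do
        ls -l "$f"
        # Print the DHT link target, if the xattr is present
        getfattr -n trusted.glusterfs.dht.linkto -e text "$f" 2>/dev/null
    done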
diff --git a/extras/defrag.sh b/extras/defrag.sh
new file mode 100644
index 00000000000..465b0979488
--- /dev/null
+++ b/extras/defrag.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+
+# This script is called from the 'scale-n-defrag.sh' script.
+# Do not run it standalone.
+#
+#
+
+set -e
+
+CP="cp"
+MV="mv"
+
+scan_dir()
+{
+ path=$1;
+ find "$path" -type f -perm +01000 -exec $0 '{}' \;
+}
+
+rsync_filename()
+{
+ path=$1
+ dir=$(dirname "$path");
+ file=$(basename "$path");
+
+ echo "$dir/.$file.zr$$";
+}
+
+relocate_file()
+{
+ path=$1;
+ tmp_path=$(rsync_filename "$path");
+
+ pre_mtime=$(stat -c '%Y' "$path");
+ $CP -a "$path" "$tmp_path";
+ post_mtime=$(stat -c '%Y' "$path");
+
+ if [ "$pre_mtime" = "$post_mtime" ]; then
+ chmod -t "$tmp_path";
+ $MV "$tmp_path" "$path";
+ echo "file '$path' relocated"
+ else
+ echo "file '$path' modified during defrag. skipping"
+ rm -f "$tmp_path";
+ fi
+}
+
+main()
+{
+ path="$1";
+
+ if [ -d "$path" ]; then
+ scan_dir "$path";
+ else
+ relocate_file "$@";
+ fi
+
+ usleep 500000 # 500000 microseconds = 500ms; throttles the crawl
+}
+
+main "$1"
diff --git a/extras/scale-n-defrag.sh b/extras/scale-n-defrag.sh
new file mode 100644
index 00000000000..1031b3931a8
--- /dev/null
+++ b/extras/scale-n-defrag.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+# This script runs over the GlusterFS mountpoint (from just one client)
+# to redistribute data after the distribute translator's subvolume
+# count changes.
+#
+# (c) 2009 Gluster Inc, <http://www.gluster.com/>
+#
+#
+# Make sure the following variables are properly initialized
+
+MOUNTPOINT=/tmp/testdir
+directory_to_be_scaled="${MOUNTPOINT}/"
+
+logdir=$(dirname "$0")
+cd "$logdir"
+LOGDIR=$(pwd)
+cd -
+
+# The command below removes the existing layout xattr from every directory,
+# so that a fresh layout spanning the new nodes is assigned on lookup.
+find "${directory_to_be_scaled}" -type d -exec setfattr -x "trusted.glusterfs.dht" '{}' \;
+
+# Now do a lookup on the files so the re-hashing takes effect
+find "${directory_to_be_scaled}" > /dev/null
+
+
+# Run defrag.sh on each directory to move data that now hashes to the
+# new nodes (replacing the link files with the actual files).
+
+
+cd "${directory_to_be_scaled}"
+for dir in *; do
+ echo "Defragmenting directory ${directory_to_be_scaled}/$dir ($LOGDIR/defrag-store-$dir.log)"
+ "$LOGDIR/defrag.sh" "$dir" >> "$LOGDIR/defrag-store-$dir.log" 2>&1
+ echo "Completed directory ${directory_to_be_scaled}/$dir"
+done
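Taken together, the flow is: wipe the directory layouts, trigger lookups to assign new layouts, then rewrite files so the data follows the new hash. A hedged usage sketch, assuming new bricks have already been added and the volume is mounted at /mnt/glusterfs:

    # Edit the variable at the top of scale-n-defrag.sh first, e.g.:
    #   MOUNTPOINT=/mnt/glusterfs
    sh ./scale-n-defrag.sh
    # Per-directory progress is appended next to the scripts in
    # defrag-store-<dir>.log, one log per top-level directory.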