Diffstat (limited to 'extras/geo-rep')
-rw-r--r--  extras/geo-rep/Makefile.am             16
-rw-r--r--  extras/geo-rep/generate-gfid-file.sh   70
-rwxr-xr-x  extras/geo-rep/get-gfid.sh              7
-rw-r--r--  extras/geo-rep/gsync-sync-gfid.c      109
-rw-r--r--  extras/geo-rep/gsync-upgrade.sh       123
-rw-r--r--  extras/geo-rep/schedule_georep.py.in  492
-rw-r--r--  extras/geo-rep/slave-upgrade.sh       102
7 files changed, 919 insertions(+), 0 deletions(-)
diff --git a/extras/geo-rep/Makefile.am b/extras/geo-rep/Makefile.am
new file mode 100644
index 00000000000..09eff308ac4
--- /dev/null
+++ b/extras/geo-rep/Makefile.am
@@ -0,0 +1,16 @@
+scriptsdir = $(libexecdir)/glusterfs/scripts
+scripts_SCRIPTS = gsync-upgrade.sh generate-gfid-file.sh get-gfid.sh \
+ slave-upgrade.sh schedule_georep.py
+
+scripts_PROGRAMS = gsync-sync-gfid
+gsync_sync_gfid_CFLAGS = $(GF_CFLAGS) -Wall -I$(top_srcdir)/libglusterfs/src
+gsync_sync_gfid_LDFLAGS = $(GF_LDFLAGS)
+gsync_sync_gfid_LDADD = $(GF_LDADD) $(top_builddir)/libglusterfs/src/libglusterfs.la
+gsync_sync_gfid_SOURCES = gsync-sync-gfid.c
+gsync_sync_gfid_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \
+ -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src
+
+EXTRA_DIST = gsync-sync-gfid.c gsync-upgrade.sh generate-gfid-file.sh \
+ get-gfid.sh slave-upgrade.sh schedule_georep.py.in
+
+CLEANFILES = schedule_georep.py
diff --git a/extras/geo-rep/generate-gfid-file.sh b/extras/geo-rep/generate-gfid-file.sh
new file mode 100644
index 00000000000..14f104b986d
--- /dev/null
+++ b/extras/geo-rep/generate-gfid-file.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+#Usage: generate-gfid-file.sh <master-volfile-server:master-volume> <path-to-get-gfid.sh> <output-file> [dirs-list-file]
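+#
+# A sample invocation (hypothetical hostname and paths, assuming the
+# scripts are installed under /usr/libexec/glusterfs/scripts):
+#
+#   generate-gfid-file.sh master-host:mastervol \
+#       /usr/libexec/glusterfs/scripts/get-gfid.sh /tmp/master-gfid-file
+#
+# The output file then lists "<gfid> <path>" pairs, one per line, which
+# can later be fed to gsync-upgrade.sh or slave-upgrade.sh on the slave.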
+
+function get_gfids()
+{
+ GET_GFID_CMD=$1
+ OUTPUT_FILE=$2
+ DIR_PATH=$3
+ find "$DIR_PATH" -exec $GET_GFID_CMD {} \; >> $OUTPUT_FILE
+}
+
+function mount_client()
+{
+ local T; # temporary mount
+ local i; # inode number
+
+ VOLFILE_SERVER=$1;
+ VOLUME=$2;
+ GFID_CMD=$3;
+ OUTPUT=$4;
+
+ T=$(mktemp -d -t ${0##*/}.XXXXXX);
+
+ glusterfs -s $VOLFILE_SERVER --volfile-id $VOLUME $T;
+
+ i=$(stat -c '%i' $T);
+
+ [ "x$i" = "x1" ] || fatal "could not mount volume $VOLUME on $T";
+
+ cd $T;
+ rm -f $OUTPUT;
+ touch $OUTPUT;
+
+ if [ "$DIRS_FILE" = "." ]
+ then
+ get_gfids $GFID_CMD $OUTPUT "."
+ else
+ while read line
+ do
+ get_gfids $GFID_CMD $OUTPUT "$line"
+ done < $DIRS_FILE
+ fi;
+
+ cd -;
+
+ umount $T || fatal "could not umount $VOLUME from $T";
+
+ rmdir $T || warn "rmdir of $T failed";
+}
+
+
+function main()
+{
+ MASTER=$1
+ GET_GFID_CMD=$2
+ OUTPUT=$3
+
+ VOLFILE_SERVER=`echo $MASTER | sed -e 's/\(.*\):.*/\1/'`
+ VOLUME_NAME=`echo $MASTER | sed -e 's/.*:\(.*\)/\1/'`
+
+ if [ "$#" -lt 4 ]
+ then
+ DIRS_FILE="."
+ else
+ DIRS_FILE=$4
+ fi
+ mount_client $VOLFILE_SERVER $VOLUME_NAME $GET_GFID_CMD $OUTPUT $DIRS_FILE
+}
+
+main "$@";
diff --git a/extras/geo-rep/get-gfid.sh b/extras/geo-rep/get-gfid.sh
new file mode 100755
index 00000000000..a4d609b0bc5
--- /dev/null
+++ b/extras/geo-rep/get-gfid.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+ATTR_STR=`getfattr -h "$1" -n glusterfs.gfid.string`
+GLFS_PATH=`echo $ATTR_STR | sed -e 's/# file: \(.*\) glusterfs.gfid.string*/\1/g'`
+GFID=`echo $ATTR_STR | sed -e 's/.*glusterfs.gfid.string="\(.*\)"/\1/g'`
+
+echo "$GFID $GLFS_PATH"
diff --git a/extras/geo-rep/gsync-sync-gfid.c b/extras/geo-rep/gsync-sync-gfid.c
new file mode 100644
index 00000000000..47dca0413e9
--- /dev/null
+++ b/extras/geo-rep/gsync-sync-gfid.c
@@ -0,0 +1,109 @@
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <limits.h>
+#include <sys/types.h>
+#include <libgen.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/syscall.h>
+
+#ifndef UUID_CANONICAL_FORM_LEN
+#define UUID_CANONICAL_FORM_LEN 36
+#endif
+
+#ifndef GF_FUSE_AUX_GFID_HEAL
+#define GF_FUSE_AUX_GFID_HEAL "glusterfs.gfid.heal"
+#endif
+
+#define GLFS_LINE_MAX (PATH_MAX + (2 * UUID_CANONICAL_FORM_LEN))
+
+int
+main(int argc, char *argv[])
+{
+ char *file = NULL;
+ char *tmp = NULL;
+ char *tmp1 = NULL;
+ char *parent_dir = NULL;
+ char *gfid = NULL;
+ char *bname = NULL;
+ int ret = -1;
+ int len = 0;
+ FILE *fp = NULL;
+ char line[GLFS_LINE_MAX] = {
+ 0,
+ };
+ char *path = NULL;
+ void *blob = NULL;
+ void *tmp_blob = NULL;
+
+ if (argc != 2) {
+ /* each line in the file has the following format
+ * uuid-in-canonical-form path-relative-to-gluster-mount.
+ * Both uuid and relative path are from master mount.
+ */
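+ /* For example (sample entry, in the format written by get-gfid.sh):
+ * 22114455-57c5-46e9-a783-c40f83a72b09 /dir
+ */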
+ fprintf(stderr, "usage: %s <file-of-paths-to-be-synced>\n", argv[0]);
+ goto out;
+ }
+
+ file = argv[1];
+
+ fp = fopen(file, "r");
+ if (fp == NULL) {
+ fprintf(stderr, "cannot open %s for reading (%s)\n", file,
+ strerror(errno));
+ goto out;
+ }
+
+ while (fgets(line, GLFS_LINE_MAX, fp) != NULL) {
+ tmp = line;
+ path = gfid = line;
+
+ path += UUID_CANONICAL_FORM_LEN + 1;
+
+ while (isspace(*path))
+ path++;
+
+ len = strlen(line);
+ if ((len < GLFS_LINE_MAX) && (line[len - 1] == '\n'))
+ line[len - 1] = '\0';
+
+ line[UUID_CANONICAL_FORM_LEN] = '\0';
+
+ tmp = strdup(path);
+ tmp1 = strdup(path);
+ parent_dir = dirname(tmp);
+ bname = basename(tmp1);
+
+ /* gfid + '\0' + bname + '\0' */
+ len = UUID_CANONICAL_FORM_LEN + 1 + strlen(bname) + 1;
+
+ blob = calloc(1, len); /* zero-filled so the '\0' separators are in place */
+
+ memcpy(blob, gfid, UUID_CANONICAL_FORM_LEN);
+
+ tmp_blob = blob + UUID_CANONICAL_FORM_LEN + 1;
+
+ memcpy(tmp_blob, bname, strlen(bname));
+
+ ret = sys_lsetxattr(parent_dir, GF_FUSE_AUX_GFID_HEAL, blob, len, 0);
+ if (ret < 0) {
+ fprintf(stderr, "setxattr on %s/%s failed (%s)\n", parent_dir,
+ bname, strerror(errno));
+ }
+ memset(line, 0, GLFS_LINE_MAX);
+
+ free(blob);
+ free(tmp);
+ free(tmp1);
+ blob = NULL;
+ }
+
+ ret = 0;
+out:
+ if (fp)
+ fclose(fp);
+ return ret;
+}
diff --git a/extras/geo-rep/gsync-upgrade.sh b/extras/geo-rep/gsync-upgrade.sh
new file mode 100644
index 00000000000..0f73a33884b
--- /dev/null
+++ b/extras/geo-rep/gsync-upgrade.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+#usage: gsync-upgrade.sh <slave-volfile-server:slave-volume> <gfid-file>
+# <path-to-gsync-sync-gfid> <ssh-identity-file>
+#<slave-volfile-server>: a machine on which gluster cli can fetch slave volume info.
+# slave-volfile-server defaults to localhost.
+#
+#<gfid-file>: a file containing paths and their associated gfids
+# on master. The paths are relative to master mount point
+# (not absolute). An example extract of <gfid-file> can be,
+#
+# <extract>
+# 22114455-57c5-46e9-a783-c40f83a72b09 /dir
+# 25772386-3eb8-4550-a802-c3fdc938ca80 /dir/file
+# </extract>
+#
+#<ssh-identity-file>: file from which the identity (private key) for public key authentication is read.
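+#
+# A sample invocation (hypothetical hostname, paths and key file,
+# assuming gsync-sync-gfid is installed under
+# /usr/libexec/glusterfs/scripts):
+#
+#   gsync-upgrade.sh slave-host:slavevol /tmp/master-gfid-file \
+#       /usr/libexec/glusterfs/scripts/gsync-sync-gfid /root/.ssh/id_rsa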
+
+SLAVE_MOUNT='/tmp/glfs_slave'
+
+function SSH()
+{
+ HOST=$1
+ SSHKEY=$2
+
+ shift 2
+
+ ssh -qi $SSHKEY \
+ -oPasswordAuthentication=no \
+ -oStrictHostKeyChecking=no \
+ "$HOST" "$@";
+}
+
+function get_bricks()
+{
+ SSHKEY=$3
+
+ SSH $1 $SSHKEY "gluster volume info $2" | grep -E 'Brick[0-9]+' | sed -e 's/[^:]*:\(.*\)/\1/g'
+}
+
+function cleanup_brick()
+{
+ HOST=$1
+ BRICK=$2
+ SSHKEY=$3
+
+ # TODO: write a C program to receive a list of files and does cleanup on
+ # them instead of spawning a new setfattr process for each file if
+ # performance is bad.
+ SSH $HOST $SSHKEY "rm -rf $BRICK/.glusterfs/* && find $BRICK -exec setfattr -x trusted.gfid {} \;"
+}
+
+function cleanup_slave()
+{
+ SSHKEY=$2
+
+ VOLFILE_SERVER=`echo $1 | sed -e 's/\(.*\):.*/\1/'`
+ VOLUME_NAME=`echo $1 | sed -e 's/.*:\(.*\)/\1/'`
+
+ BRICKS=`get_bricks $VOLFILE_SERVER $VOLUME_NAME $SSHKEY`
+
+ for i in $BRICKS; do
+ HOST=`echo $i | sed -e 's/\(.*\):.*/\1/'`
+ BRICK=`echo $i | sed -e 's/.*:\(.*\)/\1/'`
+ cleanup_brick $HOST $BRICK $SSHKEY
+ done
+
+ SSH $VOLFILE_SERVER $SSHKEY "gluster --mode=script volume stop $VOLUME_NAME; gluster volume start $VOLUME_NAME";
+
+}
+
+function mount_client()
+{
+ local T; # temporary mount
+ local i; # inode number
+ VOLFILE_SERVER=$1
+ VOLUME=$2
+ GFID_FILE=$3
+ SYNC_CMD=$4
+
+ T=$(mktemp -d -t ${0##*/}.XXXXXX);
+
+ glusterfs --aux-gfid-mount -s $VOLFILE_SERVER --volfile-id $VOLUME $T;
+
+ i=$(stat -c '%i' $T);
+
+ [ "x$i" = "x1" ] || fatal "could not mount volume $VOLUME on $T";
+
+ cd $T;
+
+ $SYNC_CMD $GFID_FILE
+
+ cd -;
+
+ umount -l $T || fatal "could not umount $VOLUME from $T";
+
+ rmdir $T || warn "rmdir of $T failed";
+}
+
+function sync_gfids()
+{
+ SLAVE=$1
+ GFID_FILE=$2
+
+ SLAVE_VOLFILE_SERVER=`echo $SLAVE | sed -e 's/\(.*\):.*/\1/'`
+ SLAVE_VOLUME_NAME=`echo $SLAVE | sed -e 's/.*:\(.*\)/\1/'`
+
+ if [ "x$SLAVE_VOLFILE_SERVER" = "x" ]; then
+ SLAVE_VOLFILE_SERVER="localhost"
+ fi
+
+ mount_client $SLAVE_VOLFILE_SERVER $SLAVE_VOLUME_NAME $GFID_FILE $3
+}
+
+function upgrade()
+{
+ SLAVE=$1
+ GFID_FILE=$2
+ SYNC_CMD=$3
+ SSHKEY=$4
+
+ cleanup_slave $SLAVE $SSHKEY
+ sync_gfids $SLAVE $GFID_FILE $SYNC_CMD
+}
+
+upgrade "$@"
diff --git a/extras/geo-rep/schedule_georep.py.in b/extras/geo-rep/schedule_georep.py.in
new file mode 100644
index 00000000000..48b2b507060
--- /dev/null
+++ b/extras/geo-rep/schedule_georep.py.in
@@ -0,0 +1,492 @@
+#!/usr/bin/python3
+"""
+Schedule Geo-replication
+------------------------
+A tool to run Geo-replication when required. This can be used to
+schedule Geo-replication to run once a day via a cron entry such as
+
+ # Run daily at 08:30pm
+ 30 20 * * * root python /usr/share/glusterfs/scripts/schedule_georep.py \\
+ --no-color gv1 fvm1 gv2 >> /var/log/glusterfs/schedule_georep.log 2>&1
+
+This tool does the following,
+
+1. Stop Geo-replication if Started
+2. Start Geo-replication
+3. Set Checkpoint
+4. Check the Status and see whether the Checkpoint is complete (loop)
+5. If the Checkpoint is complete, Stop Geo-replication
+
+Usage:
+
+ python /usr/share/glusterfs/scripts/schedule_georep.py <MASTERVOL> \\
+ <SLAVEHOST> <SLAVEVOL>
+
+For example,
+
+ python /usr/share/glusterfs/scripts/schedule_georep.py gv1 fvm1 gv2
+
+"""
+import subprocess
+import time
+import xml.etree.cElementTree as etree
+import sys
+from contextlib import contextmanager
+import tempfile
+import os
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+
+ParseError = etree.ParseError if hasattr(etree, 'ParseError') else SyntaxError
+cache_data = {}
+
+SESSION_MOUNT_LOG_FILE = ("/var/log/glusterfs/geo-replication"
+ "/schedule_georep.mount.log")
+
+USE_CLI_COLOR = True
+mnt_list = []
+
+class GlusterBadXmlFormat(Exception):
+ """
+ Exception class for XML Parse Errors
+ """
+ pass
+
+
+def output_notok(msg, err="", exitcode=1):
+ if USE_CLI_COLOR:
+ out = "\033[31m[NOT OK]\033[0m {0}\n{1}\n"
+ else:
+ out = "[NOT OK] {0}\n{1}\n"
+ sys.stderr.write(out.format(msg, err))
+ sys.exit(exitcode)
+
+
+def output_warning(msg):
+ if USE_CLI_COLOR:
+ out = "\033[33m[ WARN]\033[0m {0}\n"
+ else:
+ out = "[ WARN] {0}\n"
+ sys.stderr.write(out.format(msg))
+
+
+def output_ok(msg):
+ if USE_CLI_COLOR:
+ out = "\033[32m[ OK]\033[0m {0}\n"
+ else:
+ out = "[ OK] {0}\n"
+ sys.stderr.write(out.format(msg))
+
+
+def execute(cmd, success_msg="", failure_msg="", exitcode=-1):
+ """
+    Generic wrapper to execute CLI commands. Returns the output on
+    success and optionally prints a success message.
+ On failure, exits after writing to stderr.
+ """
+    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                         stderr=subprocess.PIPE,
+                         universal_newlines=True)
+ out, err = p.communicate()
+ if p.returncode == 0:
+ if success_msg:
+ output_ok(success_msg)
+ return out
+ else:
+ if exitcode == 0:
+ return
+ err_msg = err if err else out
+ output_notok(failure_msg, err=err_msg, exitcode=exitcode)
+
+
+def cache_output_with_args(func):
+ """
+    Decorator to cache the output of a function for a given set of
+    arguments.
+    """
+    def wrapper(*args, **kwargs):
+        global cache_data
+        key = "_".join([func.__name__] + list(args))
+ if cache_data.get(key, None) is None:
+ cache_data[key] = func(*args, **kwargs)
+
+ return cache_data[key]
+ return wrapper
+
+
+def cleanup(hostname, volname, mnt):
+ """
+ Unmount the Volume and Remove the temporary directory
+ """
+ execute(["umount", "-l", mnt],
+ failure_msg="Unable to Unmount Gluster Volume "
+ "{0}:{1}(Mounted at {2})".format(hostname, volname, mnt))
+ execute(["rmdir", mnt],
+ failure_msg="Unable to Remove temp directory "
+ "{0}".format(mnt), exitcode=0)
+
+
+@contextmanager
+def glustermount(hostname, volname):
+ """
+ Context manager for Mounting Gluster Volume
+ Use as
+ with glustermount(HOSTNAME, VOLNAME) as MNT:
+ # Do your stuff
+ Automatically unmounts it in case of Exceptions/out of context
+ """
+ mnt = tempfile.mkdtemp(prefix="georepsetup_")
+ mnt_list.append(mnt)
+ execute(["@SBIN_DIR@/glusterfs",
+ "--volfile-server", hostname,
+ "--volfile-id", volname,
+ "-l", SESSION_MOUNT_LOG_FILE,
+ mnt],
+ failure_msg="Unable to Mount Gluster Volume "
+ "{0}:{1}".format(hostname, volname))
+    if os.path.ismount(mnt):
+        try:
+            yield mnt
+        finally:
+            cleanup(hostname, volname, mnt)
+    else:
+        output_notok("Unable to Mount Gluster Volume "
+                     "{0}:{1}".format(hostname, volname))
+
+
+@cache_output_with_args
+def get_bricks(volname):
+ """
+ Returns Bricks list, caches the Bricks list for a volume once
+ parsed.
+ """
+ value = []
+ cmd = ["@SBIN_DIR@/gluster", "volume", "info", volname, "--xml"]
+ info = execute(cmd)
+ try:
+ tree = etree.fromstring(info)
+ volume_el = tree.find('volInfo/volumes/volume')
+ for b in volume_el.findall('bricks/brick'):
+ value.append({"name": b.find("name").text,
+ "hostUuid": b.find("hostUuid").text})
+ except ParseError:
+ raise GlusterBadXmlFormat("Bad XML Format: %s" % " ".join(cmd))
+
+ return value
+
+
+def get_georep_status(mastervol, slave):
+ session_keys = set()
+ out = {}
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication"]
+ if mastervol is not None:
+ cmd += [mastervol]
+ if slave:
+ cmd += [slave]
+
+ cmd += ["status", "--xml"]
+ info = execute(cmd)
+
+ try:
+ tree = etree.fromstring(info)
+ # Get All Sessions
+ for volume_el in tree.findall("geoRep/volume"):
+ sessions_el = volume_el.find("sessions")
+ # Master Volume name if multiple Volumes
+ mvol = volume_el.find("name").text
+
+ # For each session, collect the details
+ for session in sessions_el.findall("session"):
+ session_slave = "{0}:{1}".format(mvol, session.find(
+ "session_slave").text)
+ session_keys.add(session_slave)
+ out[session_slave] = {}
+
+ for pair in session.findall('pair'):
+ master_brick = "{0}:{1}".format(
+ pair.find("master_node").text,
+ pair.find("master_brick").text
+ )
+
+ out[session_slave][master_brick] = {
+ "mastervol": mvol,
+ "slavevol": pair.find("slave").text.split("::")[-1],
+ "master_node": pair.find("master_node").text,
+ "master_brick": pair.find("master_brick").text,
+ "slave_user": pair.find("slave_user").text,
+ "slave": pair.find("slave").text,
+ "slave_node": pair.find("slave_node").text,
+ "status": pair.find("status").text,
+ "crawl_status": pair.find("crawl_status").text,
+ "entry": pair.find("entry").text,
+ "data": pair.find("data").text,
+ "meta": pair.find("meta").text,
+ "failures": pair.find("failures").text,
+ "checkpoint_completed": pair.find(
+ "checkpoint_completed").text,
+ "master_node_uuid": pair.find("master_node_uuid").text,
+ "last_synced": pair.find("last_synced").text,
+ "checkpoint_time": pair.find("checkpoint_time").text,
+ "checkpoint_completion_time":
+ pair.find("checkpoint_completion_time").text
+ }
+ except ParseError:
+ raise GlusterBadXmlFormat("Bad XML Format: %s" % " ".join(cmd))
+
+ return session_keys, out
+
+
+def get_offline_status(volname, brick, node_uuid, slave):
+ node, brick = brick.split(":")
+ if "@" not in slave:
+ slave_user = "root"
+ else:
+ slave_user, _ = slave.split("@")
+
+ return {
+ "mastervol": volname,
+ "slavevol": slave.split("::")[-1],
+ "master_node": node,
+ "master_brick": brick,
+ "slave_user": slave_user,
+ "slave": slave,
+ "slave_node": "N/A",
+ "status": "Offline",
+ "crawl_status": "N/A",
+ "entry": "N/A",
+ "data": "N/A",
+ "meta": "N/A",
+ "failures": "N/A",
+ "checkpoint_completed": "N/A",
+ "master_node_uuid": node_uuid,
+ "last_synced": "N/A",
+ "checkpoint_time": "N/A",
+ "checkpoint_completion_time": "N/A"
+ }
+
+
+def get(mastervol=None, slave=None):
+ """
+    Gets the list of Bricks of the Master Volume and collects the
+    respective Geo-rep status. The output is always ordered as in the
+    Master Volume's bricks list. If Geo-rep status is not available
+    for a brick, an Offline status entry is filled in for it.
+ """
+ out = []
+ session_keys, gstatus = get_georep_status(mastervol, slave)
+
+ for session in session_keys:
+ mvol, _, slave = session.split(":", 2)
+ slave = slave.replace("ssh://", "")
+ master_bricks = get_bricks(mvol)
+ out.append([])
+ for brick in master_bricks:
+ bname = brick["name"]
+ if gstatus.get(session) and gstatus[session].get(bname, None):
+ out[-1].append(gstatus[session][bname])
+ else:
+ out[-1].append(
+ get_offline_status(mvol, bname, brick["hostUuid"], slave))
+
+ return out
+
+
+def get_summary(mastervol, slave_url):
+ """
+    Wrapper around Geo-rep Status and Gluster Volume Info. Combines the
+    Bricks list with the Geo-rep Status output. If a Master Brick node
+    is down or its status is Faulty, the respective counter is
+    incremented. Also collects the Checkpoint status from all workers
+    and compares it with the number of Bricks.
+ """
+ down_rows = []
+ faulty_rows = []
+ out = []
+
+ status_data = get(mastervol, slave_url)
+
+ for session in status_data:
+ session_name = ""
+ summary = {
+ "active": 0,
+ "passive": 0,
+ "faulty": 0,
+ "initializing": 0,
+ "stopped": 0,
+ "created": 0,
+ "offline": 0,
+ "paused": 0,
+ "workers": 0,
+ "completed_checkpoints": 0,
+ "checkpoint": False,
+ "checkpoints_ok": False,
+ "ok": False
+ }
+
+ for row in session:
+ summary[row["status"].replace("...", "").lower()] += 1
+ summary["workers"] += 1
+ if row["checkpoint_completed"] == "Yes":
+ summary["completed_checkpoints"] += 1
+
+ session_name = "{0}=>{1}".format(
+ row["mastervol"],
+ row["slave"].replace("ssh://", "")
+ )
+
+ if row["status"] == "Faulty":
+ faulty_rows.append("{0}:{1}".format(row["master_node"],
+ row["master_brick"]))
+
+ if row["status"] == "Offline":
+ down_rows.append("{0}:{1}".format(row["master_node"],
+ row["master_brick"]))
+
+ if summary["active"] == summary["completed_checkpoints"] and \
+ summary["faulty"] == 0 and summary["offline"] == 0:
+ summary["checkpoints_ok"] = True
+
+ if summary["faulty"] == 0 and summary["offline"] == 0:
+ summary["ok"] = True
+
+ if session_name != "":
+ out.append([session_name, summary, faulty_rows, down_rows])
+
+ return out
+
+
+def touch_mount_root(mastervol):
+    # Create a Mount and Touch the Mount point root.
+    # Hack to make sure some event is available after setting the
+    # Checkpoint. Without this there is a chance that the Checkpoint
+    # never completes.
+ with glustermount("localhost", mastervol) as mnt:
+ execute(["touch", mnt])
+
+
+def main(args):
+ turns = 1
+
+ # Stop Force
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication", args.mastervol,
+ "%s::%s" % (args.slave, args.slavevol), "stop", "force"]
+ execute(cmd)
+ output_ok("Stopped Geo-replication")
+
+ # Set Checkpoint to NOW
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication", args.mastervol,
+ "%s::%s" % (args.slave, args.slavevol), "config", "checkpoint",
+ "now"]
+ execute(cmd)
+ output_ok("Set Checkpoint")
+
+ # Start the Geo-replication
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication", args.mastervol,
+ "%s::%s" % (args.slave, args.slavevol), "start"]
+ execute(cmd)
+ output_ok("Started Geo-replication and watching Status for "
+ "Checkpoint completion")
+
+ start_time = int(time.time())
+ duration = 0
+
+ # Sleep till Geo-rep initializes
+ time.sleep(60)
+
+ touch_mount_root(args.mastervol)
+
+ slave_url = "{0}::{1}".format(args.slave, args.slavevol)
+
+ # Loop to Check the Geo-replication Status and Checkpoint
+ # If All Status OK and all Checkpoints complete,
+ # Stop the Geo-replication and Log the Completeness
+ while True:
+ session_summary = get_summary(args.mastervol,
+ slave_url)
+ if len(session_summary) == 0:
+            # If the Status command fails with an "another transaction
+            # is in progress" error or any other error, the gluster
+            # command still produces XML output, but with a different
+            # message
+ output_warning("Unable to get Geo-replication Status")
+ else:
+ session_name, summary, faulty_rows, down_rows = session_summary[0]
+ chkpt_status = "COMPLETE" if summary["checkpoints_ok"] else \
+ "NOT COMPLETE"
+ ok_status = "OK" if summary["ok"] else "NOT OK"
+
+ if summary["ok"]:
+ output_ok("All Checkpoints {1}, "
+ "All status {2} (Turns {0:>3})".format(
+ turns, chkpt_status, ok_status))
+ else:
+ output_warning("All Checkpoints {1}, "
+ "All status {2} (Turns {0:>3})".format(
+ turns, chkpt_status, ok_status))
+
+ output_warning("Geo-rep workers Faulty/Offline, "
+ "Faulty: {0} Offline: {1}".format(
+ repr(faulty_rows),
+ repr(down_rows)))
+
+ if summary["checkpoints_ok"]:
+ output_ok("Stopping Geo-replication session now")
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication",
+ args.mastervol,
+ "%s::%s" % (args.slave, args.slavevol), "stop"]
+ execute(cmd)
+ break
+ else:
+                # If the Checkpoint is not complete after an iteration, a
+                # brick may have been down and just come back online. The
+                # SETATTR on the mount was not recorded, so issue a touch
+                # on the mount root again so that the stime increases and
+                # the Checkpoint can complete.
+ touch_mount_root(args.mastervol)
+
+ # Increment the turns and Sleep for 10 sec
+ turns += 1
+ duration = int(time.time()) - start_time
+ if args.timeout > 0 and duration > (args.timeout * 60):
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication",
+ args.mastervol,
+ "%s::%s" % (args.slave, args.slavevol), "stop", "force"]
+ execute(cmd)
+ output_notok("Timed out, Stopping Geo-replication("
+ "Duration: {0}sec)".format(duration))
+
+ time.sleep(args.interval)
+
+ for mnt in mnt_list:
+ execute(["rmdir", mnt],
+ failure_msg="Unable to Remove temp directory "
+ "{0}".format(mnt), exitcode=0)
+
+if __name__ == "__main__":
+ parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
+ description=__doc__)
+ parser.add_argument("mastervol", help="Master Volume Name")
+ parser.add_argument("slave",
+ help="Slave hostname "
+ "(<username>@SLAVEHOST or SLAVEHOST)",
+ metavar="SLAVE")
+ parser.add_argument("slavevol", help="Slave Volume Name")
+ parser.add_argument("--interval", help="Interval in Seconds. "
+ "Wait time before each status check",
+ type=int, default=10)
+ parser.add_argument("--timeout", help="Timeout in minutes. Script will "
+ "stop Geo-replication if Checkpoint is not complete "
+ "in the specified timeout time", type=int,
+ default=0)
+ parser.add_argument("--no-color", help="Don't use Color in CLI output",
+ action="store_true")
+ args = parser.parse_args()
+ if args.no_color:
+ USE_CLI_COLOR = False
+ try:
+ # Check for session existence
+ cmd = ["@SBIN_DIR@/gluster", "volume", "geo-replication",
+ args.mastervol, "%s::%s" % (args.slave, args.slavevol), "status"]
+ execute(cmd)
+ main(args)
+ except KeyboardInterrupt:
+ for mnt in mnt_list:
+ execute(["umount", "-l", mnt],
+ failure_msg="Unable to Unmount Gluster Volume "
+ "Mounted at {0}".format(mnt), exitcode=0)
+ execute(["rmdir", mnt],
+ failure_msg="Unable to Remove temp directory "
+ "{0}".format(mnt), exitcode=0)
+ output_notok("Exiting...")
diff --git a/extras/geo-rep/slave-upgrade.sh b/extras/geo-rep/slave-upgrade.sh
new file mode 100644
index 00000000000..3a37f8e3579
--- /dev/null
+++ b/extras/geo-rep/slave-upgrade.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+#usage: slave-upgrade.sh <volfile-server:volname> <gfid-file>
+# <path-to-gsync-sync-gfid>
+#<slave-volfile-server>: a machine on which gluster cli can fetch slave volume info.
+# slave-volfile-server defaults to localhost.
+#
+#<gfid-file>: a file containing paths and their associated gfids
+# on master. The paths are relative to master mount point
+# (not absolute). An example extract of <gfid-file> can be,
+#
+# <extract>
+# 22114455-57c5-46e9-a783-c40f83a72b09 /dir
+# 25772386-3eb8-4550-a802-c3fdc938ca80 /dir/file
+# </extract>
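+#
+# A sample invocation (hypothetical paths, assuming gsync-sync-gfid is
+# installed under /usr/libexec/glusterfs/scripts):
+#
+#   slave-upgrade.sh localhost:slavevol /tmp/master-gfid-file \
+#       /usr/libexec/glusterfs/scripts/gsync-sync-gfid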
+
+function get_bricks()
+{
+ gluster volume info $1 | grep -E 'Brick[0-9]+' | sed -e 's/[^:]*:\(.*\)/\1/g'
+}
+
+function cleanup_brick()
+{
+ HOST=$1
+ BRICK=$2
+
+ # TODO: write a C program to receive a list of files and does cleanup on
+ # them instead of spawning a new setfattr process for each file if
+ # performance is bad.
+ ssh $HOST "rm -rf $BRICK/.glusterfs/* && find $BRICK -exec setfattr -x trusted.gfid {} \; 2>/dev/null"
+}
+
+function cleanup_slave()
+{
+ VOLUME_NAME=`echo $1 | sed -e 's/.*:\(.*\)/\1/'`
+
+ BRICKS=`get_bricks $VOLUME_NAME`
+
+ for i in $BRICKS; do
+ HOST=`echo $i | sed -e 's/\(.*\):.*/\1/'`
+ BRICK=`echo $i | sed -e 's/.*:\(.*\)/\1/'`
+ cleanup_brick $HOST $BRICK
+ done
+
+ # Now restart the volume
+ gluster --mode=script volume stop $VOLUME_NAME;
+ gluster volume start $VOLUME_NAME;
+}
+
+function mount_client()
+{
+ local T; # temporary mount
+ local i; # inode number
+
+ VOLUME_NAME=$2;
+ GFID_FILE=$3
+ SYNC_CMD=$4
+
+ T=$(mktemp -d -t ${0##*/}.XXXXXX);
+
+ glusterfs --aux-gfid-mount -s $1 --volfile-id $VOLUME_NAME $T;
+
+ i=$(stat -c '%i' $T);
+
+ [ "x$i" = "x1" ] || fatal "could not mount volume $VOLUME_NAME on $T";
+
+ cd $T;
+
+ $SYNC_CMD $GFID_FILE
+
+ cd -;
+
+ umount $T || fatal "could not umount $VOLUME_NAME from $T";
+
+ rmdir $T || warn "rmdir of $T failed";
+}
+
+function sync_gfids()
+{
+ SLAVE=$1
+ GFID_FILE=$2
+ SYNC_CMD=$3
+
+ SLAVE_VOLFILE_SERVER=`echo $SLAVE | sed -e 's/\(.*\):.*/\1/'`
+ SLAVE_VOLUME_NAME=`echo $SLAVE | sed -e 's/.*:\(.*\)/\1/'`
+
+ if [ "x$SLAVE_VOLFILE_SERVER" = "x" ]; then
+ SLAVE_VOLFILE_SERVER="localhost"
+ fi
+
+ mount_client $SLAVE_VOLFILE_SERVER $SLAVE_VOLUME_NAME $GFID_FILE $SYNC_CMD
+}
+
+function upgrade()
+{
+ SLAVE=$1
+ GFID_FILE=$2
+ SYNC_CMD=$3
+
+ cleanup_slave $SLAVE
+
+ sync_gfids $SLAVE $GFID_FILE $SYNC_CMD
+}
+
+upgrade "$@"