diff options
Diffstat (limited to 'geo-replication/src')
| -rw-r--r-- | geo-replication/src/Makefile.am | 48 | ||||
| -rw-r--r-- | geo-replication/src/gsyncd.c | 402 | ||||
| -rwxr-xr-x | geo-replication/src/gverify.sh | 276 | ||||
| -rw-r--r-- | geo-replication/src/peer_georep-sshkey.py.in | 116 | ||||
| -rwxr-xr-x | geo-replication/src/peer_gsec_create.in | 24 | ||||
| -rw-r--r-- | geo-replication/src/peer_mountbroker.in | 211 | ||||
| -rw-r--r-- | geo-replication/src/peer_mountbroker.py.in | 401 | ||||
| -rw-r--r-- | geo-replication/src/procdiggy.c | 136 | ||||
| -rw-r--r-- | geo-replication/src/procdiggy.h | 21 | ||||
| -rwxr-xr-x | geo-replication/src/set_geo_rep_pem_keys.sh | 58 |
10 files changed, 1693 insertions, 0 deletions
diff --git a/geo-replication/src/Makefile.am b/geo-replication/src/Makefile.am new file mode 100644 index 00000000000..9937a0bd026 --- /dev/null +++ b/geo-replication/src/Makefile.am @@ -0,0 +1,48 @@ +gsyncddir = $(GLUSTERFS_LIBEXECDIR) + +gsyncd_SCRIPTS = gverify.sh peer_gsec_create \ + set_geo_rep_pem_keys.sh peer_mountbroker peer_mountbroker.py \ + peer_georep-sshkey.py + +# peer_gsec_create and peer_add_secret_pub are not added to +# EXTRA_DIST as it's derived from a .in file +EXTRA_DIST = gverify.sh set_geo_rep_pem_keys.sh peer_mountbroker.py.in \ + peer_georep-sshkey.py.in + +gsyncd_PROGRAMS = gsyncd + +gsyncd_SOURCES = gsyncd.c procdiggy.c + +gsyncd_LDADD = $(GF_LDADD) $(top_builddir)/libglusterfs/src/libglusterfs.la + +gsyncd_LDFLAGS = $(GF_LDFLAGS) + +noinst_HEADERS = procdiggy.h + +AM_CPPFLAGS = $(GF_CPPFLAGS) -I$(top_srcdir)/libglusterfs/src \ + -I$(top_srcdir)/rpc/xdr/src -I$(top_builddir)/rpc/xdr/src \ + -DGSYNCD_PREFIX=\"$(GLUSTERFS_LIBEXECDIR)\" -DUSE_LIBGLUSTERFS \ + -DSBIN_DIR=\"$(sbindir)\" -DPYTHON=\"$(PYTHON)\" + +AM_CFLAGS = -Wall $(GF_CFLAGS) + +CLEANFILES = + +$(top_builddir)/libglusterfs/src/libglusterfs.la: + $(MAKE) -C $(top_builddir)/libglusterfs/src/ all + + +install-exec-hook: + $(mkdir_p) $(DESTDIR)$(sbindir) + rm -f $(DESTDIR)$(sbindir)/gluster-mountbroker + ln -s $(GLUSTERFS_LIBEXECDIR)/peer_mountbroker.py \ + $(DESTDIR)$(sbindir)/gluster-mountbroker + + rm -f $(DESTDIR)$(sbindir)/gluster-georep-sshkey + ln -s $(GLUSTERFS_LIBEXECDIR)/peer_georep-sshkey.py \ + $(DESTDIR)$(sbindir)/gluster-georep-sshkey + + +uninstall-hook: + rm -f $(DESTDIR)$(sbindir)/gluster-mountbroker + rm -f $(DESTDIR)$(sbindir)/gluster-georep-sshkey diff --git a/geo-replication/src/gsyncd.c b/geo-replication/src/gsyncd.c new file mode 100644 index 00000000000..b5aeec5bf33 --- /dev/null +++ b/geo-replication/src/gsyncd.c @@ -0,0 +1,402 @@ +/* + Copyright (c) 2011-2012 Red Hat, Inc. <http://www.redhat.com> + This file is part of GlusterFS. 
+ + This file is licensed to you under your choice of the GNU Lesser + General Public License, version 3 or any later version (LGPLv3 or + later), or the GNU General Public License, version 2 (GPLv2), in all + cases as published by the Free Software Foundation. +*/ +#include <glusterfs/compat.h> +#include <glusterfs/syscall.h> + +#include <stdlib.h> +#include <stdio.h> +#include <unistd.h> +#include <string.h> +#include <sys/param.h> /* for PATH_MAX */ + +/* NOTE (USE_LIBGLUSTERFS): + * ------------------------ + * When USE_LIBGLUSTERFS debugging sumbol is passed; perform + * glusterfs translator like initialization so that glusterfs + * globals, contexts are valid when glustefs api's are invoked. + * We unconditionally pass then while building gsyncd binary. + */ +#ifdef USE_LIBGLUSTERFS +#include <glusterfs/glusterfs.h> +#include <glusterfs/globals.h> +#include <glusterfs/defaults.h> +#endif + +#include <glusterfs/common-utils.h> +#include <glusterfs/run.h> +#include "procdiggy.h" + +#define _GLUSTERD_CALLED_ "_GLUSTERD_CALLED_" +#define _GSYNCD_DISPATCHED_ "_GSYNCD_DISPATCHED_" +#define GSYNCD_CONF_TEMPLATE "geo-replication/gsyncd_template.conf" +#define GSYNCD_PY "gsyncd.py" +#define RSYNC "rsync" + +int restricted = 0; + +static int +duplexpand(void **buf, size_t tsiz, size_t *len) +{ + size_t osiz = tsiz * *len; + char *p = realloc(*buf, osiz << 1); + if (!p) { + return -1; + } + + memset(p + osiz, 0, osiz); + *buf = p; + *len <<= 1; + + return 0; +} + +static int +str2argv(char *str, char ***argv) +{ + char *p = NULL; + char *savetok = NULL; + char *temp = NULL; + char *temp1 = NULL; + int argc = 0; + size_t argv_len = 32; + int ret = 0; + int i = 0; + + assert(str); + temp = str = strdup(str); + if (!str) + goto error; + + *argv = calloc(argv_len, sizeof(**argv)); + if (!*argv) + goto error; + + while ((p = strtok_r(str, " ", &savetok))) { + str = NULL; + + argc++; + if (argc == argv_len) { + ret = duplexpand((void *)argv, sizeof(**argv), &argv_len); + if 
(ret == -1) + goto error; + } + temp1 = strdup(p); + if (!temp1) + goto error; + (*argv)[argc - 1] = temp1; + } + + free(temp); + return argc; + +error: + fprintf(stderr, "out of memory\n"); + free(temp); + for (i = 0; i < argc - 1; i++) + free((*argv)[i]); + free(*argv); + return -1; +} + +static int +invoke_gsyncd(int argc, char **argv) +{ + int i = 0; + int j = 0; + char *nargv[argc + 4]; + char *python = NULL; + + if (chdir("/") == -1) + goto error; + + j = 0; + python = getenv("PYTHON"); + if (!python) + python = PYTHON; + nargv[j++] = python; + nargv[j++] = GSYNCD_PREFIX "/python/syncdaemon/" GSYNCD_PY; + for (i = 1; i < argc; i++) + nargv[j++] = argv[i]; + + nargv[j++] = NULL; + + execvp(python, nargv); + + fprintf(stderr, "exec of '%s' failed\n", python); + return 127; + +error: + fprintf(stderr, "gsyncd initializaion failed\n"); + return 1; +} + +static int +find_gsyncd(pid_t pid, pid_t ppid, char *name, void *data) +{ + char buf[NAME_MAX * 2] = { + 0, + }; + char path[PATH_MAX] = { + 0, + }; + char *p = NULL; + int zeros = 0; + int ret = 0; + int fd = -1; + pid_t *pida = (pid_t *)data; + + if (ppid != pida[0]) + return 0; + + snprintf(path, sizeof path, PROC "/%d/cmdline", pid); + fd = open(path, O_RDONLY); + if (fd == -1) + return 0; + ret = sys_read(fd, buf, sizeof(buf)); + sys_close(fd); + if (ret == -1) + return 0; + for (zeros = 0, p = buf; zeros < 2 && p < buf + ret; p++) + zeros += !*p; + + ret = 0; + switch (zeros) { + case 2: + if ((strcmp(basename(buf), basename(PYTHON)) || + strcmp(basename(buf + strlen(buf) + 1), GSYNCD_PY)) == 0) { + ret = 1; + break; + } + /* fallthrough */ + case 1: + if (strcmp(basename(buf), GSYNCD_PY) == 0) + ret = 1; + } + + if (ret == 1) { + if (pida[1] != -1) { + fprintf(stderr, GSYNCD_PY " sibling is not unique"); + return -1; + } + pida[1] = pid; + } + + return 0; +} + +static int +invoke_rsync(int argc, char **argv) +{ + int i = 0; + char path[PATH_MAX] = { + 0, + }; + pid_t pid = -1; + pid_t ppid = -1; + pid_t 
pida[] = {-1, -1}; + char *name = NULL; + char buf[PATH_MAX + 1] = { + 0, + }; + int ret = 0; + + assert(argv[argc] == NULL); + + if (argc < 2 || strcmp(argv[1], "--server") != 0) + goto error; + + for (i = 2; i < argc && argv[i][0] == '-'; i++) + ; + + if (!(i == argc - 2 && strcmp(argv[i], ".") == 0 && + argv[i + 1][0] == '/')) { + fprintf(stderr, "need an rsync invocation without protected args\n"); + goto error; + } + + /* look up sshd we are spawned from */ + for (pid = getpid();; pid = ppid) { + ppid = pidinfo(pid, &name); + if (ppid < 0) { + fprintf(stderr, "sshd ancestor not found\n"); + goto error; + } + if (strcmp(name, "sshd") == 0) { + GF_FREE(name); + break; + } + GF_FREE(name); + } + /* look up "ssh-sibling" gsyncd */ + pida[0] = pid; + ret = prociter(find_gsyncd, pida); + if (ret == -1 || pida[1] == -1) { + fprintf(stderr, "gsyncd sibling not found\n"); + goto error; + } + /* check if rsync target matches gsyncd target */ + snprintf(path, sizeof path, PROC "/%d/cwd", pida[1]); + ret = sys_readlink(path, buf, sizeof(buf)); + if (ret == -1 || ret == sizeof(buf)) + goto error; + if (strcmp(argv[argc - 1], "/") == 0 /* root dir cannot be a target */ || + (strcmp(argv[argc - 1], path) /* match against gluster target */ && + strcmp(argv[argc - 1], buf) /* match against file target */) != 0) { + fprintf(stderr, "rsync target does not match " GEOREP " session\n"); + goto error; + } + + argv[0] = RSYNC; + + execvp(RSYNC, argv); + + fprintf(stderr, "exec of " RSYNC " failed\n"); + return 127; + +error: + fprintf(stderr, "disallowed " RSYNC " invocation\n"); + return 1; +} + +static int +invoke_gluster(int argc, char **argv) +{ + int i = 0; + int j = 0; + int optsover = 0; + char *ov = NULL; + + for (i = 1; i < argc; i++) { + ov = strtail(argv[i], "--"); + if (ov && !optsover) { + if (*ov == '\0') + optsover = 1; + continue; + } + switch (++j) { + case 1: + if (strcmp(argv[i], "volume") != 0) + goto error; + break; + case 2: + if (strcmp(argv[i], "info") != 0) 
+ goto error; + break; + case 3: + break; + default: + goto error; + } + } + + argv[0] = "gluster"; + execvp(SBIN_DIR "/gluster", argv); + fprintf(stderr, "exec of gluster failed\n"); + return 127; + +error: + fprintf(stderr, "disallowed gluster invocation\n"); + return 1; +} + +struct invocable { + char *name; + int (*invoker)(int argc, char **argv); +}; + +struct invocable invocables[] = {{"rsync", invoke_rsync}, + {"gsyncd", invoke_gsyncd}, + {"gluster", invoke_gluster}, + {NULL, NULL}}; + +int +main(int argc, char **argv) +{ + int ret = -1; + char *evas = NULL; + struct invocable *i = NULL; + char *b = NULL; + char *sargv = NULL; + int j = 0; + +#ifdef USE_LIBGLUSTERFS + glusterfs_ctx_t *ctx = NULL; + + ctx = glusterfs_ctx_new(); + if (!ctx) + return ENOMEM; + + if (glusterfs_globals_init(ctx)) + return 1; + + THIS->ctx = ctx; + ret = default_mem_acct_init(THIS); + if (ret) { + fprintf(stderr, "internal error: mem accounting failed\n"); + return 1; + } +#endif + + evas = getenv(_GLUSTERD_CALLED_); + if (evas && strcmp(evas, "1") == 0) + /* OK, we know glusterd called us, no need to look for further config + *...although this conclusion should not inherit to our children + */ + unsetenv(_GLUSTERD_CALLED_); + else { + /* we regard all gsyncd invocations unsafe + * that do not come from glusterd and + * therefore restrict it + */ + restricted = 1; + + if (!getenv(_GSYNCD_DISPATCHED_)) { + evas = getenv("SSH_ORIGINAL_COMMAND"); + if (evas) + sargv = evas; + else { + evas = getenv("SHELL"); + if (evas && strcmp(basename(evas), "gsyncd") == 0 && + argc == 3 && strcmp(argv[1], "-c") == 0) + sargv = argv[2]; + } + } + } + + if (!(sargv && restricted)) + return invoke_gsyncd(argc, argv); + + argc = str2argv(sargv, &argv); + + if (argc == -1) { + fprintf(stderr, "internal error\n"); + return 1; + } + + if (setenv(_GSYNCD_DISPATCHED_, "1", 1) == -1) { + fprintf(stderr, "internal error\n"); + goto out; + } + + b = basename(argv[0]); + for (i = invocables; i->name; i++) { + 
if (strcmp(b, i->name) == 0) + return i->invoker(argc, argv); + } + + fprintf(stderr, "invoking %s in restricted SSH session is not allowed\n", + b); + +out: + for (j = 1; j < argc; j++) + free(argv[j]); + free(argv); + return 1; +} diff --git a/geo-replication/src/gverify.sh b/geo-replication/src/gverify.sh new file mode 100755 index 00000000000..f5f70d245e0 --- /dev/null +++ b/geo-replication/src/gverify.sh @@ -0,0 +1,276 @@ +#!/bin/bash + +# Script to verify the Master and Slave Gluster compatibility. +# To use ./gverify <master volume> <slave user> <slave host> <slave volume> <ssh port> <log file> +# Returns 0 if master and slave compatible. + +# Considering buffer_size 100MB +BUFFER_SIZE=104857600; +SSH_PORT=$5; +master_log_file=`gluster --print-logdir`/geo-replication/gverify-mastermnt.log +slave_log_file=`gluster --print-logdir`/geo-replication/gverify-slavemnt.log + +function SSHM() +{ + if [[ -z "${GR_SSH_IDENTITY_KEY}" ]]; then + ssh -p ${SSH_PORT} -q \ + -oPasswordAuthentication=no \ + -oStrictHostKeyChecking=no \ + -oControlMaster=yes \ + "$@"; + else + ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} -q \ + -oPasswordAuthentication=no \ + -oStrictHostKeyChecking=no \ + -oControlMaster=yes \ + "$@"; + fi +} + +function get_inode_num() +{ + local os + case `uname -s` in + NetBSD) os="NetBSD";; + Linux) os="Linux";; + *) os="Default";; + esac + + if [[ "X$os" = "XNetBSD" ]]; then + echo $(stat -f "%i" "$1") + else + echo $(stat -c "%i" "$1") + fi +} + +function umount_lazy() +{ + local os + case `uname -s` in + NetBSD) os="NetBSD";; + Linux) os="Linux";; + *) os="Default";; + esac + + if [[ "X$os" = "XNetBSD" ]]; then + umount -f -R "$1" + else + umount -l "$1" + fi; +} + +function disk_usage() +{ + local os + case `uname -s` in + NetBSD) os="NetBSD";; + Linux) os="Linux";; + *) os="Default";; + esac + + if [[ "X$os" = "XNetBSD" ]]; then + echo $(df -P "$1" | tail -1) + else + echo $(df -P -B1 "$1" | tail -1) + fi; + +} + +function cmd_slave() +{ + local 
cmd_line; + cmd_line=$(cat <<EOF +function do_verify() { +ver=\$(gluster --version | head -1 | cut -f2 -d " "); +echo \$ver; +}; +source /etc/profile && do_verify; +EOF +); + +echo $cmd_line; +} + +function master_stats() +{ + MASTERVOL=$1; + local inet6=$2; + local d; + local i; + local disk_size; + local used_size; + local ver; + local m_status; + + d=$(mktemp -d -t ${0##*/}.XXXXXX 2>/dev/null); + if [ "$inet6" = "inet6" ]; then + glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --xlator-option="transport.address-family=inet6" --volfile-id $MASTERVOL -l $master_log_file $d; + else + glusterfs -s localhost --xlator-option="*dht.lookup-unhashed=off" --volfile-id $MASTERVOL -l $master_log_file $d; + fi + + i=$(get_inode_num $d); + if [[ "$i" -ne "1" ]]; then + echo 0:0; + exit 1; + fi; + cd $d; + disk_size=$(disk_usage $d | awk "{print \$2}"); + used_size=$(disk_usage $d | awk "{print \$3}"); + umount_lazy $d; + rmdir $d; + ver=$(gluster --version | head -1 | cut -f2 -d " "); + m_status=$(echo "$disk_size:$used_size:$ver"); + echo $m_status +} + + +function slave_stats() +{ + SLAVEUSER=$1; + SLAVEHOST=$2; + SLAVEVOL=$3; + local inet6=$4; + local cmd_line; + local ver; + local status; + + d=$(mktemp -d -t ${0##*/}.XXXXXX 2>/dev/null); + if [ "$inet6" = "inet6" ]; then + glusterfs --xlator-option="*dht.lookup-unhashed=off" --xlator-option="transport.address-family=inet6" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file $d; + else + glusterfs --xlator-option="*dht.lookup-unhashed=off" --volfile-server $SLAVEHOST --volfile-id $SLAVEVOL -l $slave_log_file $d; + fi + + i=$(get_inode_num $d); + if [[ "$i" -ne "1" ]]; then + echo 0:0; + exit 1; + fi; + cd $d; + disk_size=$(disk_usage $d | awk "{print \$2}"); + used_size=$(disk_usage $d | awk "{print \$3}"); + no_of_files=$(find $d -maxdepth 1 -path "$d/.trashcan" -prune -o -path "$d" -o -print0 -quit); + umount_lazy $d; + rmdir $d; + + cmd_line=$(cmd_slave); + ver=`SSHM 
$SLAVEUSER@$SLAVEHOST bash -c "'$cmd_line'"`; + status=$disk_size:$used_size:$ver:$no_of_files; + echo $status +} + +function ping_host () +{ + ### Use bash internal socket support + { + exec 100<>/dev/tcp/$1/$2 + if [ $? -ne '0' ]; then + return 1; + else + exec 100>&- + return 0; + fi + } 1>&2 2>/dev/null +} + +function main() +{ + log_file=$6 + > $log_file + + inet6=$7 + local cmd_line + local ver + + # Use FORCE_BLOCKER flag in the error message to differentiate + # between the errors which the force command should bypass + + # Test tcp connection to port 22, this is necessary since `ping` + # does not work on all environments where 'ssh' is allowed but + # ICMP is filterd + + ping_host $3 ${SSH_PORT} + + if [ $? -ne 0 ]; then + echo "FORCE_BLOCKER|$3 not reachable." > $log_file + exit 1; + fi; + + if [[ -z "${GR_SSH_IDENTITY_KEY}" ]]; then + ssh -p ${SSH_PORT} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 "echo Testing_Passwordless_SSH"; + else + ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 "echo Testing_Passwordless_SSH"; + fi + + if [ $? -ne 0 ]; then + echo "FORCE_BLOCKER|Passwordless ssh login has not been setup with $3 for user $2." > $log_file + exit 1; + fi; + + cmd_line=$(cmd_slave); + if [[ -z "${GR_SSH_IDENTITY_KEY}" ]]; then + ver=$(ssh -p ${SSH_PORT} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 bash -c "'$cmd_line'") + else + ver=$(ssh -p ${SSH_PORT} -i ${GR_SSH_IDENTITY_KEY} -oNumberOfPasswordPrompts=0 -oStrictHostKeyChecking=no $2@$3 bash -c "'$cmd_line'") + fi + + if [ -z "$ver" ]; then + echo "FORCE_BLOCKER|gluster command not found on $3 for user $2." 
> $log_file + exit 1; + fi; + + ERRORS=0; + master_data=$(master_stats $1 ${inet6}); + slave_data=$(slave_stats $2 $3 $4 ${inet6}); + master_disk_size=$(echo $master_data | cut -f1 -d':'); + slave_disk_size=$(echo $slave_data | cut -f1 -d':'); + master_used_size=$(echo $master_data | cut -f2 -d':'); + slave_used_size=$(echo $slave_data | cut -f2 -d':'); + master_version=$(echo $master_data | cut -f3 -d':'); + slave_version=$(echo $slave_data | cut -f3 -d':'); + slave_no_of_files=$(echo $slave_data | cut -f4 -d':'); + + if [[ "x$master_disk_size" = "x" || "x$master_version" = "x" || "$master_disk_size" -eq "0" ]]; then + echo "FORCE_BLOCKER|Unable to mount and fetch master volume details. Please check the log: $master_log_file" > $log_file; + exit 1; + fi; + + if [[ "x$slave_disk_size" = "x" || "x$slave_version" = "x" || "$slave_disk_size" -eq "0" ]]; then + echo "FORCE_BLOCKER|Unable to mount and fetch slave volume details. Please check the log: $slave_log_file" > $log_file; + exit 1; + fi; + + # The above checks are mandatory and force command should be blocked + # if they fail. The checks below can be bypassed if force option is + # provided hence no FORCE_BLOCKER flag. + + if [ "$slave_disk_size" -lt "$master_disk_size" ]; then + echo "Total disk size of master is greater than disk size of slave." >> $log_file; + ERRORS=$(($ERRORS + 1)); + fi + + effective_master_used_size=$(( $master_used_size + $BUFFER_SIZE )) + slave_available_size=$(( $slave_disk_size - $slave_used_size )) + master_available_size=$(( $master_disk_size - $effective_master_used_size )); + + if [ "$slave_available_size" -lt "$master_available_size" ]; then + echo "Total available size of master is greater than available size of slave" >> $log_file; + ERRORS=$(($ERRORS + 1)); + fi + + if [ ! -z $slave_no_of_files ]; then + echo "$3::$4 is not empty. Please delete existing files in $3::$4 and retry, or use force to continue without deleting the existing files." 
>> $log_file; + ERRORS=$(($ERRORS + 1)); + fi; + + if [[ $master_version != $slave_version ]]; then + echo "Gluster version mismatch between master and slave. Master version: $master_version Slave version: $slave_version" >> $log_file; + ERRORS=$(($ERRORS + 1)); + fi; + + exit $ERRORS; +} + + +main "$@"; diff --git a/geo-replication/src/peer_georep-sshkey.py.in b/geo-replication/src/peer_georep-sshkey.py.in new file mode 100644 index 00000000000..58696e9a616 --- /dev/null +++ b/geo-replication/src/peer_georep-sshkey.py.in @@ -0,0 +1,116 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- +# +# Copyright (c) 2016 Red Hat, Inc. <http://www.redhat.com> +# This file is part of GlusterFS. +# +# This file is licensed to you under your choice of the GNU Lesser +# General Public License, version 3 or any later version (LGPLv3 or +# later), or the GNU General Public License, version 2 (GPLv2), in all +# cases as published by the Free Software Foundation. +# +""" +Usage: + gluster-georep-sshkey generate + or + gluster-georep-sshkey generate --no-prefix + +Generates two SSH keys(one for gsyncd access and other for tar) in all +peer nodes and collects the public keys to the local node where it is +initiated. Adds `command=` prefix to common_secret.pem.pub if `--no-prefix` +argument is not passed. 
+""" +import os +import glob + +from gluster.cliutils import (node_output_ok, execute, execute_in_peers, + Cmd, runcli) +from prettytable import PrettyTable + + +SECRET_PEM = "@GLUSTERD_WORKDIR@/geo-replication/secret.pem" +TAR_SSH_PEM = "@GLUSTERD_WORKDIR@/geo-replication/tar_ssh.pem" +GSYNCD_CMD = 'command="@GLUSTERFS_LIBEXECDIR@/gsyncd" ' +TAR_CMD = 'command="tar ${SSH_ORIGINAL_COMMAND#* }" ' +COMMON_SECRET_FILE = "@GLUSTERD_WORKDIR@/geo-replication/common_secret.pem.pub" + + +class NodeGenCmd(Cmd): + name = "node-generate" + + def args(self, parser): + parser.add_argument("no_prefix") + + def run(self, args): + # Regenerate if secret.pem.pub not exists + if not os.path.exists(SECRET_PEM + ".pub"): + # Cleanup old files + for f in glob.glob(SECRET_PEM + "*"): + os.remove(f) + + execute(["ssh-keygen", "-N", "", "-f", SECRET_PEM]) + + # Regenerate if ssh_tar.pem.pub not exists + if not os.path.exists(TAR_SSH_PEM + ".pub"): + # Cleanup old files + for f in glob.glob(TAR_SSH_PEM + "*"): + os.remove(f) + + execute(["ssh-keygen", "-N", "", "-f", TAR_SSH_PEM]) + + # Add required prefixes if prefix is not "container" + prefix_secret_pem_pub = "" + prefix_tar_ssh_pem_pub = "" + if args.no_prefix != "no-prefix": + prefix_secret_pem_pub = GSYNCD_CMD + prefix_tar_ssh_pem_pub = TAR_CMD + + data = {"default_pub": "", "tar_pub": ""} + with open(SECRET_PEM + ".pub") as f: + data["default_pub"] = prefix_secret_pem_pub + f.read().strip() + + with open(TAR_SSH_PEM + ".pub") as f: + data["tar_pub"] = prefix_tar_ssh_pem_pub + f.read().strip() + + node_output_ok(data) + + +def color_status(value): + if value in ["UP", "OK"]: + return "green" + return "red" + + +class GenCmd(Cmd): + name = "generate" + + def args(self, parser): + parser.add_argument("--no-prefix", help="Do not use prefix in " + "generated pub keys", action="store_true") + + def run(self, args): + prefix = "no-prefix" if args.no_prefix else "." 
+ out = execute_in_peers("node-generate", [prefix]) + + common_secrets = [] + table = PrettyTable(["NODE", "NODE STATUS", "KEYGEN STATUS"]) + table.align["NODE STATUS"] = "r" + table.align["KEYGEN STATUS"] = "r" + for p in out: + if p.ok: + common_secrets.append(p.output["default_pub"]) + common_secrets.append(p.output["tar_pub"]) + + table.add_row([p.hostname, + "UP" if p.node_up else "DOWN", + "OK" if p.ok else "NOT OK: {0}".format( + p.error)]) + + with open(COMMON_SECRET_FILE, "w") as f: + f.write("\n".join(common_secrets) + "\n") + + print (table) + + +if __name__ == "__main__": + runcli() diff --git a/geo-replication/src/peer_gsec_create.in b/geo-replication/src/peer_gsec_create.in new file mode 100755 index 00000000000..6d4a4847013 --- /dev/null +++ b/geo-replication/src/peer_gsec_create.in @@ -0,0 +1,24 @@ +#!/bin/bash + +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libexecdir=@libexecdir@ + +if [ ! -f "$GLUSTERD_WORKDIR"/geo-replication/secret.pem.pub ]; then + \rm -rf "$GLUSTERD_WORKDIR"/geo-replication/secret.pem* + ssh-keygen -N '' -f "$GLUSTERD_WORKDIR"/geo-replication/secret.pem > /dev/null +fi + +if [ ! 
-f "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem.pub ]; then + \rm -rf "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem* + ssh-keygen -N '' -f "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem > /dev/null +fi + +if [ "Xcontainer" = "X$1" ]; then + output1=`cat "$GLUSTERD_WORKDIR"/geo-replication/secret.pem.pub` + output2=`cat "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem.pub` +else + output1=`echo command=\"${libexecdir}/glusterfs/gsyncd\" ""``cat "$GLUSTERD_WORKDIR"/geo-replication/secret.pem.pub` + output2=`echo command=\"tar \$\{SSH_ORIGINAL_COMMAND#* \}\" ""``cat "$GLUSTERD_WORKDIR"/geo-replication/tar_ssh.pem.pub` +fi +echo -e "$output1\n$output2" diff --git a/geo-replication/src/peer_mountbroker.in b/geo-replication/src/peer_mountbroker.in new file mode 100644 index 00000000000..8ecf38ded41 --- /dev/null +++ b/geo-replication/src/peer_mountbroker.in @@ -0,0 +1,211 @@ +#!/usr/bin/python3 + +from __future__ import print_function + +import os +from argparse import ArgumentParser, RawDescriptionHelpFormatter +import json +import sys + +PROG_DESCRIPTION = """ +GlusterFS Mountbroker user management +""" + +args = None + + +def ok(message=""): + if (not args and "-j" in sys.argv) or (args and args.json): + print(json.dumps({"ok": True, "message": message})) + else: + if message: + print(message) + + sys.exit(0) + + +def notok(message=""): + if (not args and "-j" in sys.argv) or (args and args.json): + print(json.dumps({"ok": False, "message": message})) + else: + print("error: %s" % message) + + # Always return zero due to limitation while executing + # as `gluster system:: execute` + sys.exit(0) + + +class NoStdErrParser(ArgumentParser): + """ + with gluster system:: execute, stderr gives + "Unable to end. Error : Bad file descriptor" error, + so deriving new class, prints error message and + exits with zero. 
+ """ + def error(self, message): + notok(message) + + +class MountbrokerUserMgmt(object): + def __init__(self, volfile): + self.volfile = volfile + self._options = {} + self.commented_lines = [] + self._parse() + + def _parse(self): + with open(self.volfile, "r") as f: + for line in f: + line = line.strip() + if line.startswith("option "): + key, value = line.split(" ")[1:] + self._options[key] = value + if line.startswith("#"): + self.commented_lines.append(line) + + def _get_write_data(self): + op = "volume management\n" + op += " type mgmt/glusterd\n" + for k, v in self._options.items(): + op += " option %s %s\n" % (k, v) + for line in self.commented_lines: + op += " %s\n" % line + op += "end-volume" + return op + + def save(self): + with open(self.volfile + "_tmp", "w") as f: + f.write(self._get_write_data()) + f.flush() + os.fsync(f.fileno()) + os.rename(self.volfile + "_tmp", self.volfile) + + def set_opt(self, key, value): + self._options[key] = value.strip() + + def remove_opt(self, key): + if key in self._options: + del(self._options[key]) + + def add_user(self, user, volumes): + vols = set() + for k, v in self._options.items(): + if k.startswith("mountbroker-geo-replication.") \ + and user == k.split(".")[-1]: + vols.update(v.split(",")) + + vols.update(volumes) + self.set_opt("mountbroker-geo-replication.%s" % user, + ",".join(vols)) + + def remove_volume(self, user, volumes): + vols = set() + for k, v in self._options.items(): + if k.startswith("mountbroker-geo-replication.") \ + and user == k.split(".")[-1]: + vols.update(v.split(",")) + + for v1 in volumes: + vols.discard(v1) + + if vols: + self.set_opt("mountbroker-geo-replication.%s" % user, + ",".join(vols)) + else: + self.remove_opt("mountbroker-geo-replication.%s" % user) + + def remove_user(self, user): + self.remove_opt("mountbroker-geo-replication.%s" % user) + + def info(self): + data = {"users": []} + + for k, v in self._options.items(): + if k.startswith("mountbroker-geo-replication."): + 
data["users"].append( + {"name": k.split(".")[-1], "volumes": v.split(",")} + ) + else: + data[k] = v + + return data + + +def format_info(data): + op = "%s %s\n" % ("Option".ljust(50), "Value".ljust(50)) + op += ("-" * 101) + "\n" + for key, value in data.items(): + if key != "users": + op += "%s %s\n" % (key.ljust(50), value) + + op += "\nUsers: %s\n" % ("None" if not data["users"] else "") + for user in data["users"]: + op += "%s: %s\n" % (user["name"], ", ".join(user["volumes"])) + op += "\n\n" + return op + + +def _get_args(): + parser = NoStdErrParser(formatter_class=RawDescriptionHelpFormatter, + description=PROG_DESCRIPTION) + + parser.add_argument('-j', dest="json", help="JSON output", + action="store_true") + subparsers = parser.add_subparsers(title='subcommands', dest='cmd') + parser_useradd = subparsers.add_parser('user') + parser_userdel = subparsers.add_parser('userdel') + parser_volumedel = subparsers.add_parser('volumedel') + subparsers.add_parser('info') + parser_opt = subparsers.add_parser('opt') + parser_optdel = subparsers.add_parser('optdel') + + parser_useradd.add_argument('username', help="Username", type=str) + parser_useradd.add_argument('volumes', type=str, default='', + help="Volumes list. ',' separated") + + parser_volumedel.add_argument('username', help="Username", type=str) + parser_volumedel.add_argument('volumes', type=str, default='', + help="Volumes list. 
',' separated") + + parser_userdel.add_argument('username', help="Username", type=str) + + parser_opt.add_argument('opt_name', help="Name", type=str) + parser_opt.add_argument('opt_value', help="Value", type=str) + + parser_optdel.add_argument('opt_name', help="Name", type=str) + + return parser.parse_args() + + +def main(): + global args + args = _get_args() + + m = MountbrokerUserMgmt("@GLUSTERD_VOLFILE@") + + if args.cmd == "opt": + m.set_opt(args.opt_name, args.opt_value) + elif args.cmd == "optdel": + m.remove_opt(args.opt_name) + elif args.cmd == "userdel": + m.remove_user(args.username) + elif args.cmd == "user": + volumes = [v.strip() for v in args.volumes.split(",") + if v.strip() != ""] + m.add_user(args.username, volumes) + elif args.cmd == "volumedel": + volumes = [v.strip() for v in args.volumes.split(",") + if v.strip() != ""] + m.remove_volume(args.username, volumes) + elif args.cmd == "info": + info = m.info() + if not args.json: + info = format_info(info) + ok(info) + + if args.cmd != "info": + m.save() + ok() + +if __name__ == "__main__": + main() diff --git a/geo-replication/src/peer_mountbroker.py.in b/geo-replication/src/peer_mountbroker.py.in new file mode 100644 index 00000000000..40b90ffc560 --- /dev/null +++ b/geo-replication/src/peer_mountbroker.py.in @@ -0,0 +1,401 @@ +#!/usr/bin/python3 + +from __future__ import print_function + +import os +from errno import EEXIST, ENOENT + +from gluster.cliutils import (execute, Cmd, node_output_ok, + node_output_notok, execute_in_peers, + runcli, oknotok) +from prettytable import PrettyTable + +LOG_DIR = "@localstatedir@/log/glusterfs/geo-replication-slaves" +CLI_LOG = "@localstatedir@/log/glusterfs/cli.log" +GEOREP_DIR = "@GLUSTERD_WORKDIR@/geo-replication" +GLUSTERD_VOLFILE = "@GLUSTERD_VOLFILE@" + + +class MountbrokerUserMgmt(object): + def __init__(self, volfile): + self.volfile = volfile + self._options = {} + self.commented_lines = [] + self.user_volumes = {} + self._parse() + + def 
_parse(self): + """ Example glusterd.vol + volume management + type mgmt/glusterd + option working-directory /var/lib/glusterd + option transport-type socket,rdma + option transport.socket.keepalive-time 10 + option transport.socket.keepalive-interval 2 + option transport.socket.read-fail-log off + option rpc-auth-allow-insecure on + option ping-timeout 0 + option event-threads 1 + # option base-port 49152 + option mountbroker-root /var/mountbroker-root + option mountbroker-geo-replication.user1 vol1,vol2,vol3 + option geo-replication-log-group geogroup + option rpc-auth-allow-insecure on + end-volume + """ + with open(self.volfile, "r") as f: + for line in f: + line = line.strip() + if line.startswith("option "): + key, value = line.split()[1:] + self._options[key] = value + if line.startswith("#"): + self.commented_lines.append(line) + + for k, v in self._options.items(): + if k.startswith("mountbroker-geo-replication."): + user = k.split(".")[-1] + self.user_volumes[user] = set(v.split(",")) + + def get_group(self): + return self._options.get("geo-replication-log-group", None) + + def _get_write_data(self): + op = "volume management\n" + op += " type mgmt/glusterd\n" + for k, v in self._options.items(): + if k.startswith("mountbroker-geo-replication."): + # Users will be added seperately + continue + + op += " option %s %s\n" % (k, v) + + for k, v in self.user_volumes.items(): + if v: + op += (" option mountbroker-geo-replication." 
+ "%s %s\n" % (k, ",".join(v))) + + for line in self.commented_lines: + op += " %s\n" % line + + op += "end-volume" + return op + + def save(self): + with open(self.volfile + "_tmp", "w") as f: + f.write(self._get_write_data()) + f.flush() + os.fsync(f.fileno()) + os.rename(self.volfile + "_tmp", self.volfile) + + def set_mount_root_and_group(self, mnt_root, group): + self._options["mountbroker-root"] = mnt_root + self._options["geo-replication-log-group"] = group + + def add(self, volume, user): + user_volumes = self.user_volumes.get(user, None) + + if user_volumes is not None and volume in user_volumes: + # User and Volume already exists + return + + if user_volumes is None: + # User not exists + self.user_volumes[user] = set() + + self.user_volumes[user].add(volume) + + def remove(self, volume=None, user=None): + if user is not None: + if volume is None: + self.user_volumes[user] = set() + else: + try: + self.user_volumes.get(user, set()).remove(volume) + except KeyError: + pass + else: + if volume is None: + return + + for k, v in self.user_volumes.items(): + try: + self.user_volumes[k].remove(volume) + except KeyError: + pass + + def info(self): + # Convert Volumes set into Volumes list + users = {} + for k, v in self.user_volumes.items(): + users[k] = list(v) + + data = { + "mountbroker-root": self._options.get("mountbroker-root", "None"), + "geo-replication-log-group": self._options.get( + "geo-replication-log-group", ""), + "users": users + } + + return data + + +class NodeSetup(Cmd): + # Test if group exists using `getent group <grp>` + # and then group add using `groupadd <grp>` + # chgrp -R <grp> /var/log/glusterfs/geo-replication-slaves + # chgrp -R <grp> /var/lib/glusterd/geo-replication + # chmod -R 770 /var/log/glusterfs/geo-replication-slaves + # chmod 770 /var/lib/glusterd/geo-replication + # mkdir -p <mnt_root> + # chmod 0711 <mnt_root> + # If selinux, + # semanage fcontext -a -e /home /var/mountbroker-root + # restorecon -Rv /var/mountbroker-root 
    name = "node-setup"

    def args(self, parser):
        parser.add_argument("mount_root")
        parser.add_argument("group")

    def run(self, args):
        # Runs on every peer: prepare the mountbroker root directory, fix
        # ownership/permissions of the geo-rep dirs and record the options
        # in glusterd.vol.
        m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)

        try:
            os.makedirs(args.mount_root)
        except OSError as e:
            if e.errno == EEXIST:
                # already present is fine
                pass
            else:
                node_output_notok("Unable to Create {0}".format(
                    args.mount_root))

        execute(["chmod", "0711", args.mount_root])
        # SELinux relabeling; the tools may be absent (ENOENT), which is
        # tolerated on non-SELinux systems.
        try:
            execute(["semanage", "fcontext", "-a", "-e",
                     "/home", args.mount_root])
        except OSError as e:
            if e.errno == ENOENT:
                pass
            else:
                node_output_notok(
                    "Unable to run semanage: {0}".format(e))

        try:
            execute(["restorecon", "-Rv", args.mount_root])
        except OSError as e:
            if e.errno == ENOENT:
                pass
            else:
                node_output_notok(
                    "Unable to run restorecon: {0}".format(e))

        # The group must pre-exist; this command does not create it.
        rc, out, err = execute(["getent", "group", args.group])
        if rc != 0:
            node_output_notok("User Group not exists")

        # Group ownership and permissions for geo-rep working/log dirs.
        execute(["chgrp", "-R", args.group, GEOREP_DIR])
        execute(["chgrp", "-R", args.group, LOG_DIR])
        execute(["chgrp", args.group, CLI_LOG])
        execute(["chmod", "770", GEOREP_DIR])
        execute(["find", LOG_DIR, "-type", "d", "-exec", "chmod", "770", "{}",
                 "+"])
        execute(["find", LOG_DIR, "-type", "f", "-exec", "chmod", "660", "{}",
                 "+"])
        execute(["chmod", "660", CLI_LOG])

        m.set_mount_root_and_group(args.mount_root, args.group)
        m.save()

        node_output_ok()


def color_status(value):
    # Map an up/ok-style status string to a display color.
    if value.lower() in ("up", "ok", "yes"):
        return "green"
    else:
        return "red"


class CliSetup(Cmd):
    # gluster-mountbroker setup <MOUNT_ROOT> <GROUP>
    name = "setup"

    def args(self, parser):
        parser.add_argument("mount_root",
                            help="Path to the mountbroker-root directory.")
        parser.add_argument("group",
                            help="Group to be used for setup.")

    def run(self, args):
        # Fan the node-setup command out to all peers and tabulate results.
        out = execute_in_peers("node-setup", [args.mount_root,
                                              args.group])
        table = PrettyTable(["NODE", "NODE STATUS", "SETUP STATUS"])
        table.align["NODE STATUS"] = "r"
        table.align["SETUP STATUS"] = "r"
        for p in out:
            table.add_row([p.hostname,
                           "UP" if p.node_up else "DOWN",
                           "OK" if p.ok else "NOT OK: {0}".format(
                               p.error)])

        print(table)


class NodeStatus(Cmd):
    # Check if Group exists
    # Check if user exists
    # Check directory permission /var/log/glusterfs/geo-replication-slaves
    # and /var/lib/glusterd/geo-replication
    # Check mount root and its permissions
    # Check glusterd.vol file for user, group, dir existence
    name = "node-status"

    def run(self, args):
        # Runs on every peer: report the mountbroker configuration plus
        # whether the configured group and mount root exist on this node.
        m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
        data = m.info()
        data["group_exists"] = False
        data["path_exists"] = False

        rc, out, err = execute(["getent", "group",
                                data["geo-replication-log-group"]])

        if rc == 0:
            data["group_exists"] = True

        if os.path.exists(data["mountbroker-root"]):
            data["path_exists"] = True

        node_output_ok(data)


class CliStatus(Cmd):
    # gluster-mountbroker status
    name = "status"

    def run(self, args):
        out = execute_in_peers("node-status")
        table = PrettyTable(["NODE", "NODE STATUS", "MOUNT ROOT",
                             "GROUP", "USERS"])
        table.align["NODE STATUS"] = "r"

        for p in out:
            node_data = p.output
            if node_data == "" or node_data == "N/A":
                node_data = {}

            # Render as "user1(vol1, vol2) user2(vol3) ", or "None".
            users_row_data = ""
            for k, v in node_data.get("users", {}).items():
                users_row_data += "{0}({1}) ".format(k, ", ".join(v))

            if not users_row_data:
                users_row_data = "None"

            # Annotate mount root / group with the existence check result.
            mount_root = node_data.get("mountbroker-root", "None")
            if mount_root != "None":
                mount_root += "({0})".format(oknotok(
                    node_data.get("path_exists", False)))

            grp = node_data.get("geo-replication-log-group", "None")
            if grp != "None":
                grp += "({0})".format(oknotok(
                    node_data.get("group_exists", False)))

            table.add_row([p.hostname,
                           "UP" if p.node_up else "DOWN",
                           mount_root,
                           grp,
                           users_row_data])

        print(table)


class NodeAdd(Cmd):
    # useradd -m -g <grp> <usr>
    # useradd to glusterd.vol
    name = "node-add"

    def args(self, parser):
        parser.add_argument("volume")
        parser.add_argument("user")

    def run(self, args):
        # Runs on every peer: add the volume/user pair to glusterd.vol.
        # Requires the mountbroker group to be configured first (setup).
        m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
        grp = m.get_group()
        if grp is None:
            node_output_notok("Group is not available")

        m.add(args.volume, args.user)
        m.save()
        node_output_ok()


class CliAdd(Cmd):
    # gluster-mountbroker add <VOLUME> <USER>
    name = "add"

    def args(self, parser):
        parser.add_argument("volume",
                            help="Volume to be added.")
        parser.add_argument("user",
                            help="User for which volume is to be added.")

    def run(self, args):
        # Fan the node-add command out to all peers and tabulate results.
        out = execute_in_peers("node-add", [args.volume,
                                            args.user])
        table = PrettyTable(["NODE", "NODE STATUS", "ADD STATUS"])
        table.align["NODE STATUS"] = "r"
        table.align["ADD STATUS"] = "r"

        for p in out:
            table.add_row([p.hostname,
                           "UP" if p.node_up else "DOWN",
                           "OK" if p.ok else "NOT OK: {0}".format(
                               p.error)])

        print(table)


class NodeRemove(Cmd):
    # userremove from glusterd.vol file
    name = "node-remove"

    def args(self, parser):
        parser.add_argument("volume")
        parser.add_argument("user")

    def run(self, args):
        # "." is the CLI sentinel for "match any" (see CliRemove defaults).
        m = MountbrokerUserMgmt(GLUSTERD_VOLFILE)
        volume = None if args.volume == "." else args.volume
        # (line cut by the extraction; it continues "else args.user" in the
        # next chunk)
        user = None if args.user == "."
        # (fragment: continuation of NodeRemove.run from the previous
        # extraction chunk)
        else args.user
        m.remove(volume=volume, user=user)
        m.save()
        node_output_ok()


class CliRemove(Cmd):
    # gluster-mountbroker remove --volume <VOLUME> --user <USER>
    name = "remove"

    def args(self, parser):
        # "." (the default) means "any"; NodeRemove maps it to None.
        parser.add_argument("--volume", default=".", help="Volume to be removed.")
        parser.add_argument("--user", default=".",
                            help="User for which volume has to be removed.")

    def run(self, args):
        # Fan the node-remove command out to all peers and tabulate results.
        out = execute_in_peers("node-remove", [args.volume,
                                               args.user])
        table = PrettyTable(["NODE", "NODE STATUS", "REMOVE STATUS"])
        table.align["NODE STATUS"] = "r"
        table.align["REMOVE STATUS"] = "r"

        for p in out:
            table.add_row([p.hostname,
                           "UP" if p.node_up else "DOWN",
                           "OK" if p.ok else "NOT OK: {0}".format(
                               p.error)])

        print(table)

if __name__ == "__main__":
    runcli()
diff --git a/geo-replication/src/procdiggy.c b/geo-replication/src/procdiggy.c
new file mode 100644
index 00000000000..8068ef79a42
--- /dev/null
+++ b/geo-replication/src/procdiggy.c
@@ -0,0 +1,136 @@
/*
   Copyright (c) 2011-2012 Red Hat, Inc. <http://www.redhat.com>
   This file is part of GlusterFS.

   This file is licensed to you under your choice of the GNU Lesser
   General Public License, version 3 or any later version (LGPLv3 or
   later), or the GNU General Public License, version 2 (GPLv2), in all
   cases as published by the Free Software Foundation.
*/

#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <ctype.h>
#include <sys/param.h> /* for PATH_MAX */

#include <glusterfs/common-utils.h>
#include <glusterfs/syscall.h>
#include "procdiggy.h"

/*
 * Parse PROC/<pid>/status and return the parent pid (the "PPid:" field).
 * If @name is non-NULL, *name is set to a gf_strdup()'d copy of the
 * "Name:" field — the caller must release it with GF_FREE(); it is left
 * NULL on failure.  Returns the parent pid on success, -1 on open/parse
 * failure, or -2 when duplicating the name fails (out of memory).
 */
pid_t
pidinfo(pid_t pid, char **name)
{
    char buf[NAME_MAX * 2] = {
        0,
    };
    FILE *f = NULL;
    char path[PATH_MAX] = {
        0,
    };
    char *p = NULL;
    int ret = 0;
    pid_t lpid = -1;

    if (name)
        *name = NULL;

    snprintf(path, sizeof path, PROC "/%d/status", pid);

    f = fopen(path, "r");
    if (!f)
        return -1;

    /* Scan the status file line by line until "PPid:" is found. */
    for (;;) {
        size_t len;
        memset(buf, 0, sizeof(buf));
        /* Fail on EOF, read error, or an over-long (unterminated) line. */
        if (fgets(buf, sizeof(buf), f) == NULL || (len = strlen(buf)) == 0 ||
            buf[len - 1] != '\n') {
            lpid = -1;
            goto out;
        }
        buf[len - 1] = '\0';

        /* Capture the process name from the first "Name:" line seen. */
        if (name && !*name) {
            p = strtail(buf, "Name:");
            if (p) {
                /* NOTE(review): isspace() on a plain char can misbehave for
                   negative char values; casting to unsigned char would be
                   safer (CERT STR37-C) — confirm before changing. */
                while (isspace(*++p))
                    ;
                *name = gf_strdup(p);
                if (!*name) {
                    lpid = -2;
                    goto out;
                }
                continue;
            }
        }

        p = strtail(buf, "PPid:");
        if (p)
            break;
    }

    /* Skip whitespace after "PPid:" and parse the numeric parent pid. */
    while (isspace(*++p))
        ;
    ret = gf_string2int(p, &lpid);
    if (ret == -1)
        lpid = -1;

out:
    fclose(f);
    /* On parse failure, don't leak an already-duplicated name. */
    if (lpid == -1 && name && *name)
        GF_FREE(*name);
    if (lpid == -2)
        fprintf(stderr, "out of memory\n");
    return lpid;
}

/*
 * Iterate over all numeric entries of PROC, invoking
 * proch(pid, ppid, name, data) for each process found.  Iteration stops
 * early when @proch returns non-zero (that value is returned).  Returns
 * -1 when PROC cannot be opened or traversal fails, 0 otherwise.
 */
int
prociter(int (*proch)(pid_t pid, pid_t ppid, char *tmpname, void *data),
         void *data)
{
    char *name = NULL;
    DIR *d = NULL;
    struct dirent *de = NULL;
    struct dirent scratch[2] = {
        {
            0,
        },
    };
    pid_t pid = -1;
    pid_t ppid = -1;
    int ret = 0;

    d = sys_opendir(PROC);
    if (!d)
        return -1;

    for (;;) {
        errno = 0;
        de = sys_readdir(d, scratch);
        if (!de || errno != 0)
            break;

        if (gf_string2int(de->d_name, &pid) != -1 && pid >= 0) {
            ppid = pidinfo(pid, &name);
            /* -1: process unreadable/vanished — skip it.
               -2: OOM inside pidinfo; proch is still invoked (name is
               NULL in that case). */
            switch (ppid) {
                case -1:
                    continue;
                case -2:
                    break;
            }
            ret = proch(pid, ppid, name, data);
            GF_FREE(name);
            if (ret)
                break;
        }
    }
    sys_closedir(d);
    if (!de && errno) {
        /* (line cut by the extraction; it continues in the next chunk) */
        fprintf(stderr, "failed to traverse " PROC " 
(%s)\n", strerror(errno));
        ret = -1;
    }

    return ret;
}
diff --git a/geo-replication/src/procdiggy.h b/geo-replication/src/procdiggy.h
new file mode 100644
index 00000000000..e17ccd31c89
--- /dev/null
+++ b/geo-replication/src/procdiggy.h
@@ -0,0 +1,21 @@
/*
   Copyright (c) 2011-2012 Red Hat, Inc. <http://www.redhat.com>
   This file is part of GlusterFS.

   This file is licensed to you under your choice of the GNU Lesser
   General Public License, version 3 or any later version (LGPLv3 or
   later), or the GNU General Public License, version 2 (GPLv2), in all
   cases as published by the Free Software Foundation.
*/
/* NOTE(review): this header has no include guard / #pragma once — harmless
   for its current includers, but worth adding if it grows. */
#ifdef __NetBSD__
#include <sys/syslimits.h>
#endif /* __NetBSD__ */

#define PROC "/proc"

/* Return the parent pid of @pid read from PROC/<pid>/status; optionally
   duplicates the process name into *name (caller frees with GF_FREE). */
pid_t
pidinfo(pid_t pid, char **name);

/* Invoke @proch for every process found under PROC; stops early when
   @proch returns non-zero. */
int
prociter(int (*proch)(pid_t pid, pid_t ppid, char *name, void *data),
         void *data);
diff --git a/geo-replication/src/set_geo_rep_pem_keys.sh b/geo-replication/src/set_geo_rep_pem_keys.sh
new file mode 100755
index 00000000000..8a43fa39d1f
--- /dev/null
+++ b/geo-replication/src/set_geo_rep_pem_keys.sh
@@ -0,0 +1,58 @@
#!/bin/bash

# Script to copy the pem keys from the user's home directory
# to $GLUSTERD_WORKDIR/geo-replication and then copy
# the keys to other nodes in the cluster and add them to the
# respective authorized keys. The script takes as argument the
# user name and assumes that the user will be present in all
# the nodes in the cluster.
Not to be used for root user

# main <user> <master_vol> <slave_vol>
# Copies <master>_<slave>_common_secret.pem.pub from the user's home
# directory into glusterd's geo-replication workdir, distributes it to the
# cluster and appends it to the user's authorized keys; finally marks the
# slave volume read-only.  Exits non-zero on any validation failure.
function main()
{
    user=$1
    master_vol=$2
    slave_vol=$3
    # glusterd working directory (e.g. /var/lib/glusterd)
    GLUSTERD_WORKDIR=$(gluster system:: getwd)

    if [ "$user" == "" ]; then
        echo "Please enter the user's name"
        exit 1;
    fi

    if [ "$master_vol" == "" ]; then
        echo "Invalid master volume name"
        exit 1;
    fi

    if [ "$slave_vol" == "" ]; then
        echo "Invalid slave volume name"
        exit 1;
    fi

    COMMON_SECRET_PEM_PUB=${master_vol}_${slave_vol}_common_secret.pem.pub

    if [ "$user" == "root" ]; then
        echo "This script is not needed for root"
        exit 1;
    fi

    # Quote all expansions: user names and home directories may contain
    # characters subject to word splitting or globbing (ShellCheck SC2086).
    home_dir=$(getent passwd "$user" | cut -d ':' -f 6);

    if [ "$home_dir" == "" ]; then
        echo "No user $user found"
        exit 1;
    fi

    if [ -f "$home_dir/${COMMON_SECRET_PEM_PUB}" ]; then
        cp "$home_dir/${COMMON_SECRET_PEM_PUB}" "${GLUSTERD_WORKDIR}/geo-replication/"
        gluster system:: copy file "/geo-replication/${COMMON_SECRET_PEM_PUB}"
        gluster system:: execute add_secret_pub "$user" "geo-replication/${master_vol}_${slave_vol}_common_secret.pem.pub"
        gluster vol set "${slave_vol}" features.read-only on
    else
        echo "$home_dir/common_secret.pem.pub not present. Please run geo-replication command on master with push-pem option to generate the file"
        exit 1;
    fi
    exit 0;
}

main "$@";
