Diffstat (limited to 'extras')
-rwxr-xr-x  extras/distributed-testing/distributed-test-runner.py             |  12
-rw-r--r--  extras/ganesha/ocf/ganesha_nfsd                                    |   2
-rwxr-xr-x  extras/ganesha/scripts/create-export-ganesha.sh                    |   1
-rw-r--r--  extras/ganesha/scripts/ganesha-ha.sh                               |  93
-rwxr-xr-x  extras/glusterfs-georep-upgrade.py                                 |  77
-rw-r--r--  extras/group-virt.example                                          |   5
-rwxr-xr-x  extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh  |  27
-rwxr-xr-x  extras/hook-scripts/start/post/S31ganesha-start.sh                |   2
-rwxr-xr-x  extras/quota/quota_fsck.py                                         |   8
-rwxr-xr-x  extras/snap_scheduler/gcron.py                                     |   7
-rwxr-xr-x  extras/snap_scheduler/snap_scheduler.py                            |   4
-rwxr-xr-x  extras/statedumpparse.rb                                           | 151
-rw-r--r--  extras/systemd/glusterd.service.in                                 |   5
13 files changed, 275 insertions, 119 deletions
diff --git a/extras/distributed-testing/distributed-test-runner.py b/extras/distributed-testing/distributed-test-runner.py
index 7bfb6c9652a..5a07e2feab1 100755
--- a/extras/distributed-testing/distributed-test-runner.py
+++ b/extras/distributed-testing/distributed-test-runner.py
@@ -383,14 +383,17 @@ class Handlers:
return self.shell.call("make install") == 0
@synchronized
- def prove(self, id, test, timeout, valgrind=False, asan_noleaks=True):
+ def prove(self, id, test, timeout, valgrind="no", asan_noleaks=True):
assert id == self.client_id
self.shell.cd(self.gluster_root)
env = "DEBUG=1 "
- if valgrind:
+ if valgrind == "memcheck" or valgrind == "yes":
cmd = "valgrind"
cmd += " --tool=memcheck --leak-check=full --track-origins=yes"
cmd += " --show-leak-kinds=all -v prove -v"
+ elif valgrind == "drd":
+ cmd = "valgrind"
+ cmd += " --tool=drd -v prove -v"
elif asan_noleaks:
cmd = "prove -v"
env += "ASAN_OPTIONS=detect_leaks=0 "
@@ -827,8 +830,9 @@ parser.add_argument("--port", help="server port to listen",
type=int, default=DEFAULT_PORT)
# test role
parser.add_argument("--tester", help="start tester", action="store_true")
-parser.add_argument("--valgrind", help="run tests under valgrind",
- action="store_true")
+parser.add_argument("--valgrind[=memcheck,drd]",
+ help="run tests with valgrind tool 'memcheck' or 'drd'",
+ default="no")
parser.add_argument("--asan", help="test with asan enabled",
action="store_true")
parser.add_argument("--asan-noleaks", help="test with asan but no mem leaks",
diff --git a/extras/ganesha/ocf/ganesha_nfsd b/extras/ganesha/ocf/ganesha_nfsd
index 93fc8be5136..f91e8b6b8f7 100644
--- a/extras/ganesha/ocf/ganesha_nfsd
+++ b/extras/ganesha/ocf/ganesha_nfsd
@@ -36,7 +36,7 @@ else
. ${OCF_FUNCTIONS_DIR}/ocf-shellfuncs
fi
-OCF_RESKEY_ha_vol_mnt_default="/var/run/gluster/shared_storage"
+OCF_RESKEY_ha_vol_mnt_default="/run/gluster/shared_storage"
: ${OCF_RESKEY_ha_vol_mnt=${OCF_RESKEY_ha_vol_mnt_default}}
ganesha_meta_data() {
diff --git a/extras/ganesha/scripts/create-export-ganesha.sh b/extras/ganesha/scripts/create-export-ganesha.sh
index 1ffba427457..3040e8138b0 100755
--- a/extras/ganesha/scripts/create-export-ganesha.sh
+++ b/extras/ganesha/scripts/create-export-ganesha.sh
@@ -64,6 +64,7 @@ echo " Pseudo=\"/$VOL\";"
echo ' Protocols = "3", "4" ;'
echo ' Transports = "UDP","TCP";'
echo ' SecType = "sys";'
+echo ' Security_Label = False;'
echo " }"
}
if [ "$OPTION" = "on" ];
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 32af1cab10e..9790a719e10 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -24,11 +24,16 @@ GANESHA_HA_SH=$(realpath $0)
HA_NUM_SERVERS=0
HA_SERVERS=""
HA_VOL_NAME="gluster_shared_storage"
-HA_VOL_MNT="/var/run/gluster/shared_storage"
+HA_VOL_MNT="/run/gluster/shared_storage"
HA_CONFDIR=$HA_VOL_MNT"/nfs-ganesha"
SERVICE_MAN="DISTRO_NOT_FOUND"
-RHEL6_PCS_CNAME_OPTION="--name"
+# rhel, fedora id, version
+ID=""
+VERSION_ID=""
+
+PCS9OR10_PCS_CNAME_OPTION=""
+PCS9OR10_PCS_CLONE_OPTION="clone"
SECRET_PEM="/var/lib/glusterd/nfs/secret.pem"
# UNBLOCK RA uses shared_storage which may become unavailable
@@ -101,9 +106,9 @@ determine_service_manager () {
then
SERVICE_MAN="/sbin/service"
fi
- if [ "${SERVICE_MAN}" == "DISTRO_NOT_FOUND" ]
+ if [[ "${SERVICE_MAN}X" == "DISTRO_NOT_FOUNDX" ]]
then
- echo "Service manager not recognized, exiting"
+ logger "Service manager not recognized, exiting"
exit 1
fi
}
@@ -114,7 +119,7 @@ manage_service ()
local new_node=${2}
local option=
- if [ "${action}" == "start" ]; then
+ if [[ "${action}" == "start" ]]; then
option="yes"
else
option="no"
@@ -122,7 +127,7 @@ manage_service ()
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
${SECRET_PEM} root@${new_node} "${GANESHA_HA_SH} --setup-ganesha-conf-files $HA_CONFDIR $option"
- if [ "${SERVICE_MAN}" == "/bin/systemctl" ]
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]
then
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
${SECRET_PEM} root@${new_node} "${SERVICE_MAN} ${action} nfs-ganesha"
@@ -140,7 +145,7 @@ check_cluster_exists()
if [ -e /var/run/corosync.pid ]; then
cluster_name=$(pcs status | grep "Cluster name:" | cut -d ' ' -f 3)
- if [ ${cluster_name} -a ${cluster_name} = ${name} ]; then
+ if [[ "${cluster_name}X" == "${name}X" ]]; then
logger "$name already exists, exiting"
exit 0
fi
@@ -155,7 +160,7 @@ determine_servers()
local tmp_ifs=${IFS}
local ha_servers=""
- if [ "X${cmd}X" != "XsetupX" -a "X${cmd}X" != "XstatusX" ]; then
+ if [ "${cmd}X" != "setupX" -a "${cmd}X" != "statusX" ]; then
ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
IFS=$' '
for server in ${ha_servers} ; do
@@ -193,15 +198,21 @@ setup_cluster()
logger "setting up cluster ${name} with the following ${servers}"
- pcs cluster auth ${servers}
- # pcs cluster setup --name ${name} ${servers}
- pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --enable --transport udpu ${servers}
+ # pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} ${servers}
+ pcs cluster setup --force ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers}
if [ $? -ne 0 ]; then
- logger "pcs cluster setup ${RHEL6_PCS_CNAME_OPTION} ${name} --enable --transport udpu ${servers} failed"
+ logger "pcs cluster setup ${PCS9OR10_PCS_CNAME_OPTION} ${name} --enable ${servers} failed, shutting down ganesha and bailing out"
#set up failed stop all ganesha process and clean up symlinks in cluster
stop_ganesha_all "${servers}"
exit 1;
fi
+
+ # pcs cluster auth ${servers}
+ pcs cluster auth
+ if [ $? -ne 0 ]; then
+ logger "pcs cluster auth failed"
+ fi
+
pcs cluster start --all
if [ $? -ne 0 ]; then
logger "pcs cluster start failed"
@@ -217,7 +228,7 @@ setup_cluster()
done
unclean=$(pcs status | grep -u "UNCLEAN")
- while [[ "${unclean}X" = "UNCLEANX" ]]; do
+ while [[ "${unclean}X" == "UNCLEANX" ]]; do
sleep 1
unclean=$(pcs status | grep -u "UNCLEAN")
done
@@ -244,7 +255,7 @@ setup_finalize_ha()
local stopped=""
stopped=$(pcs status | grep -u "Stopped")
- while [[ "${stopped}X" = "StoppedX" ]]; do
+ while [[ "${stopped}X" == "StoppedX" ]]; do
sleep 1
stopped=$(pcs status | grep -u "Stopped")
done
@@ -265,7 +276,7 @@ refresh_config ()
if [ -e ${SECRET_PEM} ]; then
while [[ ${3} ]]; do
current_host=`echo ${3} | cut -d "." -f 1`
- if [ ${short_host} != ${current_host} ]; then
+ if [[ ${short_host} != ${current_host} ]]; then
output=$(ssh -oPasswordAuthentication=no \
-oStrictHostKeyChecking=no -i ${SECRET_PEM} root@${current_host} \
"dbus-send --print-reply --system --dest=org.ganesha.nfsd \
@@ -398,7 +409,7 @@ wrap_create_virt_ip_constraints()
# the result is "node2 node3 node4"; for node2, "node3 node4 node1"
# and so on.
while [[ ${1} ]]; do
- if [ "${1}" = "${primary}" ]; then
+ if [[ ${1} == ${primary} ]]; then
shift
while [[ ${1} ]]; do
tail=${tail}" "${1}
@@ -429,15 +440,15 @@ setup_create_resources()
local cibfile=$(mktemp -u)
# fixup /var/lib/nfs
- logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone"
- pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone
+ logger "pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}"
+ pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION}
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} --clone failed"
+ logger "warning: pcs resource create nfs_setup ocf:heartbeat:ganesha_nfsd ha_vol_mnt=${HA_VOL_MNT} ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
- pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone
+ pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION}
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone failed"
+ logger "warning: pcs resource create nfs-mon ocf:heartbeat:ganesha_mon ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
# see comment in (/usr/lib/ocf/resource.d/heartbeat/ganesha_grace
@@ -445,9 +456,9 @@ setup_create_resources()
# ganesha-active crm_attribute
sleep 5
- pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone notify=true
+ pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} notify=true
if [ $? -ne 0 ]; then
- logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace --clone failed"
+ logger "warning: pcs resource create nfs-grace ocf:heartbeat:ganesha_grace ${PCS9OR10_PCS_CLONE_OPTION} failed"
fi
pcs constraint location nfs-grace-clone rule score=-INFINITY grace-active ne 1
@@ -616,7 +627,7 @@ addnode_recreate_resources()
--after ${add_node}-nfs_block
if [ $? -ne 0 ]; then
logger "warning pcs resource create ${add_node}-cluster_ip-1 ocf:heartbeat:IPaddr \
- ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
+ ip=${add_vip} cidr_netmask=32 op monitor interval=15s failed"
fi
pcs -f ${cibfile} constraint order nfs-grace-clone then ${add_node}-cluster_ip-1
@@ -780,7 +791,7 @@ setup_state_volume()
touch ${mnt}/nfs-ganesha/${dirname}/nfs/statd/state
fi
for server in ${HA_SERVERS} ; do
- if [ ${server} != ${dirname} ]; then
+ if [[ ${server} != ${dirname} ]]; then
ln -s ${mnt}/nfs-ganesha/${server}/nfs/ganesha ${mnt}/nfs-ganesha/${dirname}/nfs/ganesha/${server}
ln -s ${mnt}/nfs-ganesha/${server}/nfs/statd ${mnt}/nfs-ganesha/${dirname}/nfs/statd/${server}
fi
@@ -794,7 +805,7 @@ setup_state_volume()
enable_pacemaker()
{
while [[ ${1} ]]; do
- if [ "${SERVICE_MAN}" == "/usr/bin/systemctl" ]; then
+ if [[ "${SERVICE_MAN}" == "/bin/systemctl" ]]; then
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i \
${SECRET_PEM} root@${1} "${SERVICE_MAN} enable pacemaker"
else
@@ -892,7 +903,7 @@ delnode_state_volume()
rm -rf ${mnt}/nfs-ganesha/${dirname}
for server in ${HA_SERVERS} ; do
- if [[ "${server}" != "${dirname}" ]]; then
+ if [[ ${server} != ${dirname} ]]; then
rm -f ${mnt}/nfs-ganesha/${server}/nfs/ganesha/${dirname}
rm -f ${mnt}/nfs-ganesha/${server}/nfs/statd/${dirname}
fi
@@ -908,8 +919,9 @@ status()
local index=1
local nodes
- # change tabs to spaces, strip leading spaces
- pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*//" > ${scratch}
+ # change tabs to spaces, strip leading spaces, including any
+ # new '*' at the beginning of a line introduced in pcs-0.10.x
+ pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*\*//" -e "s/^[ ]*//" > ${scratch}
nodes[0]=${1}; shift
@@ -924,7 +936,7 @@ status()
done
# print the nodes that are expected to be online
- grep -E "^Online:" ${scratch}
+ grep -E "Online:" ${scratch}
echo
@@ -963,7 +975,7 @@ status()
create_ganesha_conf_file()
{
- if [ $1 == "yes" ];
+ if [[ "$1" == "yes" ]];
then
if [ -e $GANESHA_CONF ];
then
@@ -1012,6 +1024,13 @@ main()
semanage boolean -m gluster_use_execmem --on
fi
+ local osid=""
+
+ osid=$(grep ^ID= /etc/os-release)
+ eval $(echo ${osid} | grep -F ID=)
+ osid=$(grep ^VERSION_ID= /etc/os-release)
+ eval $(echo ${osid} | grep -F VERSION_ID=)
+
HA_CONFDIR=${1%/}; shift
local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
local node=""
@@ -1032,7 +1051,17 @@ main()
determine_servers "setup"
- if [ "X${HA_NUM_SERVERS}X" != "X1X" ]; then
+ # Fedora 29+ and rhel/centos 8 has PCS-0.10.x
+ # default is pcs-0.10.x options but check for
+ # rhel/centos 7 (pcs-0.9.x) and adjust accordingly
+ if [[ ${ID} =~ (rhel|centos) ]]; then
+ if [[ ${VERSION_ID} == 7.* ]]; then
+ PCS9OR10_PCS_CNAME_OPTION="--name"
+ PCS9OR10_PCS_CLONE_OPTION="--clone"
+ fi
+ fi
+
+ if [[ "${HA_NUM_SERVERS}X" != "1X" ]]; then
determine_service_manager
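The net effect on the generated pcs invocations: pcs-0.10.x syntax is the default, and the pcs-0.9.x spellings are substituted only on rhel/centos 7. A condensed sketch of the two shapes (cluster name and servers illustrative):

    # pcs-0.10.x (Fedora 29+, rhel/centos 8): cname option empty, clone is positional
    pcs cluster setup --force ganesha-ha --enable server1 server2
    pcs resource create nfs-mon ocf:heartbeat:ganesha_mon clone
    # pcs-0.9.x (rhel/centos 7): PCS9OR10_PCS_CNAME_OPTION="--name", clone option "--clone"
    pcs cluster setup --force --name ganesha-ha --enable server1 server2
    pcs resource create nfs-mon ocf:heartbeat:ganesha_mon --clone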
diff --git a/extras/glusterfs-georep-upgrade.py b/extras/glusterfs-georep-upgrade.py
new file mode 100755
index 00000000000..634576058d6
--- /dev/null
+++ b/extras/glusterfs-georep-upgrade.py
@@ -0,0 +1,77 @@
+#!/usr/bin/python3
+"""
+
+Copyright (c) 2020 Red Hat, Inc. <http://www.redhat.com>
+This file is part of GlusterFS.
+
+This file is licensed to you under your choice of the GNU Lesser
+General Public License, version 3 or any later version (LGPLv3 or
+later), or the GNU General Public License, version 2 (GPLv2), in all
+cases as published by the Free Software Foundation.
+
+"""
+
+import argparse
+import errno
+import os, sys
+import shutil
+from datetime import datetime
+
+def find_htime_path(brick_path):
+ dirs = []
+ htime_dir = os.path.join(brick_path, '.glusterfs/changelogs/htime')
+ for file in os.listdir(htime_dir):
+ if os.path.isfile(os.path.join(htime_dir,file)) and file.startswith("HTIME"):
+ dirs.append(os.path.join(htime_dir, file))
+ else:
+ raise FileNotFoundError("%s unavailable" % (os.path.join(htime_dir, file)))
+ return dirs
+
+def modify_htime_file(brick_path):
+ htime_file_path_list = find_htime_path(brick_path)
+
+ for htime_file_path in htime_file_path_list:
+ changelog_path = os.path.join(brick_path, '.glusterfs/changelogs')
+ temp_htime_path = os.path.join(changelog_path, 'htime/temp_htime_file')
+ with open(htime_file_path, 'r') as htime_file, open(temp_htime_path, 'w') as temp_htime_file:
+ #extract epoch times from htime file
+ paths = htime_file.read().split("\x00")
+
+ for pth in paths:
+ epoch_no = pth.split(".")[-1]
+ changelog = os.path.basename(pth)
+ #convert epoch time to year, month and day
+ if epoch_no != '':
+ date=(datetime.fromtimestamp(float(int(epoch_no))).strftime("%Y/%m/%d"))
+ #update paths in temp htime file
+ temp_htime_file.write("%s/%s/%s\x00" % (changelog_path, date, changelog))
+ #create directory in the format year/month/days
+ path = os.path.join(changelog_path, date)
+
+ if changelog.startswith("CHANGELOG."):
+ try:
+ os.makedirs(path, mode=0o600)
+ except OSError as exc:
+ if exc.errno == errno.EEXIST:
+ pass
+ else:
+ raise
+
+ #copy existing changelogs to new directory structure, delete old changelog files
+ shutil.copyfile(pth, os.path.join(path, changelog))
+ os.remove(pth)
+
+ #rename temp_htime_file with htime file
+ os.rename(htime_file_path, os.path.join('%s.bak'%htime_file_path))
+ os.rename(temp_htime_path, htime_file_path)
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ parser.add_argument('brick_path', help="This upgrade script, which is to be run on\
+ the server side, takes a brick path as its argument, \
+ updates paths inside the htime file and alters the directory structure \
+ above the changelog files in order to support the new optimised format \
+ of the directory structure as per \
+ https://review.gluster.org/#/c/glusterfs/+/23733/")
+ args = parser.parse_args()
+ modify_htime_file(args.brick_path)
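A sketch of a run and the layout it produces, assuming a changelog whose epoch suffix falls on 2020/05/20 (brick path and timestamps illustrative):

    python3 extras/glusterfs-georep-upgrade.py /bricks/brick1
    # before: <brick>/.glusterfs/changelogs/CHANGELOG.1590000000
    # after:  <brick>/.glusterfs/changelogs/2020/05/20/CHANGELOG.1590000000
    # the original HTIME file is preserved alongside as HTIME.<timestamp>.bak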
diff --git a/extras/group-virt.example b/extras/group-virt.example
index 6a66f977b2f..cc37c98a25c 100644
--- a/extras/group-virt.example
+++ b/extras/group-virt.example
@@ -17,3 +17,8 @@ cluster.choose-local=off
client.event-threads=4
server.event-threads=4
performance.client-io-threads=on
+network.ping-timeout=20
+server.tcp-user-timeout=20
+server.keepalive-time=10
+server.keepalive-interval=2
+server.keepalive-count=5
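The added keepalive settings line up with the 20-second timeouts: a dead peer is detected after roughly keepalive-time + keepalive-interval * keepalive-count = 10 + 2*5 = 20 seconds, matching network.ping-timeout and server.tcp-user-timeout. A sketch of applying the profile (volume name illustrative; the group file is typically installed as /var/lib/glusterd/groups/virt):

    gluster volume set myvol group virt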
diff --git a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
index 885ed03ad5b..1f2564b44ff 100755
--- a/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
+++ b/extras/hook-scripts/set/post/S32gluster_enable_shared_storage.sh
@@ -79,9 +79,9 @@ done
if [ "$option" == "disable" ]; then
# Unmount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- cat /etc/fstab | grep -v "gluster_shared_storage /var/run/gluster/shared_storage/" > /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ umount /run/gluster/shared_storage
+ cat /etc/fstab | grep -v "gluster_shared_storage /run/gluster/shared_storage/" > /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
if [ "$is_originator" == 1 ]; then
@@ -104,8 +104,15 @@ function check_volume_status()
echo $status
}
-mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
- /var/run/gluster/shared_storage"
+key=`echo $5 | cut -d '=' -f 1`
+val=`echo $5 | cut -d '=' -f 2`
+if [ "$key" == "transport.address-family" ]; then
+ mount_cmd="mount -t glusterfs -o xlator-option=transport.address-family=inet6 \
+ $local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage"
+else
+ mount_cmd="mount -t glusterfs $local_node_hostname:/gluster_shared_storage \
+ /run/gluster/shared_storage"
+fi
if [ "$option" == "enable" ]; then
retry=0;
@@ -120,10 +127,10 @@ if [ "$option" == "enable" ]; then
status=$(check_volume_status)
done
# Mount the volume on all the nodes
- umount /var/run/gluster/shared_storage
- mkdir -p /var/run/gluster/shared_storage
+ umount /run/gluster/shared_storage
+ mkdir -p /run/gluster/shared_storage
$mount_cmd
- cp /etc/fstab /var/run/gluster/fstab.tmp
- echo "$local_node_hostname:/gluster_shared_storage /var/run/gluster/shared_storage/ glusterfs defaults 0 0" >> /var/run/gluster/fstab.tmp
- mv /var/run/gluster/fstab.tmp /etc/fstab
+ cp /etc/fstab /run/gluster/fstab.tmp
+ echo "$local_node_hostname:/gluster_shared_storage /run/gluster/shared_storage/ glusterfs defaults 0 0" >> /run/gluster/fstab.tmp
+ mv /run/gluster/fstab.tmp /etc/fstab
fi
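With this, an IPv6-only cluster gets a usable mount: when the volume-set option handed to the hook in $5 is transport.address-family=inet6, the matching xlator option is passed through to the mount. A sketch of the two commands the hook can now issue (hostname illustrative):

    # default (IPv4 or unspecified address family)
    mount -t glusterfs node1:/gluster_shared_storage /run/gluster/shared_storage
    # transport.address-family=inet6
    mount -t glusterfs -o xlator-option=transport.address-family=inet6 \
        node1:/gluster_shared_storage /run/gluster/shared_storage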
diff --git a/extras/hook-scripts/start/post/S31ganesha-start.sh b/extras/hook-scripts/start/post/S31ganesha-start.sh
index 90ba6bc73a5..7ad6f23ad06 100755
--- a/extras/hook-scripts/start/post/S31ganesha-start.sh
+++ b/extras/hook-scripts/start/post/S31ganesha-start.sh
@@ -4,7 +4,7 @@ OPTSPEC="volname:,gd-workdir:"
VOL=
declare -i EXPORT_ID
ganesha_key="ganesha.enable"
-GANESHA_DIR="/var/run/gluster/shared_storage/nfs-ganesha"
+GANESHA_DIR="/run/gluster/shared_storage/nfs-ganesha"
CONF1="$GANESHA_DIR/ganesha.conf"
GLUSTERD_WORKDIR=
diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py
index 174f2a2a44c..e62f7fc52a3 100755
--- a/extras/quota/quota_fsck.py
+++ b/extras/quota/quota_fsck.py
@@ -156,12 +156,10 @@ def get_quota_xattr_brick(dpath):
xattr_dict = {}
xattr_dict['parents'] = {}
- for xattr in pairs:
+ for xattr in pairs[1:]:
+ xattr = xattr.decode("utf-8")
xattr_key = xattr.split("=")[0]
- if re.search("# file:", xattr_key):
- # skip the file comment
- continue
- elif xattr_key is "":
+ if xattr_key == "":
# skip any empty lines
continue
elif not re.search("quota", xattr_key):
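The pairs[1:] slice replaces the old "# file:" check because getfattr emits that comment as the first line of every dump, and the decode turns Python 3's bytes output back into splittable text. Roughly the input being parsed (xattr name and value illustrative):

    getfattr -d -m . -e hex /bricks/brick1/dir
    # # file: bricks/brick1/dir              <- pairs[0], skipped by the slice
    # trusted.glusterfs.quota.size=0x...     <- decoded, then split on "="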
diff --git a/extras/snap_scheduler/gcron.py b/extras/snap_scheduler/gcron.py
index 1127be0e976..0e4df77d481 100755
--- a/extras/snap_scheduler/gcron.py
+++ b/extras/snap_scheduler/gcron.py
@@ -19,10 +19,10 @@ import logging.handlers
import fcntl
-GCRON_TASKS = "/var/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
+GCRON_TASKS = "/run/gluster/shared_storage/snaps/glusterfs_snap_cron_tasks"
GCRON_CROND_TASK = "/etc/cron.d/glusterfs_snap_cron_tasks"
GCRON_RELOAD_FLAG = "/var/run/gluster/crond_task_reload_flag"
-LOCK_FILE_DIR = "/var/run/gluster/shared_storage/snaps/lock_files/"
+LOCK_FILE_DIR = "/run/gluster/shared_storage/snaps/lock_files/"
log = logging.getLogger("gcron-logger")
start_time = 0.0
@@ -38,7 +38,8 @@ def initLogger(script_name):
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ universal_newlines=True)
out, err = process.communicate()
if process.returncode == 0:
logfile = os.path.join(out.strip(), script_name[:-3]+".log")
diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py
index a66c5e3d5ce..e8fcc449a9b 100755
--- a/extras/snap_scheduler/snap_scheduler.py
+++ b/extras/snap_scheduler/snap_scheduler.py
@@ -67,7 +67,7 @@ except ImportError:
SCRIPT_NAME = "snap_scheduler"
scheduler_enabled = False
log = logging.getLogger(SCRIPT_NAME)
-SHARED_STORAGE_DIR="/var/run/gluster/shared_storage"
+SHARED_STORAGE_DIR="/run/gluster/shared_storage"
GCRON_DISABLED = SHARED_STORAGE_DIR+"/snaps/gcron_disabled"
GCRON_ENABLED = SHARED_STORAGE_DIR+"/snaps/gcron_enabled"
GCRON_TASKS = SHARED_STORAGE_DIR+"/snaps/glusterfs_snap_cron_tasks"
@@ -149,7 +149,7 @@ def initLogger():
sh.setFormatter(formatter)
process = subprocess.Popen(["gluster", "--print-logdir"],
- stdout=subprocess.PIPE)
+ stdout=subprocess.PIPE, universal_newlines=True)
logfile = os.path.join(process.stdout.read()[:-1], SCRIPT_NAME + ".log")
fh = logging.FileHandler(logfile)
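Both schedulers shell out to the gluster CLI and treat its stdout as text; universal_newlines=True makes Popen yield str rather than bytes on Python 3, so the existing string handling keeps working. The command in question (output path is the usual default and may vary):

    gluster --print-logdir
    # /var/log/glusterfs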
diff --git a/extras/statedumpparse.rb b/extras/statedumpparse.rb
index 872f284fcb8..1aff43377db 100755
--- a/extras/statedumpparse.rb
+++ b/extras/statedumpparse.rb
@@ -17,25 +17,35 @@ end
# client.c:client_fd_lk_ctx_dump uses a six-dash record separator.
ARRSEP = /^(-{5,6}=-{5,6})?$/
HEAD = /^\[(.*)\]$/
+INPUT_FORMATS = %w[statedump json]
format = 'json'
+input_format = 'statedump'
tz = '+0000'
memstat_select,memstat_reject = //,/\Z./
-human = false
OptionParser.new do |op|
op.banner << " [<] <STATEDUMP>"
- op.on("-f", "--format=F", "json/yaml/memstat") { |s| format = s }
+ op.on("-f", "--format=F", "json/yaml/memstat(-[plain|human|json])") { |s| format = s }
+ op.on("--input-format=F", INPUT_FORMATS.join(?/)) { |s| input_format = s }
op.on("--timezone=T",
"time zone to apply to zoneless timestamps [default UTC]") { |s| tz = s }
- op.on("--memstat-select=RX", "memstat: select memory types maxtching RX") { |s|
+ op.on("--memstat-select=RX", "memstat: select memory types matching RX") { |s|
memstat_select = Regexp.new s
}
- op.on("--memstat-reject=RX", "memstat: reject memory types maxtching RX") { |s|
+ op.on("--memstat-reject=RX", "memstat: reject memory types matching RX") { |s|
memstat_reject = Regexp.new s
}
- op.on("--memstat-human", "memstat: human readable") { human = true }
end.parse!
+
+if format =~ /\Amemstat(?:-(.*))?/
+ memstat_type = $1 || 'plain'
+ unless %w[plain human json].include? memstat_type
+ raise "unknown memstat type #{memstat_type.dump}"
+ end
+ format = 'memstat'
+end
+
repr, logsep = case format
when 'yaml'
require 'yaml'
@@ -50,8 +60,11 @@ else
end
formatter = proc { |e| puts repr.call(e) }
-d1 = {}
+INPUT_FORMATS.include? input_format or raise "unknown input format '#{input_format}'"
+
+dumpinfo = {}
+# parse a statedump entry
elem_cbk = proc { |s,&cbk|
arraylike = false
s.grep(/\S/).empty? and next
@@ -74,15 +87,8 @@ elem_cbk = proc { |s,&cbk|
body.reject(&:empty?).map { |e|
ea = e.map { |l|
k,v = l.split("=",2)
- [k, begin
- Integer v
- rescue ArgumentError
- begin
- Float v
- rescue ArgumentError
- v
- end
- end]
+ m = /\A(0|-?[1-9]\d*)(\.\d+)?\Z/.match v
+ [k, m ? (m[2] ? Float(v) : Integer(v)) : v]
}
begin
ea.to_h
@@ -93,83 +99,110 @@ elem_cbk = proc { |s,&cbk|
}
if body
- cbk.call [head, arraylike ? body : body[0]]
+ cbk.call [head, arraylike ? body : (body.empty? ? {} : body[0])]
else
STDERR.puts ["WARNING: failed to parse record:", repr.call(s)].join(logsep)
end
}
+# aggregator routine
aggr = case format
when 'memstat'
- mh = {}
+ meminfo = {}
+ # commit memory-related entries to meminfo
proc { |k,r|
case k
when /memusage/
- (mh["GF_MALLOC"]||={})[k] ||= r["size"] if k =~ memstat_select and k !~ memstat_reject
+ (meminfo["GF_MALLOC"]||={})[k] ||= r["size"] if k =~ memstat_select and k !~ memstat_reject
when "mempool"
r.each {|e|
kk = "mempool:#{e['pool-name']}"
- (mh["mempool"]||={})[kk] ||= e["size"] if kk =~ memstat_select and kk !~ memstat_reject
+ (meminfo["mempool"]||={})[kk] ||= e["size"] if kk =~ memstat_select and kk !~ memstat_reject
}
end
}
else
+ # just format data, don't actually aggregate anything
proc { |pair| formatter.call pair }
end
-acc = []
-$<.each { |l|
- l = l.strip
- if l =~ /^(DUMP-(?:START|END)-TIME):\s+(.*)/
- d1["_meta"]||={}
- (d1["_meta"]["date"]||={})[$1] = Time.parse([$2, tz].join " ")
- next
- end
+# processing the data
+case input_format
+when 'statedump'
+ acc = []
+ $<.each { |l|
+ l = l.strip
+ if l =~ /^(DUMP-(?:START|END)-TIME):\s+(.*)/
+ dumpinfo["_meta"]||={}
+ (dumpinfo["_meta"]["date"]||={})[$1] = Time.parse([$2, tz].join " ")
+ next
+ end
- if l =~ HEAD
- elem_cbk.call(acc, &aggr)
- acc = [l]
- next
- end
+ if l =~ HEAD
+ elem_cbk.call(acc, &aggr)
+ acc = [l]
+ next
+ end
- acc << l
-}
-elem_cbk.call(acc, &aggr)
+ acc << l
+ }
+ elem_cbk.call(acc, &aggr)
+when 'json'
+ $<.each { |l|
+ r = JSON.load l
+ case r
+ when Array
+ aggr[r]
+ when Hash
+ dumpinfo.merge! r
+ end
+ }
+end
+# final actions: output aggregated data
case format
when 'memstat'
- hr = proc { |n|
- qa = %w[B kB MB GB]
- q = ((1...qa.size).find {|i| n < (1 << i*10)} || qa.size) - 1
- [n*100 / (1 << q*10) / 100.0, qa[q]].join
- }
-
- ma = mh.values.map(&:to_a).inject(:+)
- totals = mh.map { |coll,h| [coll, h.values.inject(:+)] }.to_h
+ ma = meminfo.values.map(&:to_a).inject(:+)
+ totals = meminfo.map { |coll,h| [coll, h.values.inject(:+)] }.to_h
tt = ma.transpose[1].inject(:+)
- templ = "%{val} %{key}"
- tft = proc { |t| t }
- nft = if human
- nw = [ma.transpose[1], totals.values, tt].flatten.map{|n| hr[n].size}.max
- proc { |n|
- hn = hr[n]
- " " * (nw - hn.size) + hn
+ summary_sep,showm = case memstat_type
+ when 'json'
+ ["", proc { |k,v| puts({type: k, value: v}.to_json) }]
+ when 'plain', 'human'
+ # human-friendly number representation
+ hr = proc { |n|
+ qa = %w[B kB MB GB]
+ q = ((1...qa.size).find {|i| n < (1 << i*10)} || qa.size) - 1
+ "%.2f%s" % [n.to_f / (1 << q*10), qa[q]]
}
+
+ templ = "%{val} %{key}"
+ tft = proc { |t| t }
+ nft = if memstat_type == 'human'
+ nw = [ma.transpose[1], totals.values, tt].flatten.map{|n| hr[n].size}.max
+ proc { |n|
+ hn = hr[n]
+ " " * (nw - hn.size) + hn
+ }
+ else
+ nw = tt.to_s.size
+ proc { |n| "%#{nw}d" % n }
+ end
+ ## Alternative template, key first:
+ # templ = "%{key} %{val}"
+ # tw = ma.transpose[0].map(&:size).max
+ # tft = proc { |t| t + " " * [tw - t.size, 0].max }
+ # nft = (memstat_type == 'human') ? hr : proc { |n| n }
+ ["\n", proc { |k,v| puts templ % {key: tft[k], val: nft[v]} }]
else
- nw = tt.to_s.size
- proc { |n| "%#{nw}d" % n }
+ raise 'this should be impossible'
end
- # templ = "%{key} %{val}"
- # tw = ma.transpose[0].map(&:size).max
- # tft = proc { |t| t + " " * [tw - t.size, 0].max }
- # nft = human ? hr : proc { |n| n }
- showm = proc { |k,v| puts templ % {key: tft[k], val: nft[v]} }
ma.sort_by { |k,v| v }.each(&showm)
- puts
+ print summary_sep
totals.each { |coll,t| showm.call "Total #{coll}", t }
showm.call "TOTAL", tt
else
- formatter.call d1
+ formatter.call dumpinfo
end
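The dropped --memstat-human switch folds into the format name, and --input-format makes a previously exported json stream re-ingestable. Possible invocations after this change (file names illustrative):

    # memstat output as plain (default), human-readable sizes, or json lines
    ruby extras/statedumpparse.rb -f memstat-human glusterdump.12345.dump
    # export once to json, re-process later without the original statedump
    ruby extras/statedumpparse.rb -f json glusterdump.12345.dump > dump.jsonl
    ruby extras/statedumpparse.rb --input-format=json -f memstat < dump.jsonl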
diff --git a/extras/systemd/glusterd.service.in b/extras/systemd/glusterd.service.in
index 32542d9f78b..abb0d82911f 100644
--- a/extras/systemd/glusterd.service.in
+++ b/extras/systemd/glusterd.service.in
@@ -1,6 +1,8 @@
[Unit]
Description=GlusterFS, a clustered file-system server
Documentation=man:glusterd(8)
+StartLimitBurst=6
+StartLimitIntervalSec=3600
Requires=@RPCBIND_SERVICE@
After=network.target @RPCBIND_SERVICE@
Before=network-online.target
@@ -10,7 +12,7 @@ Type=forking
PIDFile=@localstatedir@/run/glusterd.pid
LimitNOFILE=65536
Environment="LOG_LEVEL=INFO"
-EnvironmentFile=-@sysconfdir@/sysconfig/glusterd
+EnvironmentFile=-@SYSCONF_DIR@/sysconfig/glusterd
ExecStart=@prefix@/sbin/glusterd -p @localstatedir@/run/glusterd.pid --log-level $LOG_LEVEL $GLUSTERD_OPTIONS
KillMode=process
TimeoutSec=300
@@ -18,7 +20,6 @@ SuccessExitStatus=15
Restart=on-abnormal
RestartSec=60
StartLimitBurst=6
-StartLimitIntervalSec=3600
StartLimitInterval=3600
[Install]
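StartLimitIntervalSec and StartLimitBurst are [Unit]-section settings in current systemd (the [Service] spellings are legacy aliases kept for older releases), hence the move above while the old keys remain below it. A quick way to confirm the limit in effect (values assumed from the unit file above):

    systemctl show glusterd -p StartLimitBurst -p StartLimitIntervalUSec
    # StartLimitBurst=6
    # StartLimitIntervalUSec=1h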