Diffstat (limited to 'tests/volume.rc')
-rw-r--r--  tests/volume.rc  607
1 file changed, 575 insertions(+), 32 deletions(-)
diff --git a/tests/volume.rc b/tests/volume.rc
index 887a9cae861..b38848c0e52 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -6,6 +6,13 @@ function volinfo_field()
$CLI volume info $vol | grep "^$field: " | sed 's/.*: //';
}
+function volume_get_field()
+{
+ local vol=$1
+ local field=$2
+ $CLI volume get $vol $field | tail -1 | awk '{print $2}'
+}
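+# Usage sketch (hypothetical test line; assumes the EXPECT helper from
+# include.rc and a volume named $V0):
+#   EXPECT "on" volume_get_field $V0 cluster.self-heal-daemon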
+
function brick_count()
{
@@ -14,9 +21,47 @@ function brick_count()
$CLI volume info $vol | egrep "^Brick[0-9]+: " | wc -l;
}
+function check_brick_status() {
+ cmd="gluster --xml volume status"
+ local daemon=$1
+
+ if [[ -z $daemon ]]
+ then
+ echo `$cmd | grep '<status>1' | wc -l`
+ else
+ echo `$cmd | grep -A 5 ${daemon} | grep '<status>1' | wc -l`
+ fi
+}
+
function online_brick_count ()
{
- pidof glusterfsd | wc -w
+ local v1=0
+ local v2=0
+ local v3=0
+ local v4=0
+ local v5=0
+ local v6=0
+ local v7=0
+ local tot=0
+
+ # First count the total number of "up" status entries, then subtract the
+ # entries reported by the auxiliary daemons.
+ v1=`check_brick_status`
+ v2=`check_brick_status "Self-heal"`
+ v3=`check_brick_status "Quota"`
+ v4=`check_brick_status "Snapshot"`
+ v5=`check_brick_status "Tier"`
+ v6=`check_brick_status "Scrubber"`
+ v7=`check_brick_status "Bitrot"`
+
+ tot=$((v1-v2-v3-v4-v5-v6-v7))
+ echo $tot
+
+}
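+# online_brick_count counts every <status>1 entry in the XML status output and
+# subtracts those belonging to auxiliary daemons (self-heal, quota, snapshot,
+# tier, scrubber, bitrot), leaving only brick processes. Hypothetical usage
+# (assumes EXPECT_WITHIN from include.rc and a 3-brick volume):
+#   EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" online_brick_count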
+
+
+function brick_up_status {
+ local vol=$1
+ local host=$2
+ local brick=$3
+ $CLI volume status $vol $host:$brick --xml | sed -ne 's/.*<status>\([01]\)<\/status>/\1/p'
}
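+# brick_up_status echoes "1" while the given brick is online and "0" once it
+# goes down. Hypothetical usage (assumes EXPECT_WITHIN from include.rc):
+#   EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1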
function volume_option()
@@ -27,12 +72,16 @@ function volume_option()
}
function rebalance_status_field {
- #The rebalance status can be upto 3 words, (ex:'fix-layout in progress'), hence the awk-print $7 thru $9.
+ $CLI volume rebalance $1 status | awk '{print $7}' | sed -n 3p
+}
+
+function fix-layout_status_field {
+ #The fix-layout status can be up to 3 words, (ex:'fix-layout in progress'), hence the awk-print $2 thru $4.
#But if the status is less than 3 words, it also prints the next field i.e the run_time_in_secs.(ex:'completed 3.00').
#So we trim the numbers out with `tr`. Finally remove the trailing white spaces with sed. What we get is one of the
#strings in the 'cli_vol_task_status_str' char array of cli-rpc-ops.c
- $CLI volume rebalance $1 status | awk '{print $7,$8,$9}' |sed -n 3p |tr -d '[^0-9+\.]'|sed 's/ *$//g'
+ $CLI volume rebalance $1 status | awk '{print $2,$3,$4}' |sed -n 3p |tr -d '[^0-9+\.]'|sed 's/ *$//g'
}
function remove_brick_status_completed_field {
@@ -43,7 +92,8 @@ function remove_brick_status_completed_field {
function get_mount_process_pid {
local vol=$1
- ps auxww | grep glusterfs | grep -E "volfile-id[ =]/?$vol " | awk '{print $2}' | head -1
+ local mnt=$2
+ ps auxww | grep glusterfs | grep -E "volfile-id[ =]/?$vol .*$mnt" | awk '{print $2}' | head -1
}
function get_nfs_pid ()
@@ -53,7 +103,7 @@ function get_nfs_pid ()
function read_nfs_pidfile ()
{
- echo `cat $GLUSTERD_WORKDIR/nfs/run/nfs.pid`
+ echo `cat $GLUSTERD_PIDFILEDIR/nfs/nfs.pid`
}
function cleanup_statedump {
@@ -62,21 +112,39 @@ function cleanup_statedump {
#.vimrc friendly comment */
}
+function wait_statedump_ready {
+ local maxtime="${1}000000000"
+ local pid="$2"
+ local deadline="$(($(date +%s%N) + maxtime))"
+ local fname
+
+ while [[ "$(date +%s%N)" < "$deadline" ]]; do
+ fname="$statedumpdir/$(ls $statedumpdir | grep -E "\.$pid\.dump\.")"
+ if [[ -f "$fname" ]]; then
+ grep "^DUMP-END-TIME" "$fname" >/dev/null
+ if [[ $? -eq 0 ]]; then
+ echo $fname
+ return
+ fi
+ fi
+ sleep 0.1
+ done
+
+ echo "nostatedump"
+}
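+# wait_statedump_ready polls $statedumpdir until a dump file for the given pid
+# contains DUMP-END-TIME (i.e. the dump is complete) or the timeout, in
+# seconds, expires; on timeout it echoes "nostatedump". Hypothetical usage:
+#   fname=$(wait_statedump_ready 3 $pid)
+#   [ "$fname" != "nostatedump" ] || return 1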
+
function generate_statedump {
- local fpath=""
pid=$1
#remove old stale statedumps
cleanup_statedump $pid
kill -USR1 $pid
- #Wait till the statedump is generated
- sleep 1
- fname=$(ls $statedumpdir | grep -E "\.$pid\.dump\.")
- echo $statedumpdir/$fname
+ wait_statedump_ready 3 $pid
}
function generate_mount_statedump {
local vol=$1
- generate_statedump $(get_mount_process_pid $vol)
+ local mnt=$2
+ generate_statedump $(get_mount_process_pid $vol $mnt)
}
function cleanup_mount_statedump {
@@ -87,18 +155,36 @@ function cleanup_mount_statedump {
function snap_client_connected_status {
local vol=$1
local fpath=$(generate_mount_statedump $vol)
- up=$(grep -A2 xlator.protocol.client.$vol-snapd-client.priv $fpath | tail -1 | cut -f 2 -d'=')
+ up=$(grep -a -A1 xlator.protocol.client.$vol-snapd-client.priv $fpath | tail -1 | cut -f 2 -d'=')
rm -f $fpath
echo "$up"
}
+function _jbrc_child_up_status {
+ local vol=$1
+ #brick_id is (brick-num in volume info - 1)
+ local brick_id=$2
+ local gen_state_dump=$3
+ local fpath=$($gen_state_dump $vol)
+ up=$(grep -a -B1 child_$brick_id=$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
+ rm -f $fpath
+ echo "$up"
+}
+
+function jbrc_child_up_status {
+ local vol=$1
+ #brick_id is (brick-num in volume info - 1)
+ local brick_id=$2
+ _jbrc_child_up_status $vol $brick_id generate_mount_statedump
+}
+
function _afr_child_up_status {
local vol=$1
#brick_id is (brick-num in volume info - 1)
local brick_id=$2
local gen_state_dump=$3
local fpath=$($gen_state_dump $vol)
- up=$(grep -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
+ up=$(grep -a -B1 trusted.afr.$vol-client-$brick_id $fpath | head -1 | cut -f2 -d'=')
rm -f $fpath
echo "$up"
}
@@ -107,7 +193,13 @@ function afr_child_up_status_meta {
local mnt=$1
local repl=$2
local child=$3
- grep "child_up\[$child\]" $mnt/.meta/graphs/active/$repl/private | awk '{print $3}'
+ grep -E "^child_up\[$child\]" $mnt/.meta/graphs/active/$repl/private | awk '{print $3}'
+}
+
+function client_connected_status_meta {
+ local mnt=$1
+ local client=$2
+ grep "connected" $mnt/.meta/graphs/active/$client/private | awk '{print $3}'
}
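+# Both helpers read the active graph under the mount's .meta directory.
+# Hypothetical usage (assumes EXPECT_WITHIN from include.rc and a replica
+# volume mounted at $M0):
+#   EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status_meta $M0 ${V0}-replicate-0 0
+#   EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" client_connected_status_meta $M0 ${V0}-client-0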
function afr_child_up_status {
@@ -121,7 +213,7 @@ function ec_get_info {
local vol=$1
local dist_id=$2
local key=$3
- local fpath=$(generate_mount_statedump $vol)
+ local fpath=$4
local value=$(sed -n "/^\[cluster\/disperse\.$vol-disperse-$dist_id\]/,/^\[/{s/^$key=\(.*\)/\1/p;}" $fpath | head -1)
rm -f $fpath
echo "$value"
@@ -131,22 +223,40 @@ function ec_child_up_status {
local vol=$1
local dist_id=$2
local brick_id=$(($3 + 1))
- local mask=$(ec_get_info $vol $dist_id "childs_up_mask")
+ local mnt=$4
+ local mask=$(ec_get_info $vol $dist_id "childs_up_mask" $(generate_mount_statedump $vol $mnt))
echo "${mask: -$brick_id:1}"
}
function ec_child_up_count {
local vol=$1
local dist_id=$2
- ec_get_info $vol $dist_id "childs_up"
+ local mnt=$3
+ ec_get_info $vol $dist_id "childs_up" $(generate_mount_statedump $vol $mnt)
+}
+
+function ec_child_up_status_shd {
+ local vol=$1
+ local dist_id=$2
+ local brick_id=$(($3 + 1))
+ local mask=$(ec_get_info $vol $dist_id "childs_up_mask" $(generate_shd_statedump $vol))
+ echo "${mask: -$brick_id:1}"
+}
+
+function ec_child_up_count_shd {
+ local vol=$1
+ local dist_id=$2
+ ec_get_info $vol $dist_id "childs_up" $(generate_shd_statedump $vol)
}
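+# ec_get_info now takes the statedump path as its fourth argument, so the same
+# parser serves both mount and shd statedumps. Hypothetical usage (assumes
+# EXPECT_WITHIN from include.rc and a disperse volume mounted at $M0):
+#   EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" ec_child_up_status $V0 0 1 $M0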
function get_shd_process_pid {
- ps auxww | grep glusterfs | grep -E "glustershd/run/glustershd.pid" | awk '{print $2}' | head -1
+ local vol=$1
+ ps auxww | grep "process-name\ glustershd" | awk '{print $2}' | head -1
}
function generate_shd_statedump {
- generate_statedump $(get_shd_process_pid)
+ local vol=$1
+ generate_statedump $(get_shd_process_pid $vol)
}
function generate_nfs_statedump {
@@ -186,19 +296,39 @@ function quotad_up_status {
gluster volume status | grep "Quota Daemon" | awk '{print $7}'
}
-function get_brick_pid {
+function get_glusterd_pid {
+ pgrep '^glusterd$' | head -1
+}
+
+function get_brick_pidfile {
local vol=$1
local host=$2
local brick=$3
local brick_hiphenated=$(echo $brick | tr '/' '-')
- echo `cat $GLUSTERD_WORKDIR/vols/$vol/run/${host}${brick_hiphenated}.pid`
+ echo $GLUSTERD_PIDFILEDIR/vols/$vol/${host}${brick_hiphenated}.pid
+}
+
+function get_brick_pid {
+ cat $(get_brick_pidfile $*)
}
function kill_brick {
local vol=$1
local host=$2
local brick=$3
- kill -9 $(get_brick_pid $vol $host $brick)
+
+ local pidfile=$(get_brick_pidfile $vol $host $brick)
+ local cmdline="/proc/$(cat $pidfile)/cmdline"
+ local socket=$(cat $cmdline | tr '\0' '\n' | grep '\.socket$')
+
+ gf_attach -d $socket $brick
+
+ local deadline="$(($(date +%s%N) + ${PROCESS_UP_TIMEOUT}000000000))"
+ while [[ "$(date +%s%N)" < "$deadline" ]]; do
+ if [[ "$(brick_up_status $vol $host $brick)" == "0" ]]; then
+ break
+ fi
+ done
}
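+# kill_brick now detaches the brick through gf_attach on the brick's socket
+# (instead of SIGKILL on the brick pid) and waits until brick_up_status
+# reports "0" or the timeout expires. Hypothetical usage (assumes TEST from
+# include.rc):
+#   TEST kill_brick $V0 $H0 $B0/${V0}1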
function check_option_help_presence {
@@ -209,10 +339,14 @@ function check_option_help_presence {
function afr_get_changelog_xattr {
local file=$1
local xkey=$2
- getfattr -n $xkey -e hex $file 2>/dev/null | grep "$xkey" | cut -f2 -d'='
+ local xval=$(getfattr -n $xkey -e hex $file 2>/dev/null | grep "$xkey" | cut -f2 -d'=')
+ if [ -z $xval ]; then
+ xval="0x000000000000000000000000"
+ fi
+ echo $xval
}
-function afr_get_pending_heal_count {
+function get_pending_heal_count {
local vol=$1
gluster volume heal $vol info | grep "Number of entries" | awk '{ sum+=$4} END {print sum}'
}
@@ -242,6 +376,36 @@ function gf_gfid_xattr_to_str {
echo "${xval:2:8}-${xval:10:4}-${xval:14:4}-${xval:18:4}-${xval:22:12}"
}
+function get_text_xattr {
+ local key=$1
+ local path=$2
+ getfattr -h -d -m. -e text $path 2>/dev/null | grep -a $key | cut -f2 -d'='
+}
+
+function get_gfid2path {
+ local path=$1
+ getfattr -h --only-values -n glusterfs.gfidtopath $path 2>/dev/null
+}
+
+function get_mdata {
+ local path=$1
+ getfattr -h -e hex -n trusted.glusterfs.mdata $path 2>/dev/null | grep "trusted.glusterfs.mdata" | cut -f2 -d'='
+}
+
+function get_mdata_count {
+ getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | wc -l
+}
+
+function get_mdata_uniq_count {
+ getfattr -d -m . -e hex $@ 2>/dev/null | grep mdata | uniq | wc -l
+}
+
+function get_xattr_key {
+ local key=$1
+ local path=$2
+ getfattr -h -d -m. -e text $path 2>/dev/null | grep -a $key | cut -f1 -d'='
+}
+
function gf_check_file_opened_in_brick {
vol=$1
host=$2
@@ -396,13 +560,17 @@ function delete_volumes() {
}
function volume_exists() {
- local volname=$1
- $CLI volume info $volname 2>&1 | grep -q 'does not exist'
- if [ $? -eq 0 ]; then
- return 1
- else
- return 0
- fi
+ $CLI volume info $1 > /dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
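+# volume_exists now echoes "Y"/"N" instead of returning an exit status, so it
+# can be consumed by EXPECT-style checks. Hypothetical usage:
+#   EXPECT "Y" volume_exists $V0
+#   EXPECT "N" volume_exists nonexistent-volume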
+
+function killall_gluster() {
+ terminate_pids $(process_pids gluster)
+ find $GLUSTERD_PIDFILEDIR -name '*.pid' | xargs rm -f
}
function afr_get_index_count {
@@ -420,8 +588,13 @@ function path_exists {
if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
}
+function path_size {
+ local size=$(stat -c %s $1)
+ if [ $? -eq 0 ]; then echo $size; else echo ""; fi
+}
+
function force_umount {
- umount -f $*
+ ${UMOUNT_F} $*
if [ $? -eq 0 ]; then echo "Y"; else echo "N"; fi
}
@@ -457,3 +630,373 @@ function volgen_volume_option {
local xl_option="$5"
sed -e "/./{H;\$!d;}" -e "x;/volume $xl_vol/!d;/type $xl_type\/$xl_feature/!d;/option $xl_option/!d" $volfile | grep " $xl_option " | awk '{print $3}'
}
+
+function mount_get_option_value {
+ local m=$1
+ local subvol=$2
+ local key=$3
+
+ grep -w "$3" $m/.meta/graphs/active/$subvol/private | awk '{print $3}'
+}
+
+function get_volume_mark {
+ getfattr -n trusted.glusterfs.volume-mark -ehex $1 | sed -n 's/^trusted.glusterfs.volume-mark=0x//p' | cut -b5-36 | sed 's/\([a-f0-9]\{8\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)\([a-f0-9]\{4\}\)/\1-\2-\3-\4-/'
+}
+
+# setup geo-rep in a single node.
+
+function setup_georep {
+
+ $CLI volume create $GMV0 replica 2 $H0:$B0/${GMV0}{1,2,3,4};
+
+ $CLI volume start $GMV0
+
+ $CLI volume create $GSV0 replica 2 $H0:$B0/${GSV0}{1,2,3,4};
+
+ $CLI volume start $GSV0
+
+ $CLI system:: execute gsec_create
+
+ $CLI volume geo-rep $GMV0 $H0::$GSV0 create push-pem
+
+ $CLI volume geo-rep $GMV0 $H0::$GSV0 start
+
+ sleep 80 # after start, geo-rep takes a while to become stable
+
+}
+
+
+# stop and delete geo-rep session
+
+function cleanup_georep {
+
+ $CLI volume geo-rep $GMV0 $H0::$GSV0 stop
+
+ $CLI volume geo-rep $GMV0 $H0::$GSV0 delete
+}
+
+function num_graphs
+{
+ local mountpoint=$1
+ echo `ls $mountpoint/.meta/graphs/ | grep -v active | wc -l`
+}
+
+function get_aux()
+{
+## Check if an auxiliary mount is present
+local aux_suffix=$1
+local rundir=$(gluster --print-statedumpdir)
+local pidfile="${rundir}/${V0}$aux_suffix.pid"
+if [ -f $pidfile ];
+then
+ local pid=$(cat $pidfile)
+ pidof glusterfs 2>&1 | grep -w $pid > /dev/null
+
+ if [ $? -eq 0 ]
+ then
+ echo "0"
+ else
+ echo "1"
+ fi
+else
+ echo "1"
+fi
+}
+
+function get_list_aux()
+{
+# check for quota list aux mount
+ get_aux "_quota_list"
+}
+
+function get_limit_aux()
+{
+# check for quota limit aux mount
+ get_aux "_quota_limit"
+}
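+# get_list_aux/get_limit_aux echo "0" while the corresponding quota auxiliary
+# mount is alive (pidfile present and its pid is a running glusterfs) and "1"
+# otherwise. Hypothetical usage (assumes EXPECT_WITHIN from include.rc):
+#   EXPECT_WITHIN $PROCESS_UP_TIMEOUT "0" get_list_aux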
+
+function check_for_xattr {
+ local xattr=$1
+ local filepath=$2
+ getfattr -n $xattr $filepath 2>/dev/null | grep "$xattr" | cut -f1 -d'='
+}
+
+function get_bitd_count {
+ ps auxww | grep glusterfs | grep bitd.pid | grep -v grep | wc -l
+}
+
+function get_scrubd_count {
+ ps auxww | grep glusterfs | grep scrub.pid | grep -v grep | wc -l
+}
+
+function get_quarantine_count {
+ ls -l "$1/.glusterfs/quarantine" | wc -l
+}
+
+function get_quotad_count {
+ ps auxww | grep glusterfs | grep quotad.pid | grep -v grep | wc -l
+}
+
+function get_nfs_count {
+ ps auxww | grep glusterfs | grep nfs.pid | grep -v grep | wc -l
+}
+
+function get_snapd_count {
+ ps auxww | grep glusterfs | grep snapd.pid | grep -v grep | wc -l
+}
+
+function drop_cache() {
+ case $OSTYPE in
+ Linux)
+ echo 3 > /proc/sys/vm/drop_caches
+ ;;
+ *)
+ # fail but flush caches
+ ( cd $1 && umount $1 2>/dev/null )
+ ;;
+ esac
+}
+
+function quota_list_field () {
+ local QUOTA_PATH=$1
+ local FIELD=$2
+ local awk_arg="{print \$$FIELD}"
+
+ $CLI volume quota $V0 list $QUOTA_PATH | grep $QUOTA_PATH | awk "$awk_arg"
+}
+
+function quota_object_list_field () {
+ local QUOTA_PATH=$1
+ local FIELD=$2
+ local awk_arg="{print \$$FIELD}"
+
+ $CLI volume quota $V0 list-objects $QUOTA_PATH | grep $QUOTA_PATH | awk "$awk_arg"
+}
+
+function quotausage()
+{
+ quota_list_field $1 4
+}
+
+function quota_hard_limit()
+{
+ quota_list_field $1 2
+}
+
+function quota_soft_limit()
+{
+ quota_list_field $1 3
+}
+
+function quota_sl_exceeded()
+{
+ quota_list_field $1 6
+}
+
+function quota_hl_exceeded()
+{
+ quota_list_field $1 7
+
+}
+
+function quota_object_hard_limit()
+{
+ quota_object_list_field $1 2
+}
+
+function scrub_status()
+{
+ local vol=$1;
+ local field=$2;
+
+ $CLI volume bitrot $vol scrub status | grep "^$field: " | sed 's/.*: //';
+}
+
+function get_gfid_string {
+ local path=$1;
+ getfattr -n glusterfs.gfid.string $1 2>/dev/null \
+ | grep glusterfs.gfid.string | cut -d '"' -f 2
+}
+
+function file_all_zeroes {
+ < $1 tr -d '\0' | read -n 1 || echo 1
+}
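+# file_all_zeroes echoes "1" when the file contains no non-zero bytes (nothing
+# is left to read after stripping NULs) and nothing otherwise. Hypothetical
+# usage: EXPECT "1" file_all_zeroes $B0/${V0}1/sparse-file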
+
+function get_hard_link_count {
+ local path=$1;
+ stat -c %h $path
+}
+
+function count_sh_entries()
+{
+ ls $1/.glusterfs/indices/xattrop | grep -v "xattrop-" | wc -l
+}
+
+function check_brick_multiplex() {
+ cnt="$(ls /var/log/glusterfs/bricks|wc -l)"
+ local ret=$($CLI volume info|grep "cluster.brick-multiplex"|cut -d" " -f2)
+ local bcnt="$(brick_count)"
+
+ if [ $bcnt -ne 1 ]; then
+ if [ "$ret" = "on" ] || [ $cnt -eq 1 ]; then
+ echo "Y"
+ else
+ echo "N"
+ fi
+ else
+ echo "N"
+ fi
+}
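+# check_brick_multiplex echoes "Y" when multiplexing is in effect (more than
+# one brick, with cluster.brick-multiplex on or only a single brick log file)
+# and "N" otherwise; get_fd_count below uses it to pick the right statedump
+# section.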
+
+function get_fd_count {
+ local vol=$1
+ local host=$2
+ local brick=$3
+ local fname=$4
+ local val="$(check_brick_multiplex)"
+ local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
+ local statedump=$(generate_brick_statedump $vol $host $brick)
+ if [ $val == "N" ]; then
+ count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+ else
+ count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+ fi
+# If no information is found for a given gfid, it means it has not been
+# accessed, so it doesn't have any open fd. In this case we return 0.
+ count="${count:-0}"
+ rm -f $statedump
+ echo $count
+}
+
+
+function get_active_fd_count {
+ local vol=$1
+ local host=$2
+ local brick=$3
+ local fname=$4
+ local val="$(check_brick_multiplex)"
+ local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
+ local statedump=$(generate_brick_statedump $vol $host $brick)
+ if [ $val == "N" ]; then
+ count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+ else
+ count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+ fi
+ rm -f $statedump
+ echo $count
+}
+
+function get_mount_active_size_value {
+ local vol=$1
+ local mount=$2
+ local statedump=$(generate_mount_statedump $vol $mount)
+ local val=$(grep "active_size" $statedump | cut -f2 -d'=' | tail -1)
+ rm -f $statedump
+ echo $val
+}
+
+function get_mount_lru_size_value {
+ local vol=$1
+ local mount=$2
+ local statedump=$(generate_mount_statedump $vol $mount)
+ local val=$(grep "lru_size" $statedump | cut -f2 -d'=' | tail -1)
+ rm -f $statedump
+ echo $val
+}
+
+function check_changelog_op {
+ local clog_path=$1
+ local op=$2
+
+ $PYTHON $(dirname $0)/../../utils/changelogparser.py ${clog_path}/CHANGELOG | grep "$op" | wc -l
+}
+
+function processed_changelogs {
+ local processed_dir=$1
+ count=$(ls -l $processed_dir | grep CHANGELOG | wc -l)
+ if [ $count -gt 0 ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
+
+function volgen_check_ancestry {
+ #Returns Y if ancestor_xl is an ancestor of $child_xl according to the volfile
+ local volfile="$1"
+
+ local child_xl_type="$2"
+ local child_xl="$3"
+
+ local ancestor_xl_type="$4"
+ local ancestor_xl="$5"
+
+ child_linenum=$(awk "/type $child_xl_type\/$child_xl/ {print FNR}" $volfile)
+ ancestor_linenum=$(awk "/type $ancestor_xl_type\/$ancestor_xl/ {print FNR}" $volfile)
+
+ if [ $child_linenum -lt $ancestor_linenum ];
+ then
+ echo "Y"
+ else
+ echo "N"
+ fi
+}
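+# volgen_check_ancestry relies on the fact that in a volfile a parent xlator
+# is defined after its children, so the child's line number being smaller than
+# the ancestor's means the ancestry holds. Hypothetical usage:
+#   EXPECT "Y" volgen_check_ancestry $volfile protocol client performance quick-read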
+
+function get_shd_mux_pid {
+ local volume=$1
+ pid=`$CLI volume status $volume shd | awk '/Self-heal/{print $8}'`
+ echo $pid
+}
+
+function shd_count {
+ ps aux | grep "glustershd" | grep -v grep | wc -l
+}
+
+function number_healer_threads_shd {
+ local pid=$(get_shd_mux_pid $1)
+ pstack $pid | grep $2 | wc -l
+}
+
+function get_mtime {
+ local time=$(get-mdata-xattr -m $1)
+ if [ $time == "-1" ];
+ then
+ echo $(stat -c %Y $1)
+ else
+ echo $time
+ fi
+}
+
+function get_ctime {
+ local time=$(get-mdata-xattr -c $1)
+ if [ $time == "-1" ];
+ then
+ echo $(stat -c %Z $1)
+ else
+ echo $time
+ fi
+}
+
+function get_atime {
+ local time=$(get-mdata-xattr -a $1)
+ if [ $time == "-1" ];
+ then
+ echo $(stat -c %X $1)
+ else
+ echo $time
+ fi
+}
+
+function get-xml()
+{
+ $CLI $1 --xml | xmllint --format - | grep "$2" | sed "s/\(<$2>\|<\/$2>\)//g"
+}
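+# get-xml runs an arbitrary CLI command with --xml and extracts the value of a
+# single element. Hypothetical usage:
+#   get-xml "volume info $V0" opRet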
+
+function logging_time_check()
+{
+ local logdir=$1
+ local logfile=`echo ${0##*/}`_glusterd1.log
+
+ cat $logdir/1/$logfile | tail -n 2 | head -n 1 | grep $(date +%H:%M) | wc -l
+}