| author | Kaleb S. KEITHLEY <kkeithle@redhat.com> | 2016-11-18 13:07:50 -0500 | 
|---|---|---|
| committer | Kaleb KEITHLEY <kkeithle@redhat.com> | 2016-12-01 03:45:25 -0800 | 
| commit | 5bc501fde8d8d81ed4bd12edc306bc2c6fa268e4 (patch) | |
| tree | 08838611f8fbde9ec6207cadaaa831d637427cc0 /extras | |
| parent | 1b2b5be970f78cc32069516fa347d9943dc17d3e (diff) | |
common-ha: add cluster HA status to --status output for gdeploy
gdeploy wants a one-line "health" assessment.
If all the VIP and port block/unblock resource agents (RAs) are
located on their preferred nodes and 'Started', then the cluster
is deemed healthy.
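For illustration, a minimal sketch of that check, assuming the resource
names and the "Started <node>" line format shown in the patch below; the
argument handling and output wording here are illustrative only:

```bash
#!/bin/bash
# Sketch only: for each configured node, verify that its three resource
# agents (RAs) are reported as "Started" on that same node.
healthy=0
pcs_out=$(pcs status | sed -e 's/\t/ /g' -e 's/^ *//')      # one pcs run
for n in "$@"; do                      # configured node names as arguments
    for ra in nfs_block cluster_ip-1 nfs_unblock; do
        echo "${pcs_out}" | grep -q "^${n}-${ra} .*Started ${n}$" || ((healthy++))
    done
done
# The patch further distinguishes FAILOVER (an RA Started on another node)
# from BAD (an RA Stopped or FAILED); this sketch only reports yes/no.
[ ${healthy} -eq 0 ] && echo "healthy" || echo "not healthy"
```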
N.B. status originally checked only the "online" nodes obtained
from `pcs status`, but we really want to consider all the configured
nodes, whether they are online or not. Also, a single `pcs status`
invocation is enough.
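As a sketch of those two points, reading the configured nodes from
HA_CLUSTER_NODES in ganesha-ha.conf (the way `main()` already does) and
capturing `pcs status` once into a scratch file; the default config path
used here is hypothetical:

```bash
#!/bin/bash
# Sketch only: use the *configured* node list, not the "Online:" list,
# and run pcs status exactly once, re-using the captured output.
HA_CONFDIR=${1:-/etc/ganesha}                   # hypothetical default path
# ganesha-ha.conf carries HA_CLUSTER_NODES="node1,node2,..." (comma-separated)
eval "$(grep '^HA_CLUSTER_NODES=' ${HA_CONFDIR}/ganesha-ha.conf)"

scratch=$(mktemp)
pcs status | sed -e 's/\t/ /g' -e 's/^ *//' > ${scratch}    # single pcs run

for n in ${HA_CLUSTER_NODES//,/ }; do           # every configured node
    grep " Started ${n}$" ${scratch}            # inspect the cached output
done
rm -f ${scratch}
```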
Change-Id: Id0e0380b6982e23763edeb0488843b5363e370b8
BUG: 1395648
Signed-off-by: Kaleb S. KEITHLEY <kkeithle@redhat.com>
Reviewed-on: http://review.gluster.org/15882
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Arthy Loganathan <aloganat@redhat.com>
Reviewed-by: soumya k <skoduri@redhat.com>
Diffstat (limited to 'extras')
| -rw-r--r-- | extras/ganesha/scripts/ganesha-ha.sh | 93 | 
1 file changed, 65 insertions, 28 deletions
diff --git a/extras/ganesha/scripts/ganesha-ha.sh b/extras/ganesha/scripts/ganesha-ha.sh
index 06c3e8214ae..95bccca169b 100644
--- a/extras/ganesha/scripts/ganesha-ha.sh
+++ b/extras/ganesha/scripts/ganesha-ha.sh
@@ -1,6 +1,6 @@
 #!/bin/bash
-# Copyright 2015 Red Hat Inc.  All Rights Reserved
+# Copyright 2015-2016 Red Hat Inc.  All Rights Reserved
 #
 # Pacemaker+Corosync High Availability for NFS-Ganesha
 #
@@ -78,13 +78,14 @@ GANESHA_CONF=${CONFFILE:-/etc/ganesha/ganesha.conf}

 usage() {

-        echo "Usage      : add|delete|status"
-        echo "Add-node   : ganesha-ha.sh --add <HA_CONF_DIR>  \
+        echo "Usage      : add|delete|refresh-config|status"
+        echo "Add-node   : ganesha-ha.sh --add <HA_CONF_DIR> \
 <NODE-HOSTNAME>  <NODE-VIP>"
-        echo "Delete-node: ganesha-ha.sh --delete <HA_CONF_DIR>  \
+        echo "Delete-node: ganesha-ha.sh --delete <HA_CONF_DIR> \
 <NODE-HOSTNAME>"
-        echo "Refresh-config : ganesha-ha.sh --refresh-config <HA_CONFDIR>\
- <volume>"
+        echo "Refresh-config : ganesha-ha.sh --refresh-config <HA_CONFDIR> \
+<volume>"
+        echo "Status : ganesha-ha.sh --status <HA_CONFDIR>"
 }

 determine_service_manager () {
@@ -153,7 +154,7 @@ determine_servers()
     local tmp_ifs=${IFS}
     local ha_servers=""

-    if [[ "X${cmd}X" != "XsetupX" ]]; then
+    if [ "X${cmd}X" != "XsetupX" -a "X${cmd}X" != "XstatusX" ]; then
         ha_servers=$(pcs status | grep "Online:" | grep -o '\[.*\]' | sed -e 's/\[//' | sed -e 's/\]//')
         IFS=$' '
         for server in ${ha_servers} ; do
@@ -745,25 +746,63 @@ setup_state_volume()

 status()
 {
-    local regex_str="^ ${1}"; shift
-    local status_file=$(mktemp)
+    local scratch=$(mktemp)
+    local regex_str="^${1}-cluster_ip-1"
+    local healthy=0
+    local index=1
+    local nodes

-    while [[ ${1} ]]; do
+    # change tabs to spaces, strip leading spaces
+    pcs status | sed -e "s/\t/ /g" -e "s/^[ ]*//" > ${scratch}
+
+    nodes[0]=${1}; shift

-        regex_str="${regex_str}|^ ${1}"
+    # make a regex of the configured nodes
+    # and initalize the nodes array for later
+    while [[ ${1} ]]; do
+        regex_str="${regex_str}|^${1}-cluster_ip-1"
+        nodes[${index}]=${1}
+        ((index++))
         shift
     done

-    pcs status | egrep "^Online:" > ${status_file}
+    # print the nodes that are expected to be online
+    grep -E "^Online:" ${scratch}

-    echo >> ${status_file}
+    echo

-    pcs status | egrep "${regex_str}" | sed -e "s/\t/ /" | cut -d ' ' -f 2,4 >> ${status_file}
+    # print the VIPs and which node they are on
+    grep -E "${regex_str}" < ${scratch} | cut -d ' ' -f 1,4

-    cat ${status_file}
+    echo
+
+    # check if the VIP and port block/unblock RAs are on the expected nodes
+    for n in ${nodes[*]}; do
+
+        grep -E -x "${n}-nfs_block \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+        result=$?
+        ((healthy+=${result}))
+        grep -E -x "${n}-cluster_ip-1 \(ocf::heartbeat:IPaddr\): Started ${n}" > /dev/null 2>&1 ${scratch}
+        result=$?
+        ((healthy+=${result}))
+        grep -E -x "${n}-nfs_unblock \(ocf::heartbeat:portblock\): Started ${n}" > /dev/null 2>&1 ${scratch}
+        result=$?
+        ((healthy+=${result}))
+    done

-    rm -f ${status_file}
+    grep -E "\):\ Stopped|FAILED" > /dev/null 2>&1 ${scratch}
+    result=$?
+
+    if [ ${result} -eq 0 ]; then
+        echo "Cluster HA Status: BAD"
+    elif [ ${healthy} -eq 0 ]; then
+        echo "Cluster HA Status: HEALTHY"
+    else
+        echo "Cluster HA Status: FAILOVER"
+    fi
+
+    rm -f ${scratch}
 }

 create_ganesha_conf_file()
@@ -798,18 +837,16 @@ main()
         usage
         exit 0
     fi
-    if [[ ${cmd} != *status ]]; then
-        HA_CONFDIR=${1%/}; shift
-        local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
-        local node=""
-        local vip=""
-
-        # ignore any comment lines
-        cfgline=$(grep  ^HA_NAME= ${ha_conf})
-        eval $(echo ${cfgline} | grep -F HA_NAME=)
-        cfgline=$(grep  ^HA_CLUSTER_NODES= ${ha_conf})
-        eval $(echo ${cfgline} | grep -F HA_CLUSTER_NODES=)
-    fi
+    HA_CONFDIR=${1%/}; shift
+    local ha_conf=${HA_CONFDIR}/ganesha-ha.conf
+    local node=""
+    local vip=""
+
+    # ignore any comment lines
+    cfgline=$(grep  ^HA_NAME= ${ha_conf})
+    eval $(echo ${cfgline} | grep -F HA_NAME=)
+    cfgline=$(grep  ^HA_CLUSTER_NODES= ${ha_conf})
+    eval $(echo ${cfgline} | grep -F HA_CLUSTER_NODES=)

     case "${cmd}" in
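For reference, a hypothetical invocation of the new mode on a three-node
cluster; the host names and config path are made up, and the exact lines
depend on the local `pcs status` output, but the shape follows the
`status()` code above:

```
$ ./ganesha-ha.sh --status /etc/ganesha
Online: [ node1 node2 node3 ]

node1-cluster_ip-1 node1
node2-cluster_ip-1 node2
node3-cluster_ip-1 node3

Cluster HA Status: HEALTHY
```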
