author     Sahina Bose <sabose@redhat.com>    2014-04-23 15:46:37 +0530
committer  Sahina Bose <sabose@redhat.com>    2014-04-29 01:09:15 -0700
commit     cbfd13c455f89043d8782f86ae7c1832fe84e0b5 (patch)
tree       b2e26675792b845e9cf855e88b373f575ee7dab3
parent     1634fd3ab1b9e6fca134b434d2bfafcc629d8782 (diff)
glustercli: Added geo-rep status
Added a method to parse the gluster CLI output of
"gluster volume geo-replication <vol-name> status" and provide output in the
format {volumename: {'status': STATUS, 'detail': message string}}.

Temporarily parsing the string output until CLI XML output is available for
geo-rep (https://bugzilla.redhat.com/show_bug.cgi?id=1090910).

Change-Id: Ie12cfcd8bb0d3bf0b3d9c13567e40d9014c47f59
Signed-off-by: Sahina Bose <sabose@redhat.com>
Reviewed-on: http://review.gluster.org/7590
Reviewed-by: Aravinda VK <avishwan@redhat.com>
Reviewed-by: Bala FA <barumuga@redhat.com>
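For reference, a minimal usage sketch of the new helper (assuming the module is imported as gcli, as in the tests below); the return shape follows the docstring and the expected outputs in test_glustercli.py:

    from glusternagios import glustercli as gcli

    # Runs "gluster volume geo-replication test-vol status" and parses the
    # tabular output; raises GlusterCmdFailedException on a non-zero exit code.
    result = gcli.volumeGeoRepStatus("test-vol")

    # Example result shape (values taken from the unit tests):
    # {'test-vol': {'status': gcli.GeoRepStatus.FAULTY,
    #               'detail': '10.70.43.68::slave-vol - rhs3.novalocal - FAULTY;'
    #                         '10.70.43.68::slave-vol - rhs3-2.novalocal - FAULTY;'}}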
-rwxr-xr-x  glusternagios/glustercli.py  |  67
-rw-r--r--  tests/test_glustercli.py     |  96
2 files changed, 163 insertions(+), 0 deletions(-)
diff --git a/glusternagios/glustercli.py b/glusternagios/glustercli.py
index 771feeb..4426e22 100755
--- a/glusternagios/glustercli.py
+++ b/glusternagios/glustercli.py
@@ -94,6 +94,13 @@ class VolumeSplitBrainStatus:
     SPLITBRAIN = 'SPLITBRAIN'
 
 
+class GeoRepStatus:
+    OK = 'OK'
+    NOT_STARTED = "NOT_STARTED"
+    FAULTY = "FAULTY"
+    PARTIAL_FAULTY = "PARTIAL_FAULTY"
+
+
 class TransportType:
     TCP = 'TCP'
     RDMA = 'RDMA'
@@ -469,6 +476,66 @@ def _parseVolumeSelfHealSplitBrainInfo(out):
     return value
 
 
+def _parseVolumeGeoRepStatus(volumeName, out):
+    detail = ""
+    status = GeoRepStatus.OK
+    # https://bugzilla.redhat.com/show_bug.cgi?id=1090910 - opened for xml
+    # output. For now parsing below string output format
+    # MASTER NODE MASTER VOL MASTER BRICK
+    # SLAVE STATUS CHECKPOINT STATUS CRAWL STATUS
+    slaves = {}
+    all_status = ['ACTIVE', 'PASSIVE', 'FAULTY', 'NOT STARTED', 'INITIALISING']
+    for line in out:
+        if any(gstatus in line.upper() for gstatus in all_status):
+            nodeline = line.split()
+            slave = nodeline[3]
+            if slaves.get(slave) is None:
+                slaves[slave] = {'nodecount': 0,
+                                 'faultcount': 0,
+                                 'notstarted': 0}
+            slaves[slave]['nodecount'] += 1
+            if GeoRepStatus.FAULTY in line.upper() \
+                    or "NOT STARTED" in line.upper():
+                node = nodeline[0]
+                if GeoRepStatus.FAULTY == nodeline[4].upper():
+                    slaves[slave]['faultcount'] += 1
+                    tempstatus = GeoRepStatus.FAULTY
+                else:
+                    slaves[slave]['notstarted'] += 1
+                    tempstatus = GeoRepStatus.NOT_STARTED
+                detail += ("%s - %s - %s;" % (slave,
+                                              node,
+                                              tempstatus))
+    for slave, count_dict in slaves.iteritems():
+        if count_dict['nodecount'] == count_dict['faultcount']:
+            status = GeoRepStatus.FAULTY
+            break
+        elif count_dict['faultcount'] > 0:
+            status = GeoRepStatus.PARTIAL_FAULTY
+        elif count_dict['notstarted'] > 0 and status == GeoRepStatus.OK:
+            status = GeoRepStatus.NOT_STARTED
+    return {volumeName: {'status': status, 'detail': detail}}
+
+
+def volumeGeoRepStatus(volumeName, remoteServer=None):
+    """
+    Arguments:
+       * VolumeName
+    Returns:
+        {VOLUMENAME: {'status': GEOREPSTATUS,
+                      'detail': detailed message}}
+    """
+    command = _getGlusterVolCmd() + ["geo-replication", volumeName, "status"]
+    if remoteServer:
+        command += ['--remote-host=%s' % remoteServer]
+
+    rc, out, err = _execGluster(command)
+
+    if rc == 0:
+        return _parseVolumeGeoRepStatus(volumeName, out)
+    raise GlusterCmdFailedException(rc=rc, err=err)
+
+
 def volumeHealSplitBrainStatus(volumeName, remoteServer=None):
     """
     Arguments:
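A note on the classification above: per slave session, _parseVolumeGeoRepStatus counts total nodes, faulty nodes and not-started nodes, then derives the volume status from those counts. A minimal sketch of the per-slave rule (the helper name here is illustrative and not part of the patch; the real parser aggregates these results across all slave sessions, with FAULTY terminating the loop and PARTIAL_FAULTY outranking NOT_STARTED):

    def _classify_slave(nodecount, faultcount, notstarted):
        # Hypothetical helper mirroring the branches in _parseVolumeGeoRepStatus.
        if nodecount == faultcount:
            return GeoRepStatus.FAULTY           # every node of the session is faulty
        if faultcount > 0:
            return GeoRepStatus.PARTIAL_FAULTY   # only some nodes are faulty
        if notstarted > 0:
            return GeoRepStatus.NOT_STARTED      # session not started on some nodes
        return GeoRepStatus.OK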
diff --git a/tests/test_glustercli.py b/tests/test_glustercli.py
index 1d7d517..8f271a3 100644
--- a/tests/test_glustercli.py
+++ b/tests/test_glustercli.py
@@ -1125,6 +1125,54 @@ class GlusterCliTests(TestCaseBase):
         print(status)
         self.assertEquals(status, expectedOut)
 
+    @mock.patch('glusternagios.utils.execCmd')
+    @mock.patch('glusternagios.glustercli._getGlusterVolCmd')
+    def test_getVolumeGeoRepStatus(self, mock_glusterVolCmd,
+                                   mock_execCmd,):
+        mock_glusterVolCmd.return_value = ["gluster", "volume"]
+        mock_execCmd.return_value = (0,
+                                     self.__getGlusterGeoRepStatusResult(),
+                                     None)
+        expectedOut = {'test-vol':
+                       {'status': gcli.GeoRepStatus.FAULTY,
+                        'detail': "10.70.43.68::slave-vol - "
+                                  "rhs3.novalocal - FAULTY;"
+                                  "10.70.43.68::slave-vol - "
+                                  "rhs3-2.novalocal - FAULTY;"}}
+        status = gcli.volumeGeoRepStatus("test-vol")
+        print(status)
+        self.assertEquals(status, expectedOut)
+
+    @mock.patch('glusternagios.utils.execCmd')
+    @mock.patch('glusternagios.glustercli._getGlusterVolCmd')
+    def test_getVolumeGeoRepStatusMultiSlave(self, mock_glusterVolCmd,
+                                             mock_execCmd,):
+        mock_glusterVolCmd.return_value = ["gluster", "volume"]
+        mock_execCmd.return_value = (0,
+                                     self.__getGlusterGeoRepStatusResult2(),
+                                     None)
+        expectedOut = {'test-vol':
+                       {'status': gcli.GeoRepStatus.PARTIAL_FAULTY,
+                        'detail': "10.70.43.68::slave-vol - "
+                                  "rhs3-2.novalocal - FAULTY;"
+                                  "10.70.43.68::slave-vol2 - "
+                                  "rhs3.novalocal - NOT_STARTED;"
+                                  "10.70.43.68::slave-vol2 - "
+                                  "rhs3-2.novalocal - NOT_STARTED;"}}
+        status = gcli.volumeGeoRepStatus("test-vol")
+        print(status)
+        self.assertEquals(status, expectedOut)
+        mock_execCmd.return_value = (0,
+                                     self.__getGlusterGeoRepStatusResult3(),
+                                     None)
+        expectedOut = {'test-vol':
+                       {'status': gcli.GeoRepStatus.NOT_STARTED,
+                        'detail': "10.70.43.68::slave-vol - "
+                                  "rhs3-2.novalocal - NOT_STARTED;"}}
+        status = gcli.volumeGeoRepStatus("test-vol")
+        print(status)
+        self.assertEquals(status, expectedOut)
+
     def __getQuotaOut(self):
         return \
             [" Path Hard-limit Soft-limit"
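As a rough worked example (a sketch, not part of the patch): given two status lines of the form returned by __getGlusterGeoRepStatusResult() in the next hunk, _parseVolumeGeoRepStatus should mark the volume FAULTY, since every node replicating to 10.70.43.68::slave-vol reports "faulty"; the header and separator lines are skipped because they contain none of the status keywords:

    out = ["rhs3.novalocal rep /bricks/b3 10.70.43.68::slave-vol faulty N/A N/A",
           "rhs3-2.novalocal rep /bricks/b3 10.70.43.68::slave-vol faulty N/A N/A"]
    print(gcli._parseVolumeGeoRepStatus("test-vol", out))
    # {'test-vol': {'status': 'FAULTY',
    #               'detail': '10.70.43.68::slave-vol - rhs3.novalocal - FAULTY;'
    #                         '10.70.43.68::slave-vol - rhs3-2.novalocal - FAULTY;'}}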
@@ -1156,3 +1204,51 @@ class GlusterCliTests(TestCaseBase):
             "/dir.7/file.2",
             "/dir.10/file.2",
             "/dir.7/file.4"]
+
+    def __getGlusterGeoRepStatusResult(self):
+        return ["MASTER NODE MASTER VOL MASTER BRICK "
+                "SLAVE STATUS CHECKPOINT STATUS "
+                "CRAWL STATUS",
+                "--------------------------------------------------------"
+                "--------------------------------------------------------"
+                "----------------",
+                "rhs3.novalocal rep /bricks/b3 "
+                "10.70.43.68::slave-vol faulty "
+                "N/A N/A",
+                "rhs3-2.novalocal rep /bricks/b3 "
+                "10.70.43.68::slave-vol faulty "
+                "N/A N/A "]
+
+    def __getGlusterGeoRepStatusResult2(self):
+        return ["MASTER NODE MASTER VOL MASTER BRICK "
+                "SLAVE STATUS CHECKPOINT STATUS "
+                "CRAWL STATUS",
+                "--------------------------------------------------------"
+                "--------------------------------------------------------"
+                "----------------",
+                "rhs3.novalocal rep /bricks/b3 "
+                "10.70.43.68::slave-vol Passive "
+                "N/A N/A",
+                "rhs3-2.novalocal rep /bricks/b3 "
+                "10.70.43.68::slave-vol faulty "
+                "N/A N/A ",
+                "rhs3.novalocal rep /bricks/b3 "
+                "10.70.43.68::slave-vol2 Not Started "
+                "N/A N/A",
+                "rhs3-2.novalocal rep /bricks/b3 "
+                "10.70.43.68::slave-vol2 Not Started "
+                "N/A N/A "]
+
+    def __getGlusterGeoRepStatusResult3(self):
+        return ["MASTER NODE MASTER VOL MASTER BRICK "
+                "SLAVE STATUS CHECKPOINT STATUS "
+                "CRAWL STATUS",
+                "--------------------------------------------------------"
+                "--------------------------------------------------------"
+                "----------------",
+                "rhs3.novalocal rep /bricks/b3 "
+                "10.70.43.68::slave-vol Passive "
+                "N/A N/A",
+                "rhs3-2.novalocal rep /bricks/b3 "
+                "10.70.43.68::slave-vol Not Started "
+                "N/A N/A "]