summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorShubhendu Tripathi <shtripat@redhat.com>2014-04-01 15:07:44 +0530
committerBala.FA <barumuga@redhat.com>2014-04-28 16:20:46 +0530
commit2873ff21e4f99b35ab88595a96c0ee45c83d26c3 (patch)
tree70c350ef307c63072a604b3874ea70e3a5366123
parentea28af4f36370506a76a5ae4fbd56990dd49c71a (diff)
gluster-nagios-common: Added gluster cli module
Introduced gluster cli module to add all the gluster related get methods Change-Id: I440ae89ac3f93f961024a6e78870154f57b7dfbd Signed-off-by: Shubhendu Tripathi <shtripat@redhat.com> Reviewed-on: https://code.engineering.redhat.com/gerrit/22253 Reviewed-by: Darshan Narayana Murthy <dnarayan@redhat.com> Reviewed-by: Timothy Asir Jeyasingh <tjeyasin@redhat.com> Reviewed-by: Balamurugan Arumugam <barumuga@redhat.com> Reviewed-by: Sahina Bose <sabose@redhat.com> Tested-by: Sahina Bose <sabose@redhat.com>
-rw-r--r--glusternagios/Makefile.am2
-rwxr-xr-xglusternagios/glustercli.py469
-rw-r--r--glusternagios/hostname.py41
-rw-r--r--tests/test_glustercli.py1059
4 files changed, 1571 insertions, 0 deletions
diff --git a/glusternagios/Makefile.am b/glusternagios/Makefile.am
index 55f8642..7f46e08 100644
--- a/glusternagios/Makefile.am
+++ b/glusternagios/Makefile.am
@@ -1,4 +1,6 @@
dist_glusternagioscommonpylib_PYTHON = \
__init__.py \
+ glustercli.py \
+ hostname.py \
utils.py \
$(NULL)
diff --git a/glusternagios/glustercli.py b/glusternagios/glustercli.py
new file mode 100755
index 0000000..0a126e7
--- /dev/null
+++ b/glusternagios/glustercli.py
@@ -0,0 +1,469 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import xml.etree.cElementTree as etree
+import ethtool
+
+import utils
+from utils import CommandPath
+from hostname import getHostNameFqdn, HostNameException
+
# Resolved location of the gluster CLI binary.
glusterCmdPath = CommandPath("gluster", "/usr/sbin/gluster")
+
+
class GlusterCmdFailedException(Exception):
    """Raised when a gluster CLI invocation fails or returns an error."""

    message = "command execution failed"

    def __init__(self, rc=0, out=(), err=()):
        self.rc = rc
        self.out = out
        self.err = err

    def __str__(self):
        """Render the base message plus captured output and return code."""
        stdout = '\n'.join(self.out)
        stderr = '\n'.join(self.err)
        # Join whichever of stdout/stderr is non-empty with a newline.
        detail = '\n'.join(part for part in (stdout, stderr) if part)

        text = self.message
        if detail:
            text += '\nerror: ' + detail
        if self.rc:
            text += '\nreturn code: %s' % self.rc
        return text
+
+
+if hasattr(etree, 'ParseError'):
+ _etreeExceptions = (etree.ParseError, AttributeError, ValueError)
+else:
+ _etreeExceptions = (SyntaxError, AttributeError, ValueError)
+
+
def _getGlusterVolCmd():
    """Return the argv prefix for 'gluster volume' subcommands."""
    cmd = [glusterCmdPath.cmd, "--mode=script"]
    cmd.append("volume")
    return cmd
+
+
def _getGlusterPeerCmd():
    """Return the argv prefix for 'gluster peer' subcommands."""
    cmd = [glusterCmdPath.cmd, "--mode=script"]
    cmd.append("peer")
    return cmd
+
+
def _getGlusterSystemCmd():
    """Return the argv prefix for 'gluster system::' subcommands."""
    return [glusterCmdPath.cmd] + ["system::"]
+
+
class HostStatus:
    """Peer connectivity states derived from 'gluster peer status'."""
    CONNECTED, DISCONNECTED, UNKNOWN = \
        'CONNECTED', 'DISCONNECTED', 'UNKNOWN'
+
+
class VolumeStatus:
    """Volume states derived from the CLI's statusStr field."""
    ONLINE, OFFLINE = 'ONLINE', 'OFFLINE'
+
+
class TransportType:
    """Volume transport types reported by 'gluster volume info'."""
    TCP, RDMA = 'TCP', 'RDMA'
+
+
class TaskType:
    """Long-running gluster task kinds."""
    REBALANCE, REPLACE_BRICK, REMOVE_BRICK = \
        'REBALANCE', 'REPLACE_BRICK', 'REMOVE_BRICK'
+
+
def _getaddr(dev):
    """Return the IPv4 address of network device *dev*, or '' when unset."""
    info = ethtool.get_interfaces_info(dev.encode('utf8'))
    addr = info[0].ipv4_address
    return '' if addr is None else addr
+
+
def _getIpAddresses():
    """Map each active network device name to its IPv4 address.

    Returns:
        dict of device name -> address string.  Devices whose address
        cannot be read are skipped; the error is printed (best-effort,
        preserved from the original behaviour).
    """
    devinfo = {}
    for dev in ethtool.get_active_devices():
        try:
            devinfo[dev] = ethtool.get_ipaddr(dev)
        except IOError as e:
            # 'except IOError, e' / 'print e' were Python-2-only syntax;
            # these forms are valid on both Python 2 and 3.
            print(e)

    return devinfo
+
+
def _getGlusterHostName():
    """Return the local FQDN, or '' when it cannot be resolved."""
    try:
        fqdn = getHostNameFqdn()
    except HostNameException:
        fqdn = ''
    return fqdn
+
+
def _getLocalIpAddress():
    """Return the first non-loopback IPv4 address of this host, or ''.

    Bug fix: _getIpAddresses() returns a dict mapping device name to
    address, so the addresses are its *values*; the previous code
    iterated the dict directly and compared device names (e.g. 'eth0')
    against the '127.' prefix, so it returned a device name instead of
    an IP address.
    """
    for addr in _getIpAddresses().values():
        if addr and not addr.startswith('127.'):
            return addr
    return ''
+
+
def _execGluster(cmd):
    """Run *cmd* via utils.execCmd and return its result unchanged."""
    return utils.execCmd(cmd)
+
+
def _execGlusterXml(cmd):
    """Run a gluster command with --xml appended and parse the XML reply.

    Arguments:
        cmd -- argv list; it is NOT modified (a copy gets '--xml').
    Returns:
        the parsed ElementTree root when opRet == 0.
    Raises:
        GlusterCmdFailedException -- non-zero exit status, unparsable
        XML, or a non-zero opRet/opErrno in the reply.
    """
    # Work on a copy: the original appended to the caller's list, which
    # would stack up '--xml' arguments if a caller ever reused its argv.
    rc, out, err = utils.execCmd(cmd + ['--xml'])
    if rc != 0:
        raise GlusterCmdFailedException(rc, out, err)
    try:
        tree = etree.fromstring('\n'.join(out))
        rv = int(tree.find('opRet').text)
        msg = tree.find('opErrstr').text
        errNo = int(tree.find('opErrno').text)
    except _etreeExceptions:
        raise GlusterCmdFailedException(err=out)
    if rv == 0:
        return tree
    # Prefer the CLI's errno over opRet when it carries information.
    if errNo != 0:
        rv = errNo
    raise GlusterCmdFailedException(rc=rv, err=[msg])
+
+
def hostUUIDGet():
    """Return this host's gluster UUID.

    Raises GlusterCmdFailedException when the command fails or no
    'UUID: ' line appears in its output.
    """
    rc, out, err = _execGluster(_getGlusterSystemCmd() + ["uuid", "get"])
    if rc == 0:
        prefix = 'UUID: '
        for line in out:
            if line.startswith(prefix):
                return line[len(prefix):]

    raise GlusterCmdFailedException()
+
+
def _parseVolumeStatus(tree):
    """Parse 'volume status <VOL> --xml' output into a status dict.

    Returns:
        {'name': NAME, 'bricks': [...], 'nfs': [...], 'shd': [...]}
        where each entry carries port/status/pid; status is 'ONLINE'
        when the node reports status == '1', else 'OFFLINE'.
    """
    status = {'name': tree.find('volStatus/volumes/volume/volName').text,
              'bricks': [],
              'nfs': [],
              'shd': []}
    # 'localhost' paths are rewritten to this host's address (or FQDN).
    hostname = _getLocalIpAddress() or _getGlusterHostName()
    for el in tree.findall('volStatus/volumes/volume/node'):
        # Iterating the element replaces el.getchildren(), which was
        # removed in Python 3.9.
        value = dict((ch.tag, ch.text or '') for ch in el)

        if value['path'] == 'localhost':
            value['path'] = hostname

        if value['status'] == '1':
            value['status'] = 'ONLINE'
        else:
            value['status'] = 'OFFLINE'

        if value['hostname'] == 'NFS Server':
            status['nfs'].append({'hostname': value['path'],
                                  'port': value['port'],
                                  'status': value['status'],
                                  'pid': value['pid']})
        elif value['hostname'] == 'Self-heal Daemon':
            status['shd'].append({'hostname': value['path'],
                                  'status': value['status'],
                                  'pid': value['pid']})
        else:
            status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
                                                         value['path']),
                                     'port': value['port'],
                                     'status': value['status'],
                                     'pid': value['pid']})
    return status
+
+
+def _parseVolumeStatusDetail(tree):
+ status = {'name': tree.find('volStatus/volumes/volume/volName').text,
+ 'bricks': []}
+ for el in tree.findall('volStatus/volumes/volume/node'):
+ value = {}
+
+ for ch in el.getchildren():
+ value[ch.tag] = ch.text or ''
+
+ sizeTotal = int(value['sizeTotal'])
+ value['sizeTotal'] = sizeTotal / (1024.0 * 1024.0)
+ sizeFree = int(value['sizeFree'])
+ value['sizeFree'] = sizeFree / (1024.0 * 1024.0)
+ status['bricks'].append({'brick': '%s:%s' % (value['hostname'],
+ value['path']),
+ 'sizeTotal': '%.3f' % (value['sizeTotal'],),
+ 'sizeFree': '%.3f' % (value['sizeFree'],),
+ 'device': value['device'],
+ 'blockSize': value['blockSize'],
+ 'mntOptions': value['mntOptions'],
+ 'fsName': value['fsName']})
+ return status
+
+
+def _parseVolumeStatusClients(tree):
+ status = {'name': tree.find('volStatus/volumes/volume/volName').text,
+ 'bricks': []}
+ for el in tree.findall('volStatus/volumes/volume/node'):
+ hostname = el.find('hostname').text
+ path = el.find('path').text
+
+ clientsStatus = []
+ for c in el.findall('clientsStatus/client'):
+ clientValue = {}
+ for ch in c.getchildren():
+ clientValue[ch.tag] = ch.text or ''
+ clientsStatus.append({'hostname': clientValue['hostname'],
+ 'bytesRead': clientValue['bytesRead'],
+ 'bytesWrite': clientValue['bytesWrite']})
+
+ status['bricks'].append({'brick': '%s:%s' % (hostname, path),
+ 'clientsStatus': clientsStatus})
+ return status
+
+
+def _parseVolumeStatusMem(tree):
+ status = {'name': tree.find('volStatus/volumes/volume/volName').text,
+ 'bricks': []}
+ for el in tree.findall('volStatus/volumes/volume/node'):
+ brick = {'brick': '%s:%s' % (el.find('hostname').text,
+ el.find('path').text),
+ 'mallinfo': {},
+ 'mempool': []}
+
+ for ch in el.find('memStatus/mallinfo').getchildren():
+ brick['mallinfo'][ch.tag] = ch.text or ''
+
+ for c in el.findall('memStatus/mempool/pool'):
+ mempool = {}
+ for ch in c.getchildren():
+ mempool[ch.tag] = ch.text or ''
+ brick['mempool'].append(mempool)
+
+ status['bricks'].append(brick)
+ return status
+
+
def volumeStatus(volumeName, brick=None, option=None):
    """Get volume status via 'gluster volume status'.

    Arguments:
        volumeName -- volume to query
        brick -- optional brick to restrict the query to
        option -- None, 'detail', 'clients' or 'mem'; selects the
                  sub-report and therefore the shape of the result
    Returns (depending on option):
        None      -> {'name': NAME,
                      'bricks': [{'brick', 'port', 'status', 'pid'}, ...],
                      'nfs': [{'hostname', 'port', 'status', 'pid'}, ...],
                      'shd': [{'hostname', 'status', 'pid'}, ...]}
        'detail'  -> {'name': NAME,
                      'bricks': [{'brick', 'sizeTotal', 'sizeFree',
                                  'device', 'blockSize', 'mntOptions',
                                  'fsName'}, ...]}
        'clients' -> {'name': NAME,
                      'bricks': [{'brick',
                                  'clientsStatus': [{'hostname',
                                                     'bytesRead',
                                                     'bytesWrite'},
                                                    ...]}, ...]}
        'mem'     -> {'name': NAME,
                      'bricks': [{'brick',
                                  'mallinfo': {arena, fordblks, fsmblks,
                                               hblkhd, hblks, keepcost,
                                               ordblks, smblks, uordblks,
                                               usmblks},
                                  'mempool': [{allocCount, coldCount,
                                               hotCount, maxAlloc,
                                               maxStdAlloc, name,
                                               padddedSizeOf,
                                               poolMisses}, ...]}, ...]}
    Raises:
        GlusterCmdFailedException on command or parse failure.
    """
    command = _getGlusterVolCmd() + ["status", volumeName]
    for extra in (brick, option):
        if extra:
            command.append(extra)
    try:
        xmltree = _execGlusterXml(command)
    except GlusterCmdFailedException as e:
        raise GlusterCmdFailedException(rc=e.rc, err=e.err)
    # Pick the parser matching the requested sub-report.
    parsers = {'detail': _parseVolumeStatusDetail,
               'clients': _parseVolumeStatusClients,
               'mem': _parseVolumeStatusMem}
    try:
        return parsers.get(option, _parseVolumeStatus)(xmltree)
    except _etreeExceptions:
        raise GlusterCmdFailedException(err=[etree.tostring(xmltree)])
+
+
def _parseVolumeInfo(tree):
    """Parse 'volume info --xml' output.

    Returns:
        {VOLUMENAME: {'brickCount': BRICKCOUNT,
                      'bricks': [BRICK1, BRICK2, ...],
                      'bricksInfo': [{'name': BRICK}, ...],
                      'options': {OPTION: VALUE, ...},
                      'transportType': [TCP,RDMA, ...],
                      'uuid': UUID,
                      'volumeName': NAME,
                      'volumeStatus': STATUS,
                      'volumeType': TYPE,
                      'distCount'/'stripeCount'/'replicaCount': ...}, ...}
    """
    volumes = {}
    for el in tree.findall('volInfo/volumes/volume'):
        name = el.find('name').text
        value = {'volumeName': name,
                 'uuid': el.find('id').text,
                 'volumeType':
                 el.find('typeStr').text.upper().replace('-', '_'),
                 'brickCount': el.find('brickCount').text,
                 'distCount': el.find('distCount').text,
                 'stripeCount': el.find('stripeCount').text,
                 'replicaCount': el.find('replicaCount').text}

        if el.find('statusStr').text.upper() == 'STARTED':
            value["volumeStatus"] = VolumeStatus.ONLINE
        else:
            value["volumeStatus"] = VolumeStatus.OFFLINE

        transport = el.find('transport').text
        if transport == '0':
            value['transportType'] = [TransportType.TCP]
        elif transport == '1':
            value['transportType'] = [TransportType.RDMA]
        else:
            value['transportType'] = [TransportType.TCP,
                                      TransportType.RDMA]

        value['bricks'] = [b.text for b in el.findall('bricks/brick')]
        value['options'] = dict((o.find('name').text, o.find('value').text)
                                for o in el.findall('options/option'))
        value['bricksInfo'] = []
        for d in el.findall('bricks/brick'):
            # Backward compatibility: older gluster omits the <name>
            # child; stop collecting brick details at the first brick
            # lacking it (leaving bricksInfo possibly empty).
            try:
                value['bricksInfo'].append({'name': d.find('name').text})
            except AttributeError:
                break
        volumes[name] = value
    return volumes
+
+
def volumeInfo(volumeName=None, remoteServer=None):
    """Fetch volume info via 'gluster volume info'.

    Arguments:
        volumeName -- restrict output to this volume (default: all)
        remoteServer -- query that host instead of the local one
    Returns:
        {VOLUMENAME: {'brickCount': BRICKCOUNT,
                      'bricks': [BRICK1, BRICK2, ...],
                      'options': {OPTION: VALUE, ...},
                      'transportType': [TCP,RDMA, ...],
                      'uuid': UUID,
                      'volumeName': NAME,
                      'volumeStatus': STATUS,
                      'volumeType': TYPE}, ...}
    Raises:
        GlusterCmdFailedException on command or parse failure.
    """
    command = _getGlusterVolCmd() + ["info"]
    if remoteServer:
        command.append('--remote-host=%s' % remoteServer)
    if volumeName:
        command.append(volumeName)
    try:
        xmltree = _execGlusterXml(command)
    except GlusterCmdFailedException as e:
        raise GlusterCmdFailedException(rc=e.rc, err=e.err)
    try:
        return _parseVolumeInfo(xmltree)
    except _etreeExceptions:
        raise GlusterCmdFailedException(err=[etree.tostring(xmltree)])
+
+
def _parsePeerStatus(tree, gHostName, gUuid, gStatus):
    """Build a host list from 'peer status --xml', prepending this host.

    Arguments:
        tree -- parsed cliOutput element
        gHostName, gUuid, gStatus -- entry describing the local host
    Returns:
        [{'hostname': ..., 'uuid': ..., 'status': ...}, ...]
    """
    hostList = [{'hostname': gHostName, 'uuid': gUuid, 'status': gStatus}]

    for peer in tree.findall('peerStatus/peer'):
        # state '3' means 'Peer in Cluster'; any other state is reported
        # UNKNOWN regardless of the connected flag.
        if peer.find('state').text != '3':
            peerState = HostStatus.UNKNOWN
        elif peer.find('connected').text == '1':
            peerState = HostStatus.CONNECTED
        else:
            peerState = HostStatus.DISCONNECTED
        hostList.append({'hostname': peer.find('hostname').text,
                         'uuid': peer.find('uuid').text,
                         'status': peerState})

    return hostList
+
+
def peerStatus():
    """Return the peer list for this cluster.

    The local host is always the first entry and is reported CONNECTED.
    Returns:
        [{'hostname': HOSTNAME, 'uuid': UUID, 'status': STATE}, ...]
    Raises:
        GlusterCmdFailedException on command or parse failure.
    """
    try:
        xmltree = _execGlusterXml(_getGlusterPeerCmd() + ["status"])
    except GlusterCmdFailedException as e:
        raise GlusterCmdFailedException(rc=e.rc, err=e.err)
    try:
        localName = _getLocalIpAddress() or _getGlusterHostName()
        return _parsePeerStatus(xmltree, localName,
                                hostUUIDGet(), HostStatus.CONNECTED)
    except _etreeExceptions:
        raise GlusterCmdFailedException(err=[etree.tostring(xmltree)])
diff --git a/glusternagios/hostname.py b/glusternagios/hostname.py
new file mode 100644
index 0000000..6277569
--- /dev/null
+++ b/glusternagios/hostname.py
@@ -0,0 +1,41 @@
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+import utils
+
# Resolved location of the hostname(1) binary.
_hostNameCommandPath = utils.CommandPath("hostname", "/bin/hostname")
+
+
class HostNameException(Exception):
    """Raised when the hostname command exits with a non-zero status."""

    def __init__(self, rc):
        # rc: exit status of the failed hostname invocation.
        self.rc = rc
        self.message = 'hostname execution failed with error code %s' % (
            self.rc,)

    def __str__(self):
        return self.message
+
+
def getHostNameFqdn():
    """Return the fully qualified domain name of this host.

    Raises HostNameException when 'hostname --fqdn' exits non-zero.
    """
    rc, out, err = utils.execCmd([_hostNameCommandPath.cmd, '--fqdn'])
    if not rc:
        return out[0]
    raise HostNameException(rc)
diff --git a/tests/test_glustercli.py b/tests/test_glustercli.py
new file mode 100644
index 0000000..53865cd
--- /dev/null
+++ b/tests/test_glustercli.py
@@ -0,0 +1,1059 @@
+#
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+from testrunner import GlusterNagiosTestCase as TestCaseBase
+from glusternagios import glustercli as gcli
+import xml.etree.cElementTree as etree
+
+
+class GlusterCliTests(TestCaseBase):
+ maxDiff = None
+
+ def _parseVolumeInfo_empty_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr/>
+ <volInfo/>
+</cliOutput>
+"""
+ tree = etree.fromstring(out)
+ self.assertFalse(gcli._parseVolumeInfo(tree))
+
+ def _parseVolumeInfo_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr/>
+ <volInfo>
+ <volumes>
+ <volume>
+ <name>music</name>
+ <id>b3114c71-741b-4c6f-a39e-80384c4ea3cf</id>
+ <status>1</status>
+ <statusStr>Started</statusStr>
+ <brickCount>2</brickCount>
+ <distCount>2</distCount>
+ <stripeCount>1</stripeCount>
+ <replicaCount>2</replicaCount>
+ <type>2</type>
+ <typeStr>Replicate</typeStr>
+ <transport>0</transport>
+ <bricks>
+ <brick>192.168.122.2:/tmp/m_b1<name>192.168.122.2:/tmp/m_b1</name>
+ </brick>
+ <brick>192.168.122.2:/tmp/m_b2<name>192.168.122.2:/tmp/m_b2</name>
+ </brick>
+ </bricks>
+ <optCount>1</optCount>
+ <options>
+ <option>
+ <name>auth.allow</name>
+ <value>*</value>
+ </option>
+ </options>
+ </volume>
+ <volume>
+ <name>test1</name>
+ <id>b444ed94-f346-4cda-bd55-0282f21d22db</id>
+ <status>2</status>
+ <statusStr>Stopped</statusStr>
+ <brickCount>1</brickCount>
+ <distCount>1</distCount>
+ <stripeCount>1</stripeCount>
+ <replicaCount>1</replicaCount>
+ <type>0</type>
+ <typeStr>Distribute</typeStr>
+ <transport>1</transport>
+ <bricks>
+ <brick>192.168.122.2:/tmp/t_b1<name>192.168.122.2:/tmp/t_b1</name>
+ </brick>
+ </bricks>
+ <optCount>0</optCount>
+ <options/>
+ </volume>
+ <count>2</count>
+ </volumes>
+ </volInfo>
+</cliOutput>
+"""
+ tree = etree.fromstring(out)
+ oVolumeInfo = \
+ {'music': {'brickCount': '2',
+ 'bricks': ['192.168.122.2:/tmp/m_b1',
+ '192.168.122.2:/tmp/m_b2'],
+ 'distCount': '2',
+ 'bricksInfo': [{
+ 'name': '192.168.122.2:/tmp/m_b1',
+ }, {
+ 'name': '192.168.122.2:/tmp/m_b2',
+ }],
+ 'options': {'auth.allow': '*'},
+ 'replicaCount': '2',
+ 'stripeCount': '1',
+ 'transportType': [gcli.TransportType.TCP],
+ 'uuid': 'b3114c71-741b-4c6f-a39e-80384c4ea3cf',
+ 'volumeName': 'music',
+ 'volumeStatus': gcli.VolumeStatus.ONLINE,
+ 'volumeType': 'REPLICATE'},
+ 'test1': {'brickCount': '1',
+ 'bricks': ['192.168.122.2:/tmp/t_b1'],
+ 'distCount': '1',
+ 'bricksInfo': [{
+ 'name': '192.168.122.2:/tmp/t_b1',
+ }],
+ 'options': {},
+ 'replicaCount': '1',
+ 'stripeCount': '1',
+ 'transportType': [gcli.TransportType.RDMA],
+ 'uuid': 'b444ed94-f346-4cda-bd55-0282f21d22db',
+ 'volumeName': 'test1',
+ 'volumeStatus': gcli.VolumeStatus.OFFLINE,
+ 'volumeType': 'DISTRIBUTE'}}
+ volumeInfo = gcli._parseVolumeInfo(tree)
+ print volumeInfo
+ print oVolumeInfo
+ self.assertEquals(volumeInfo, oVolumeInfo)
+
+ def test_parseVolumeInfo(self):
+ self._parseVolumeInfo_empty_test()
+ self._parseVolumeInfo_test()
+
+ def _parsePeerStatus_empty_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr>No peers present</opErrstr>
+ <peerStatus/>
+</cliOutput>
+"""
+ tree = etree.fromstring(out)
+ hostList = \
+ gcli._parsePeerStatus(tree, 'fedora-16-test',
+ '711d2887-3222-46d8-801a-7e3f646bdd4d',
+ gcli.HostStatus.CONNECTED)
+ self.assertEquals(hostList,
+ [{'hostname': 'fedora-16-test',
+ 'uuid': '711d2887-3222-46d8-801a-7e3f646bdd4d',
+ 'status': gcli.HostStatus.CONNECTED}])
+
+ def _parsePeerStatus_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr/>
+ <peerStatus>
+ <peer>
+ <uuid>610f466c-781a-4e04-8f67-8eba9a201867</uuid>
+ <hostname>192.168.2.21</hostname>
+ <connected>1</connected>
+ <state>3</state>
+ <stateStr>Peer in Cluster</stateStr>
+ </peer>
+ <peer>
+ <uuid>12345678-781a-aaaa-bbbb-8eba9a201867</uuid>
+ <hostname>FC16-1</hostname>
+ <connected>0</connected>
+ <state>3</state>
+ <stateStr>Peer in Cluster</stateStr>
+ </peer>
+ <peer>
+ <uuid>12345678-cccc-aaaa-bbbb-8eba9a201867</uuid>
+ <hostname>FC16-2</hostname>
+ <connected>1</connected>
+ <state>2</state>
+ <stateStr>Peer rejected</stateStr>
+ </peer>
+ </peerStatus>
+</cliOutput>
+"""
+ tree = etree.fromstring(out)
+ hostList = \
+ gcli._parsePeerStatus(tree, 'fedora-16-test',
+ '711d2887-3222-46d8-801a-7e3f646bdd4d',
+ gcli.HostStatus.CONNECTED)
+ self.assertEquals(hostList,
+ [{'hostname': 'fedora-16-test',
+ 'uuid': '711d2887-3222-46d8-801a-7e3f646bdd4d',
+ 'status': gcli.HostStatus.CONNECTED},
+ {'hostname': '192.168.2.21',
+ 'uuid': '610f466c-781a-4e04-8f67-8eba9a201867',
+ 'status': gcli.HostStatus.CONNECTED},
+ {'hostname': 'FC16-1',
+ 'uuid': '12345678-781a-aaaa-bbbb-8eba9a201867',
+ 'status': gcli.HostStatus.DISCONNECTED},
+ {'hostname': 'FC16-2',
+ 'uuid': '12345678-cccc-aaaa-bbbb-8eba9a201867',
+ 'status': gcli.HostStatus.UNKNOWN}])
+
+ def test_parsePeerStatus(self):
+ self._parsePeerStatus_empty_test()
+ self._parsePeerStatus_test()
+
+ def _parseVolumeStatus_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr/>
+ <volStatus>
+ <volumes>
+ <volume>
+ <volName>music</volName>
+ <nodeCount>4</nodeCount>
+ <node>
+ <hostname>192.168.122.2</hostname>
+ <path>/tmp/music-b1</path>
+ <peerid>f06b108e-a780-4519-bb22-c3083a1e3f8a</peerid>
+ <port>49152</port>
+ <status>1</status>
+ <pid>1313</pid>
+ </node>
+ <node>
+ <hostname>192.168.122.2</hostname>
+ <path>/tmp/music-b2</path>
+ <peerid>f06b108e-a780-4519-bb22-c3083a1e3f8a</peerid>
+ <port>49153</port>
+ <status>1</status>
+ <pid>1335</pid>
+ </node>
+ <node>
+ <hostname>NFS Server</hostname>
+ <path>192.168.122.2</path>
+ <peerid>f06b108e-a780-4519-bb22-c3083a1e3f8a</peerid>
+ <port>38467</port>
+ <status>1</status>
+ <pid>1357</pid>
+ </node>
+ <node>
+ <hostname>Self-heal Daemon</hostname>
+ <path>192.168.122.2</path>
+ <peerid>f06b108e-a780-4519-bb22-c3083a1e3f8a</peerid>
+ <port>0</port>
+ <status>1</status>
+ <pid>1375</pid>
+ </node>
+ </volume>
+ </volumes>
+ </volStatus>
+</cliOutput>
+"""
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeStatus(tree)
+ self.assertEquals(status,
+ {'bricks': [{'brick': '192.168.122.2:/tmp/music-b1',
+ 'pid': '1313',
+ 'port': '49152',
+ 'status': 'ONLINE'},
+ {'brick': '192.168.122.2:/tmp/music-b2',
+ 'pid': '1335',
+ 'port': '49153',
+ 'status': 'ONLINE'}],
+ 'name': 'music',
+ 'nfs': [{'hostname': '192.168.122.2',
+ 'pid': '1357',
+ 'port': '38467',
+ 'status': 'ONLINE'}],
+ 'shd': [{'hostname': '192.168.122.2',
+ 'pid': '1375',
+ 'status': 'ONLINE'}]})
+
+ def _parseVolumeStatusDetail_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr/>
+ <volStatus>
+ <volumes>
+ <volume>
+ <volName>music</volName>
+ <nodeCount>2</nodeCount>
+ <node>
+ <hostname>192.168.122.2</hostname>
+ <path>/tmp/music-b1</path>
+ <peerid>f06b108e-a780-4519-bb22-c3083a1e3f8a</peerid>
+ <port>49152</port>
+ <status>1</status>
+ <pid>1313</pid>
+ <sizeTotal>8370712576</sizeTotal>
+ <sizeFree>4478812160</sizeFree>
+ <device>/dev/vda1</device>
+ <blockSize>4096</blockSize>
+ <mntOptions>rw,seclabel,relatime,data=ordered</mntOptions>
+ <fsName>ext4</fsName>
+ </node>
+ <node>
+ <hostname>192.168.122.2</hostname>
+ <path>/tmp/music-b2</path>
+ <peerid>f06b108e-a780-4519-bb22-c3083a1e3f8a</peerid>
+ <port>49153</port>
+ <status>1</status>
+ <pid>1335</pid>
+ <sizeTotal>8370712576</sizeTotal>
+ <sizeFree>4478812160</sizeFree>
+ <device>/dev/vda1</device>
+ <blockSize>4096</blockSize>
+ <mntOptions>rw,seclabel,relatime,data=ordered</mntOptions>
+ <fsName>ext4</fsName>
+ </node>
+ </volume>
+ </volumes>
+ </volStatus>
+</cliOutput>"""
+ tree = etree.fromstring(out)
+ oStatus = \
+ {'bricks': [{'blockSize': '4096',
+ 'brick': '192.168.122.2:/tmp/music-b1',
+ 'device': '/dev/vda1',
+ 'fsName': 'ext4',
+ 'mntOptions': 'rw,seclabel,relatime,data=ordered',
+ 'sizeFree': '4271.328',
+ 'sizeTotal': '7982.934'},
+ {'blockSize': '4096',
+ 'brick': '192.168.122.2:/tmp/music-b2',
+ 'device': '/dev/vda1',
+ 'fsName': 'ext4',
+ 'mntOptions': 'rw,seclabel,relatime,data=ordered',
+ 'sizeFree': '4271.328',
+ 'sizeTotal': '7982.934'}],
+ 'name': 'music'}
+ status = gcli._parseVolumeStatusDetail(tree)
+ self.assertEquals(status, oStatus)
+
+ def _parseVolumeStatusClients_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr/>
+ <volStatus>
+ <volumes>
+ <volume>
+ <volName>music</volName>
+ <nodeCount>2</nodeCount>
+ <node>
+ <hostname>192.168.122.2</hostname>
+ <path>/tmp/music-b1</path>
+ <port>49152</port>
+ <status>1</status>
+ <pid>1313</pid>
+ <clientsStatus>
+ <clientCount>2</clientCount>
+ <client>
+ <hostname>192.168.122.2:1021</hostname>
+ <bytesRead>1172</bytesRead>
+ <bytesWrite>792</bytesWrite>
+ </client>
+ <client>
+ <hostname>192.168.122.2:1011</hostname>
+ <bytesRead>10076</bytesRead>
+ <bytesWrite>12152</bytesWrite>
+ </client>
+ </clientsStatus>
+ </node>
+ <node>
+ <hostname>192.168.122.2</hostname>
+ <path>/tmp/music-b2</path>
+ <port>49153</port>
+ <status>1</status>
+ <pid>1335</pid>
+ <clientsStatus>
+ <clientCount>2</clientCount>
+ <client>
+ <hostname>192.168.122.2:1020</hostname>
+ <bytesRead>1172</bytesRead>
+ <bytesWrite>792</bytesWrite>
+ </client>
+ <client>
+ <hostname>192.168.122.2:1010</hostname>
+ <bytesRead>10864</bytesRead>
+ <bytesWrite>12816</bytesWrite>
+ </client>
+ </clientsStatus>
+ </node>
+ </volume>
+ </volumes>
+ </volStatus>
+</cliOutput>
+"""
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeStatusClients(tree)
+ self.assertEquals(status.keys(), ['bricks', 'name'])
+ self.assertEquals(status['name'], 'music')
+ oBricks = [{'brick': '192.168.122.2:/tmp/music-b1',
+ 'clientsStatus': [{'bytesRead': '1172',
+ 'bytesWrite': '792',
+ 'hostname': '192.168.122.2:1021'},
+ {'bytesRead': '10076',
+ 'bytesWrite': '12152',
+ 'hostname': '192.168.122.2:1011'}]},
+ {'brick': '192.168.122.2:/tmp/music-b2',
+ 'clientsStatus': [{'bytesRead': '1172',
+ 'bytesWrite': '792',
+ 'hostname': '192.168.122.2:1020'},
+ {'bytesRead': '10864',
+ 'bytesWrite': '12816',
+ 'hostname': '192.168.122.2:1010'}]}]
+ self.assertEquals(status['bricks'], oBricks)
+
+ def _parseVolumeStatusMem_test(self):
+ out = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+ <opRet>0</opRet>
+ <opErrno>0</opErrno>
+ <opErrstr/>
+ <volStatus>
+ <volumes>
+ <volume>
+ <volName>music</volName>
+ <nodeCount>2</nodeCount>
+ <node>
+ <hostname>192.168.122.2</hostname>
+ <path>/tmp/music-b1</path>
+ <peerid>f06b108e-a780-4519-bb22-c3083a1e3f8a</peerid>
+ <port>49152</port>
+ <status>1</status>
+ <pid>1452</pid>
+ <memStatus>
+ <mallinfo>
+ <arena>606208</arena>
+ <ordblks>6</ordblks>
+ <smblks>1</smblks>
+ <hblks>12</hblks>
+ <hblkhd>15179776</hblkhd>
+ <usmblks>0</usmblks>
+ <fsmblks>64</fsmblks>
+ <uordblks>474208</uordblks>
+ <fordblks>132000</fordblks>
+ <keepcost>130224</keepcost>
+ </mallinfo>
+ <mempool>
+ <count>15</count>
+ <pool>
+ <name>music-server:fd_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>1024</coldCount>
+ <padddedSizeOf>100</padddedSizeOf>
+ <allocCount>0</allocCount>
+ <maxAlloc>0</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-server:dentry_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>16384</coldCount>
+ <padddedSizeOf>84</padddedSizeOf>
+ <allocCount>0</allocCount>
+ <maxAlloc>0</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-server:inode_t</name>
+ <hotCount>1</hotCount>
+ <coldCount>16383</coldCount>
+ <padddedSizeOf>148</padddedSizeOf>
+ <allocCount>1</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-locks:pl_local_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>32</coldCount>
+ <padddedSizeOf>140</padddedSizeOf>
+ <allocCount>1</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-marker:marker_local_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>128</coldCount>
+ <padddedSizeOf>316</padddedSizeOf>
+ <allocCount>0</allocCount>
+ <maxAlloc>0</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-server:rpcsvc_request_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>512</coldCount>
+ <padddedSizeOf>6372</padddedSizeOf>
+ <allocCount>10</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:struct saved_frame</name>
+ <hotCount>0</hotCount>
+ <coldCount>8</coldCount>
+ <padddedSizeOf>124</padddedSizeOf>
+ <allocCount>2</allocCount>
+ <maxAlloc>2</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:struct rpc_req</name>
+ <hotCount>0</hotCount>
+ <coldCount>8</coldCount>
+ <padddedSizeOf>2236</padddedSizeOf>
+ <allocCount>2</allocCount>
+ <maxAlloc>2</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:rpcsvc_request_t</name>
+ <hotCount>1</hotCount>
+ <coldCount>7</coldCount>
+ <padddedSizeOf>6372</padddedSizeOf>
+ <allocCount>1</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:data_t</name>
+ <hotCount>117</hotCount>
+ <coldCount>16266</coldCount>
+ <padddedSizeOf>52</padddedSizeOf>
+ <allocCount>179</allocCount>
+ <maxAlloc>121</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:data_pair_t</name>
+ <hotCount>138</hotCount>
+ <coldCount>16245</coldCount>
+ <padddedSizeOf>68</padddedSizeOf>
+ <allocCount>218</allocCount>
+ <maxAlloc>142</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:dict_t</name>
+ <hotCount>13</hotCount>
+ <coldCount>4083</coldCount>
+ <padddedSizeOf>84</padddedSizeOf>
+ <allocCount>24</allocCount>
+ <maxAlloc>15</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:call_stub_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>1024</coldCount>
+ <padddedSizeOf>1228</padddedSizeOf>
+ <allocCount>2</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:call_stack_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>1024</coldCount>
+ <padddedSizeOf>2084</padddedSizeOf>
+ <allocCount>4</allocCount>
+ <maxAlloc>2</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:call_frame_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>4096</coldCount>
+ <padddedSizeOf>172</padddedSizeOf>
+ <allocCount>14</allocCount>
+ <maxAlloc>7</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ </mempool>
+ </memStatus>
+ </node>
+ <node>
+ <hostname>192.168.122.2</hostname>
+ <path>/tmp/music-b2</path>
+ <peerid>f06b108e-a780-4519-bb22-c3083a1e3f8a</peerid>
+ <port>49153</port>
+ <status>1</status>
+ <pid>1459</pid>
+ <memStatus>
+ <mallinfo>
+ <arena>606208</arena>
+ <ordblks>5</ordblks>
+ <smblks>2</smblks>
+ <hblks>12</hblks>
+ <hblkhd>15179776</hblkhd>
+ <usmblks>0</usmblks>
+ <fsmblks>128</fsmblks>
+ <uordblks>474224</uordblks>
+ <fordblks>131984</fordblks>
+ <keepcost>130224</keepcost>
+ </mallinfo>
+ <mempool>
+ <count>15</count>
+ <pool>
+ <name>music-server:fd_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>1024</coldCount>
+ <padddedSizeOf>100</padddedSizeOf>
+ <allocCount>0</allocCount>
+ <maxAlloc>0</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-server:dentry_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>16384</coldCount>
+ <padddedSizeOf>84</padddedSizeOf>
+ <allocCount>0</allocCount>
+ <maxAlloc>0</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-server:inode_t</name>
+ <hotCount>1</hotCount>
+ <coldCount>16383</coldCount>
+ <padddedSizeOf>148</padddedSizeOf>
+ <allocCount>2</allocCount>
+ <maxAlloc>2</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-locks:pl_local_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>32</coldCount>
+ <padddedSizeOf>140</padddedSizeOf>
+ <allocCount>1</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-marker:marker_local_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>128</coldCount>
+ <padddedSizeOf>316</padddedSizeOf>
+ <allocCount>0</allocCount>
+ <maxAlloc>0</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>music-server:rpcsvc_request_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>512</coldCount>
+ <padddedSizeOf>6372</padddedSizeOf>
+ <allocCount>12</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:struct saved_frame</name>
+ <hotCount>0</hotCount>
+ <coldCount>8</coldCount>
+ <padddedSizeOf>124</padddedSizeOf>
+ <allocCount>2</allocCount>
+ <maxAlloc>2</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:struct rpc_req</name>
+ <hotCount>0</hotCount>
+ <coldCount>8</coldCount>
+ <padddedSizeOf>2236</padddedSizeOf>
+ <allocCount>2</allocCount>
+ <maxAlloc>2</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:rpcsvc_request_t</name>
+ <hotCount>1</hotCount>
+ <coldCount>7</coldCount>
+ <padddedSizeOf>6372</padddedSizeOf>
+ <allocCount>1</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:data_t</name>
+ <hotCount>117</hotCount>
+ <coldCount>16266</coldCount>
+ <padddedSizeOf>52</padddedSizeOf>
+ <allocCount>180</allocCount>
+ <maxAlloc>121</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:data_pair_t</name>
+ <hotCount>138</hotCount>
+ <coldCount>16245</coldCount>
+ <padddedSizeOf>68</padddedSizeOf>
+ <allocCount>220</allocCount>
+ <maxAlloc>142</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:dict_t</name>
+ <hotCount>13</hotCount>
+ <coldCount>4083</coldCount>
+ <padddedSizeOf>84</padddedSizeOf>
+ <allocCount>25</allocCount>
+ <maxAlloc>15</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:call_stub_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>1024</coldCount>
+ <padddedSizeOf>1228</padddedSizeOf>
+ <allocCount>4</allocCount>
+ <maxAlloc>1</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:call_stack_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>1024</coldCount>
+ <padddedSizeOf>2084</padddedSizeOf>
+ <allocCount>6</allocCount>
+ <maxAlloc>2</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ <pool>
+ <name>glusterfs:call_frame_t</name>
+ <hotCount>0</hotCount>
+ <coldCount>4096</coldCount>
+ <padddedSizeOf>172</padddedSizeOf>
+ <allocCount>20</allocCount>
+ <maxAlloc>7</maxAlloc>
+ <poolMisses>0</poolMisses>
+ <maxStdAlloc>0</maxStdAlloc>
+ </pool>
+ </mempool>
+ </memStatus>
+ </node>
+ </volume>
+ </volumes>
+ </volStatus>
+</cliOutput>
+"""
+ ostatus = \
+ {'bricks': [{'brick': '192.168.122.2:/tmp/music-b1',
+ 'mallinfo': {'arena': '606208',
+ 'fordblks': '132000',
+ 'fsmblks': '64',
+ 'hblkhd': '15179776',
+ 'hblks': '12',
+ 'keepcost': '130224',
+ 'ordblks': '6',
+ 'smblks': '1',
+ 'uordblks': '474208',
+ 'usmblks': '0'},
+ 'mempool': [{'allocCount': '0',
+ 'coldCount': '1024',
+ 'hotCount': '0',
+ 'maxAlloc': '0',
+ 'maxStdAlloc': '0',
+ 'name': 'music-server:fd_t',
+ 'padddedSizeOf': '100',
+ 'poolMisses': '0'},
+ {'allocCount': '0',
+ 'coldCount': '16384',
+ 'hotCount': '0',
+ 'maxAlloc': '0',
+ 'maxStdAlloc': '0',
+ 'name': 'music-server:dentry_t',
+ 'padddedSizeOf': '84',
+ 'poolMisses': '0'},
+ {'allocCount': '1',
+ 'coldCount': '16383',
+ 'hotCount': '1',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'music-server:inode_t',
+ 'padddedSizeOf': '148',
+ 'poolMisses': '0'},
+ {'allocCount': '1',
+ 'coldCount': '32',
+ 'hotCount': '0',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'music-locks:pl_local_t',
+ 'padddedSizeOf': '140',
+ 'poolMisses': '0'},
+ {'allocCount': '0',
+ 'coldCount': '128',
+ 'hotCount': '0',
+ 'maxAlloc': '0',
+ 'maxStdAlloc': '0',
+ 'name': 'music-marker:marker_local_t',
+ 'padddedSizeOf': '316',
+ 'poolMisses': '0'},
+ {'allocCount': '10',
+ 'coldCount': '512',
+ 'hotCount': '0',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'music-server:rpcsvc_request_t',
+ 'padddedSizeOf': '6372',
+ 'poolMisses': '0'},
+ {'allocCount': '2',
+ 'coldCount': '8',
+ 'hotCount': '0',
+ 'maxAlloc': '2',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:struct saved_frame',
+ 'padddedSizeOf': '124',
+ 'poolMisses': '0'},
+ {'allocCount': '2',
+ 'coldCount': '8',
+ 'hotCount': '0',
+ 'maxAlloc': '2',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:struct rpc_req',
+ 'padddedSizeOf': '2236',
+ 'poolMisses': '0'},
+ {'allocCount': '1',
+ 'coldCount': '7',
+ 'hotCount': '1',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:rpcsvc_request_t',
+ 'padddedSizeOf': '6372',
+ 'poolMisses': '0'},
+ {'allocCount': '179',
+ 'coldCount': '16266',
+ 'hotCount': '117',
+ 'maxAlloc': '121',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:data_t',
+ 'padddedSizeOf': '52',
+ 'poolMisses': '0'},
+ {'allocCount': '218',
+ 'coldCount': '16245',
+ 'hotCount': '138',
+ 'maxAlloc': '142',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:data_pair_t',
+ 'padddedSizeOf': '68',
+ 'poolMisses': '0'},
+ {'allocCount': '24',
+ 'coldCount': '4083',
+ 'hotCount': '13',
+ 'maxAlloc': '15',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:dict_t',
+ 'padddedSizeOf': '84',
+ 'poolMisses': '0'},
+ {'allocCount': '2',
+ 'coldCount': '1024',
+ 'hotCount': '0',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:call_stub_t',
+ 'padddedSizeOf': '1228',
+ 'poolMisses': '0'},
+ {'allocCount': '4',
+ 'coldCount': '1024',
+ 'hotCount': '0',
+ 'maxAlloc': '2',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:call_stack_t',
+ 'padddedSizeOf': '2084',
+ 'poolMisses': '0'},
+ {'allocCount': '14',
+ 'coldCount': '4096',
+ 'hotCount': '0',
+ 'maxAlloc': '7',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:call_frame_t',
+ 'padddedSizeOf': '172',
+ 'poolMisses': '0'}]},
+ {'brick': '192.168.122.2:/tmp/music-b2',
+ 'mallinfo': {'arena': '606208',
+ 'fordblks': '131984',
+ 'fsmblks': '128',
+ 'hblkhd': '15179776',
+ 'hblks': '12',
+ 'keepcost': '130224',
+ 'ordblks': '5',
+ 'smblks': '2',
+ 'uordblks': '474224',
+ 'usmblks': '0'},
+ 'mempool': [{'allocCount': '0',
+ 'coldCount': '1024',
+ 'hotCount': '0',
+ 'maxAlloc': '0',
+ 'maxStdAlloc': '0',
+ 'name': 'music-server:fd_t',
+ 'padddedSizeOf': '100',
+ 'poolMisses': '0'},
+ {'allocCount': '0',
+ 'coldCount': '16384',
+ 'hotCount': '0',
+ 'maxAlloc': '0',
+ 'maxStdAlloc': '0',
+ 'name': 'music-server:dentry_t',
+ 'padddedSizeOf': '84',
+ 'poolMisses': '0'},
+ {'allocCount': '2',
+ 'coldCount': '16383',
+ 'hotCount': '1',
+ 'maxAlloc': '2',
+ 'maxStdAlloc': '0',
+ 'name': 'music-server:inode_t',
+ 'padddedSizeOf': '148',
+ 'poolMisses': '0'},
+ {'allocCount': '1',
+ 'coldCount': '32',
+ 'hotCount': '0',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'music-locks:pl_local_t',
+ 'padddedSizeOf': '140',
+ 'poolMisses': '0'},
+ {'allocCount': '0',
+ 'coldCount': '128',
+ 'hotCount': '0',
+ 'maxAlloc': '0',
+ 'maxStdAlloc': '0',
+ 'name': 'music-marker:marker_local_t',
+ 'padddedSizeOf': '316',
+ 'poolMisses': '0'},
+ {'allocCount': '12',
+ 'coldCount': '512',
+ 'hotCount': '0',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'music-server:rpcsvc_request_t',
+ 'padddedSizeOf': '6372',
+ 'poolMisses': '0'},
+ {'allocCount': '2',
+ 'coldCount': '8',
+ 'hotCount': '0',
+ 'maxAlloc': '2',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:struct saved_frame',
+ 'padddedSizeOf': '124',
+ 'poolMisses': '0'},
+ {'allocCount': '2',
+ 'coldCount': '8',
+ 'hotCount': '0',
+ 'maxAlloc': '2',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:struct rpc_req',
+ 'padddedSizeOf': '2236',
+ 'poolMisses': '0'},
+ {'allocCount': '1',
+ 'coldCount': '7',
+ 'hotCount': '1',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:rpcsvc_request_t',
+ 'padddedSizeOf': '6372',
+ 'poolMisses': '0'},
+ {'allocCount': '180',
+ 'coldCount': '16266',
+ 'hotCount': '117',
+ 'maxAlloc': '121',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:data_t',
+ 'padddedSizeOf': '52',
+ 'poolMisses': '0'},
+ {'allocCount': '220',
+ 'coldCount': '16245',
+ 'hotCount': '138',
+ 'maxAlloc': '142',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:data_pair_t',
+ 'padddedSizeOf': '68',
+ 'poolMisses': '0'},
+ {'allocCount': '25',
+ 'coldCount': '4083',
+ 'hotCount': '13',
+ 'maxAlloc': '15',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:dict_t',
+ 'padddedSizeOf': '84',
+ 'poolMisses': '0'},
+ {'allocCount': '4',
+ 'coldCount': '1024',
+ 'hotCount': '0',
+ 'maxAlloc': '1',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:call_stub_t',
+ 'padddedSizeOf': '1228',
+ 'poolMisses': '0'},
+ {'allocCount': '6',
+ 'coldCount': '1024',
+ 'hotCount': '0',
+ 'maxAlloc': '2',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:call_stack_t',
+ 'padddedSizeOf': '2084',
+ 'poolMisses': '0'},
+ {'allocCount': '20',
+ 'coldCount': '4096',
+ 'hotCount': '0',
+ 'maxAlloc': '7',
+ 'maxStdAlloc': '0',
+ 'name': 'glusterfs:call_frame_t',
+ 'padddedSizeOf': '172',
+ 'poolMisses': '0'}]}],
+ 'name': 'music'}
+ tree = etree.fromstring(out)
+ status = gcli._parseVolumeStatusMem(tree)
+ self.assertEquals(status, ostatus)
+
+ def test_parseVolumeStatus(self):
+ self._parseVolumeStatus_test()
+ self._parseVolumeStatusDetail_test()
+ self._parseVolumeStatusClients_test()
+ self._parseVolumeStatusMem_test()