#
# Copyright 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#

from testrunner import GlusterNagiosTestCase as TestCaseBase
from glusternagios import glustercli as gcli
import xml.etree.cElementTree as etree
import mock


class GlusterCliTests(TestCaseBase):
    maxDiff = None

    def _parseVolumeInfo_empty_test(self):
        out = """ 0 0 """
        tree = etree.fromstring(out)
        self.assertFalse(gcli._parseVolumeInfo(tree))

    def _parseVolumeInfo_test(self):
        out = """ 0 0 music b3114c71-741b-4c6f-a39e-80384c4ea3cf 1 Started 2 2 1 2 2 Replicate 0 192.168.122.2:/tmp/m_b1192.168.122.2:/tmp/m_b1 883c7829-d808-4a92-a414-db87061097b3 192.168.122.2:/tmp/m_b2192.168.122.2:/tmp/m_b2 883c7829-d808-4a92-a414-db87061097b3 1 test1 b444ed94-f346-4cda-bd55-0282f21d22db 2 Stopped 1 1 1 1 0 Distribute 1 192.168.122.2:/tmp/t_b1192.168.122.2:/tmp/t_b1 883c7829-d808-4a92-a414-db87061097b3 0 2 """
        tree = etree.fromstring(out)
        oVolumeInfo = \
            {'music': {'brickCount': '2',
                       'bricks': ['192.168.122.2:/tmp/m_b1',
                                  '192.168.122.2:/tmp/m_b2'],
                       'distCount': '2',
                       'bricksInfo': [{
                           'name': '192.168.122.2:/tmp/m_b1',
                           'hostUuid': '883c7829-d808-4a92-a414-db87061097b3'
                       }, {
                           'name': '192.168.122.2:/tmp/m_b2',
                           'hostUuid': '883c7829-d808-4a92-a414-db87061097b3'
                       }],
                       'options': {'auth.allow': '*'},
                       'replicaCount': '2',
                       'stripeCount': '1',
                       'transportType': [gcli.TransportType.TCP],
                       'uuid': 'b3114c71-741b-4c6f-a39e-80384c4ea3cf',
                       'volumeName': 'music',
                       'volumeStatus': gcli.VolumeStatus.ONLINE,
                       'volumeType': 'REPLICATE'},
             'test1': {'brickCount': '1',
                       'bricks': ['192.168.122.2:/tmp/t_b1'],
                       'distCount': '1',
                       'bricksInfo': [{
                           'name': '192.168.122.2:/tmp/t_b1',
                           'hostUuid': '883c7829-d808-4a92-a414-db87061097b3'
                       }],
                       'options': {},
                       'replicaCount': '1',
                       'stripeCount': '1',
                       'transportType': [gcli.TransportType.RDMA],
                       'uuid': 'b444ed94-f346-4cda-bd55-0282f21d22db',
                       'volumeName': 'test1',
                       'volumeStatus': gcli.VolumeStatus.OFFLINE,
                       'volumeType': 'DISTRIBUTE'}}
        volumeInfo = gcli._parseVolumeInfo(tree)
        print(volumeInfo)
        print(oVolumeInfo)
        self.assertEquals(volumeInfo, oVolumeInfo)

    def test_parseVolumeInfo(self):
        self._parseVolumeInfo_empty_test()
        self._parseVolumeInfo_test()

    def _parsePeerStatus_empty_test(self):
        out = """ 0 0 No peers present """
        tree = etree.fromstring(out)
        hostList = \
            gcli._parsePeerStatus(tree,
                                  'fedora-16-test',
                                  '711d2887-3222-46d8-801a-7e3f646bdd4d',
                                  gcli.HostStatus.CONNECTED)
        self.assertEquals(hostList,
                          [{'hostname': 'fedora-16-test',
                            'uuid': '711d2887-3222-46d8-801a-7e3f646bdd4d',
                            'status': gcli.HostStatus.CONNECTED}])

    def _parsePeerStatus_test(self):
        out = """ 0 0 610f466c-781a-4e04-8f67-8eba9a201867 192.168.2.21 1 3 Peer in Cluster 12345678-781a-aaaa-bbbb-8eba9a201867 FC16-1 0 3 Peer in Cluster 12345678-cccc-aaaa-bbbb-8eba9a201867 FC16-2 1 2 Peer rejected """
        tree = etree.fromstring(out)
        hostList = \
gcli._parsePeerStatus(tree, 'fedora-16-test', '711d2887-3222-46d8-801a-7e3f646bdd4d', gcli.HostStatus.CONNECTED) self.assertEquals(hostList, [{'hostname': 'fedora-16-test', 'uuid': '711d2887-3222-46d8-801a-7e3f646bdd4d', 'status': gcli.HostStatus.CONNECTED}, {'hostname': '192.168.2.21', 'uuid': '610f466c-781a-4e04-8f67-8eba9a201867', 'status': gcli.HostStatus.CONNECTED}, {'hostname': 'FC16-1', 'uuid': '12345678-781a-aaaa-bbbb-8eba9a201867', 'status': gcli.HostStatus.DISCONNECTED}, {'hostname': 'FC16-2', 'uuid': '12345678-cccc-aaaa-bbbb-8eba9a201867', 'status': gcli.HostStatus.UNKNOWN}]) def test_parsePeerStatus(self): self._parsePeerStatus_empty_test() self._parsePeerStatus_test() def _parseVolumeStatus_test(self): out = """ 0 0 music 4 192.168.122.2 /tmp/music-b1 f06b108e-a780-4519-bb22-c3083a1e3f8a 49152 1 1313 192.168.122.2 /tmp/music-b2 f06b108e-a780-4519-bb22-c3083a1e3f8a 49153 1 1335 NFS Server 192.168.122.2 f06b108e-a780-4519-bb22-c3083a1e3f8a 38467 1 1357 Self-heal Daemon 192.168.122.2 f06b108e-a780-4519-bb22-c3083a1e3f8a 0 1 1375 """ tree = etree.fromstring(out) status = gcli._parseVolumeStatus(tree) self.assertEquals(status, {'bricks': [{'brick': '192.168.122.2:/tmp/music-b1', 'pid': '1313', 'port': '49152', 'status': 'ONLINE'}, {'brick': '192.168.122.2:/tmp/music-b2', 'pid': '1335', 'port': '49153', 'status': 'ONLINE'}], 'name': 'music', 'nfs': [{'hostname': '192.168.122.2', 'pid': '1357', 'port': '38467', 'status': 'ONLINE'}], 'shd': [{'hostname': '192.168.122.2', 'pid': '1375', 'status': 'ONLINE'}]}) def _parseVolumeStatusDetail_test(self): out = """ 0 0 music 2 192.168.122.2 /tmp/music-b1 f06b108e-a780-4519-bb22-c3083a1e3f8a 49152 1 1313 8370712576 4478812160 /dev/vda1 4096 rw,seclabel,relatime,data=ordered ext4 192.168.122.2 /tmp/music-b2 f06b108e-a780-4519-bb22-c3083a1e3f8a 49153 1 1335 8370712576 4478812160 /dev/vda1 4096 rw,seclabel,relatime,data=ordered ext4 """ tree = etree.fromstring(out) oStatus = \ {'bricks': [{'blockSize': '4096', 'brick': '192.168.122.2:/tmp/music-b1', 'device': '/dev/vda1', 'fsName': 'ext4', 'mntOptions': 'rw,seclabel,relatime,data=ordered', 'sizeFree': '4271.328', 'sizeTotal': '7982.934'}, {'blockSize': '4096', 'brick': '192.168.122.2:/tmp/music-b2', 'device': '/dev/vda1', 'fsName': 'ext4', 'mntOptions': 'rw,seclabel,relatime,data=ordered', 'sizeFree': '4271.328', 'sizeTotal': '7982.934'}], 'name': 'music'} status = gcli._parseVolumeStatusDetail(tree) self.assertEquals(status, oStatus) def _parseVolumeStatusClients_test(self): out = """ 0 0 music 2 192.168.122.2 /tmp/music-b1 49152 1 1313 2 192.168.122.2:1021 1172 792 192.168.122.2:1011 10076 12152 192.168.122.2 /tmp/music-b2 49153 1 1335 2 192.168.122.2:1020 1172 792 192.168.122.2:1010 10864 12816 """ tree = etree.fromstring(out) status = gcli._parseVolumeStatusClients(tree) self.assertEquals(status.keys(), ['bricks', 'name']) self.assertEquals(status['name'], 'music') oBricks = [{'brick': '192.168.122.2:/tmp/music-b1', 'clientsStatus': [{'bytesRead': '1172', 'bytesWrite': '792', 'hostname': '192.168.122.2:1021'}, {'bytesRead': '10076', 'bytesWrite': '12152', 'hostname': '192.168.122.2:1011'}]}, {'brick': '192.168.122.2:/tmp/music-b2', 'clientsStatus': [{'bytesRead': '1172', 'bytesWrite': '792', 'hostname': '192.168.122.2:1020'}, {'bytesRead': '10864', 'bytesWrite': '12816', 'hostname': '192.168.122.2:1010'}]}] self.assertEquals(status['bricks'], oBricks) def _parseVolumeStatusMem_test(self): out = """ 0 0 music 2 192.168.122.2 /tmp/music-b1 f06b108e-a780-4519-bb22-c3083a1e3f8a 49152 1 1452 
606208 6 1 12 15179776 0 64 474208 132000 130224 15 music-server:fd_t 0 1024 100 0 0 0 0 music-server:dentry_t 0 16384 84 0 0 0 0 music-server:inode_t 1 16383 148 1 1 0 0 music-locks:pl_local_t 0 32 140 1 1 0 0 music-marker:marker_local_t 0 128 316 0 0 0 0 music-server:rpcsvc_request_t 0 512 6372 10 1 0 0 glusterfs:struct saved_frame 0 8 124 2 2 0 0 glusterfs:struct rpc_req 0 8 2236 2 2 0 0 glusterfs:rpcsvc_request_t 1 7 6372 1 1 0 0 glusterfs:data_t 117 16266 52 179 121 0 0 glusterfs:data_pair_t 138 16245 68 218 142 0 0 glusterfs:dict_t 13 4083 84 24 15 0 0 glusterfs:call_stub_t 0 1024 1228 2 1 0 0 glusterfs:call_stack_t 0 1024 2084 4 2 0 0 glusterfs:call_frame_t 0 4096 172 14 7 0 0 192.168.122.2 /tmp/music-b2 f06b108e-a780-4519-bb22-c3083a1e3f8a 49153 1 1459 606208 5 2 12 15179776 0 128 474224 131984 130224 15 music-server:fd_t 0 1024 100 0 0 0 0 music-server:dentry_t 0 16384 84 0 0 0 0 music-server:inode_t 1 16383 148 2 2 0 0 music-locks:pl_local_t 0 32 140 1 1 0 0 music-marker:marker_local_t 0 128 316 0 0 0 0 music-server:rpcsvc_request_t 0 512 6372 12 1 0 0 glusterfs:struct saved_frame 0 8 124 2 2 0 0 glusterfs:struct rpc_req 0 8 2236 2 2 0 0 glusterfs:rpcsvc_request_t 1 7 6372 1 1 0 0 glusterfs:data_t 117 16266 52 180 121 0 0 glusterfs:data_pair_t 138 16245 68 220 142 0 0 glusterfs:dict_t 13 4083 84 25 15 0 0 glusterfs:call_stub_t 0 1024 1228 4 1 0 0 glusterfs:call_stack_t 0 1024 2084 6 2 0 0 glusterfs:call_frame_t 0 4096 172 20 7 0 0 """ ostatus = \ {'bricks': [{'brick': '192.168.122.2:/tmp/music-b1', 'mallinfo': {'arena': '606208', 'fordblks': '132000', 'fsmblks': '64', 'hblkhd': '15179776', 'hblks': '12', 'keepcost': '130224', 'ordblks': '6', 'smblks': '1', 'uordblks': '474208', 'usmblks': '0'}, 'mempool': [{'allocCount': '0', 'coldCount': '1024', 'hotCount': '0', 'maxAlloc': '0', 'maxStdAlloc': '0', 'name': 'music-server:fd_t', 'padddedSizeOf': '100', 'poolMisses': '0'}, {'allocCount': '0', 'coldCount': '16384', 'hotCount': '0', 'maxAlloc': '0', 'maxStdAlloc': '0', 'name': 'music-server:dentry_t', 'padddedSizeOf': '84', 'poolMisses': '0'}, {'allocCount': '1', 'coldCount': '16383', 'hotCount': '1', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'music-server:inode_t', 'padddedSizeOf': '148', 'poolMisses': '0'}, {'allocCount': '1', 'coldCount': '32', 'hotCount': '0', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'music-locks:pl_local_t', 'padddedSizeOf': '140', 'poolMisses': '0'}, {'allocCount': '0', 'coldCount': '128', 'hotCount': '0', 'maxAlloc': '0', 'maxStdAlloc': '0', 'name': 'music-marker:marker_local_t', 'padddedSizeOf': '316', 'poolMisses': '0'}, {'allocCount': '10', 'coldCount': '512', 'hotCount': '0', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'music-server:rpcsvc_request_t', 'padddedSizeOf': '6372', 'poolMisses': '0'}, {'allocCount': '2', 'coldCount': '8', 'hotCount': '0', 'maxAlloc': '2', 'maxStdAlloc': '0', 'name': 'glusterfs:struct saved_frame', 'padddedSizeOf': '124', 'poolMisses': '0'}, {'allocCount': '2', 'coldCount': '8', 'hotCount': '0', 'maxAlloc': '2', 'maxStdAlloc': '0', 'name': 'glusterfs:struct rpc_req', 'padddedSizeOf': '2236', 'poolMisses': '0'}, {'allocCount': '1', 'coldCount': '7', 'hotCount': '1', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'glusterfs:rpcsvc_request_t', 'padddedSizeOf': '6372', 'poolMisses': '0'}, {'allocCount': '179', 'coldCount': '16266', 'hotCount': '117', 'maxAlloc': '121', 'maxStdAlloc': '0', 'name': 'glusterfs:data_t', 'padddedSizeOf': '52', 'poolMisses': '0'}, {'allocCount': '218', 'coldCount': '16245', 'hotCount': '138', 
'maxAlloc': '142', 'maxStdAlloc': '0', 'name': 'glusterfs:data_pair_t', 'padddedSizeOf': '68', 'poolMisses': '0'}, {'allocCount': '24', 'coldCount': '4083', 'hotCount': '13', 'maxAlloc': '15', 'maxStdAlloc': '0', 'name': 'glusterfs:dict_t', 'padddedSizeOf': '84', 'poolMisses': '0'}, {'allocCount': '2', 'coldCount': '1024', 'hotCount': '0', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'glusterfs:call_stub_t', 'padddedSizeOf': '1228', 'poolMisses': '0'}, {'allocCount': '4', 'coldCount': '1024', 'hotCount': '0', 'maxAlloc': '2', 'maxStdAlloc': '0', 'name': 'glusterfs:call_stack_t', 'padddedSizeOf': '2084', 'poolMisses': '0'}, {'allocCount': '14', 'coldCount': '4096', 'hotCount': '0', 'maxAlloc': '7', 'maxStdAlloc': '0', 'name': 'glusterfs:call_frame_t', 'padddedSizeOf': '172', 'poolMisses': '0'}]}, {'brick': '192.168.122.2:/tmp/music-b2', 'mallinfo': {'arena': '606208', 'fordblks': '131984', 'fsmblks': '128', 'hblkhd': '15179776', 'hblks': '12', 'keepcost': '130224', 'ordblks': '5', 'smblks': '2', 'uordblks': '474224', 'usmblks': '0'}, 'mempool': [{'allocCount': '0', 'coldCount': '1024', 'hotCount': '0', 'maxAlloc': '0', 'maxStdAlloc': '0', 'name': 'music-server:fd_t', 'padddedSizeOf': '100', 'poolMisses': '0'}, {'allocCount': '0', 'coldCount': '16384', 'hotCount': '0', 'maxAlloc': '0', 'maxStdAlloc': '0', 'name': 'music-server:dentry_t', 'padddedSizeOf': '84', 'poolMisses': '0'}, {'allocCount': '2', 'coldCount': '16383', 'hotCount': '1', 'maxAlloc': '2', 'maxStdAlloc': '0', 'name': 'music-server:inode_t', 'padddedSizeOf': '148', 'poolMisses': '0'}, {'allocCount': '1', 'coldCount': '32', 'hotCount': '0', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'music-locks:pl_local_t', 'padddedSizeOf': '140', 'poolMisses': '0'}, {'allocCount': '0', 'coldCount': '128', 'hotCount': '0', 'maxAlloc': '0', 'maxStdAlloc': '0', 'name': 'music-marker:marker_local_t', 'padddedSizeOf': '316', 'poolMisses': '0'}, {'allocCount': '12', 'coldCount': '512', 'hotCount': '0', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'music-server:rpcsvc_request_t', 'padddedSizeOf': '6372', 'poolMisses': '0'}, {'allocCount': '2', 'coldCount': '8', 'hotCount': '0', 'maxAlloc': '2', 'maxStdAlloc': '0', 'name': 'glusterfs:struct saved_frame', 'padddedSizeOf': '124', 'poolMisses': '0'}, {'allocCount': '2', 'coldCount': '8', 'hotCount': '0', 'maxAlloc': '2', 'maxStdAlloc': '0', 'name': 'glusterfs:struct rpc_req', 'padddedSizeOf': '2236', 'poolMisses': '0'}, {'allocCount': '1', 'coldCount': '7', 'hotCount': '1', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'glusterfs:rpcsvc_request_t', 'padddedSizeOf': '6372', 'poolMisses': '0'}, {'allocCount': '180', 'coldCount': '16266', 'hotCount': '117', 'maxAlloc': '121', 'maxStdAlloc': '0', 'name': 'glusterfs:data_t', 'padddedSizeOf': '52', 'poolMisses': '0'}, {'allocCount': '220', 'coldCount': '16245', 'hotCount': '138', 'maxAlloc': '142', 'maxStdAlloc': '0', 'name': 'glusterfs:data_pair_t', 'padddedSizeOf': '68', 'poolMisses': '0'}, {'allocCount': '25', 'coldCount': '4083', 'hotCount': '13', 'maxAlloc': '15', 'maxStdAlloc': '0', 'name': 'glusterfs:dict_t', 'padddedSizeOf': '84', 'poolMisses': '0'}, {'allocCount': '4', 'coldCount': '1024', 'hotCount': '0', 'maxAlloc': '1', 'maxStdAlloc': '0', 'name': 'glusterfs:call_stub_t', 'padddedSizeOf': '1228', 'poolMisses': '0'}, {'allocCount': '6', 'coldCount': '1024', 'hotCount': '0', 'maxAlloc': '2', 'maxStdAlloc': '0', 'name': 'glusterfs:call_stack_t', 'padddedSizeOf': '2084', 'poolMisses': '0'}, {'allocCount': '20', 'coldCount': '4096', 'hotCount': '0', 
                         'maxAlloc': '7',
                         'maxStdAlloc': '0',
                         'name': 'glusterfs:call_frame_t',
                         'padddedSizeOf': '172',
                         'poolMisses': '0'}]}],
             'name': 'music'}
        tree = etree.fromstring(out)
        status = gcli._parseVolumeStatusMem(tree)
        self.assertEquals(status, ostatus)

    def test_parseVolumeStatus(self):
        self._parseVolumeStatus_test()
        self._parseVolumeStatusDetail_test()
        self._parseVolumeStatusClients_test()
        self._parseVolumeStatusMem_test()

    @mock.patch('glusternagios.utils.execCmd')
    @mock.patch('glusternagios.glustercli._getGlusterVolCmd')
    def test_parseVolumeQuotaStatus(self, mock_glusterVolCmd,
                                    mock_execCmd,):
        mock_glusterVolCmd.return_value = ["gluster", "volume"]
        mock_execCmd.return_value = 0, ["quota command failed : "
                                        "Quota is not enabled on "
                                        "volume demo-test-vol"], None
        status = gcli.volumeQuotaStatus("test-vol")
        exp_disabled_out = {'status': gcli.VolumeQuotaStatus.DISABLED,
                            'hard_ex_dirs': [],
                            'soft_ex_dirs': []}
        self.assertEquals(status, exp_disabled_out)

        mock_execCmd.return_value = 0, ["quota: No quota "
                                        "configured on "
                                        "volume demo-test-vol"], None
        status = gcli.volumeQuotaStatus("test-vol")
        self.assertEquals(status, exp_disabled_out)

        mock_execCmd.return_value = 0, self.__getQuotaOut(), None
        status = gcli.volumeQuotaStatus("test-vol")
        exp_disabled_out = \
            {'status': gcli.VolumeQuotaStatus.HARD_LIMIT_EXCEEDED,
             'hard_ex_dirs': ['/test/rewe'],
             'soft_ex_dirs': ['/test']}
        self.assertEquals(status, exp_disabled_out)

    @mock.patch('glusternagios.utils.execCmd')
    @mock.patch('glusternagios.glustercli._getGlusterVolCmd')
    def test_parseVolumeQuotaStatusWhenException(self, mock_glusterVolCmd,
                                                 mock_execCmd,):
        mock_glusterVolCmd.return_value = ["gluster", "volume"]
        mock_execCmd.return_value = -1, None, "err"
        try:
            gcli.volumeQuotaStatus("test-vol")
            assert False
        except gcli.GlusterCmdFailedException:
            assert True

    @mock.patch('glusternagios.utils.execCmd')
    @mock.patch('glusternagios.glustercli._getGlusterVolCmd')
    def test_getVolumeHealSplitBrainStatusNonRepl(self, mock_glusterVolCmd,
                                                  mock_execCmd,):
        mock_glusterVolCmd.return_value = ["gluster", "volume"]
        mock_execCmd.return_value = 2, None, ["Volume test-vol is not "
                                              "of type replicate"]
        expectedOut = {'test-vol':
                       {'status': gcli.VolumeSplitBrainStatus.NOTAPPLICABLE,
                        'unsyncedentries': 0}}
        status = gcli.volumeHealSplitBrainStatus("test-vol")
        print(status)
        self.assertEquals(status, expectedOut)

    @mock.patch('glusternagios.utils.execCmd')
    @mock.patch('glusternagios.glustercli._getGlusterVolCmd')
    def test_getVolumeHealSplitBrainStatus(self, mock_glusterVolCmd,
                                           mock_execCmd,):
        mock_glusterVolCmd.return_value = ["gluster", "volume"]
        mock_execCmd.return_value = (0,
                                     self.__getGlusterSelfHealInfoResult(),
                                     None)
        expectedOut = {'test-vol':
                       {'status': gcli.VolumeSplitBrainStatus.SPLITBRAIN,
                        'unsyncedentries': 10}}
        status = gcli.volumeHealSplitBrainStatus("test-vol")
        print(status)
        self.assertEquals(status, expectedOut)

    @mock.patch('glusternagios.utils.execCmd')
    @mock.patch('glusternagios.glustercli._getGlusterVolCmd')
    @mock.patch('glusternagios.glustercli.volumeInfo')
    def test_getVolumeGeoRepStatus(self, mock_volumeInfo,
                                   mock_glusterVolCmd,
                                   mock_execCmd,):
        mock_glusterVolCmd.return_value = ["gluster", "volume"]
        mock_execCmd.return_value = (0,
                                     self.__getGlusterGeoRepStatusResult(),
                                     None)
        mock_volumeInfo.return_value = {'test-vol':
                                        {'volumeType': 'REPLICATE',
                                         'replicaCount': 2,
                                         'brickCount': 2
                                         }
                                        }
        expectedOut = {'test-vol':
                       {'slaves': {'10.70.43.68::slave-vol':
                                   {'faulty': 2,
                                    'nodecount': 2,
                                    'notstarted': 0,
                                    'stopped': 0,
                                    'passive': 0,
                                    'detail': 'rhs3.novalocal:'
                                              '/bricks/b3 - FAULTY;'
                                              'rhs3-2.novalocal:'
                                              '/bricks/b3 - FAULTY;',
                                    'status': gcli.GeoRepStatus.FAULTY}
                                   }}}
        status = gcli.volumeGeoRepStatus("test-vol")
        print(status)
        self.assertEquals(status, expectedOut)

    @mock.patch('glusternagios.utils.execCmd')
    @mock.patch('glusternagios.glustercli._getGlusterVolCmd')
    @mock.patch('glusternagios.glustercli.volumeInfo')
    def test_getVolumeGeoRepStatusMultiSlave(self, mock_volumeInfo,
                                             mock_glusterVolCmd,
                                             mock_execCmd,):
        mock_glusterVolCmd.return_value = ["gluster", "volume"]
        mock_execCmd.return_value = (0,
                                     self.__getGlusterGeoRepStatusResult2(),
                                     None)
        mock_volumeInfo.return_value = {'test-vol':
                                        {'volumeType': 'REPLICATE',
                                         'replicaCount': 2,
                                         'brickCount': 2
                                         }
                                        }
        expectedOut = {'test-vol':
                       {'slaves': {'10.70.43.68::slave-vol':
                                   {'faulty': 1,
                                    'nodecount': 2,
                                    'notstarted': 0,
                                    'stopped': 0,
                                    'passive': 1,
                                    'detail': 'rhs3.novalocal:/bricks/b3 '
                                              '- PASSIVE;'
                                              'rhs3-2.novalocal:/bricks/b3 '
                                              '- FAULTY;',
                                    'status': gcli.GeoRepStatus.FAULTY},
                                   '10.70.43.68::slave-vol2':
                                   {'faulty': 0,
                                    'nodecount': 2,
                                    'notstarted': 2,
                                    'stopped': 0,
                                    'passive': 0,
                                    'detail': 'rhs3.novalocal:/bricks/b3 '
                                              '- NOT_STARTED;'
                                              'rhs3-2.novalocal:/bricks/b3 '
                                              '- NOT_STARTED;',
                                    'status': gcli.GeoRepStatus.NOT_STARTED}
                                   }}}
        status = gcli.volumeGeoRepStatus("test-vol")
        print(status)
        self.assertEquals(status, expectedOut)

        mock_execCmd.return_value = (0,
                                     self.__getGlusterGeoRepStatusResult3(),
                                     None)
        expectedOut = {'test-vol':
                       {'slaves': {'10.70.43.68::slave-vol':
                                   {'faulty': 0,
                                    'nodecount': 2,
                                    'notstarted': 1,
                                    'stopped': 0,
                                    'passive': 1,
                                    'detail': 'rhs3.novalocal:/bricks/b3 '
                                              '- PASSIVE;'
                                              'rhs3-2.novalocal:'
                                              '/bricks/b3 '
                                              '- NOT_STARTED;',
                                    'status': gcli.GeoRepStatus.NOT_STARTED
                                    }}}}
        status = gcli.volumeGeoRepStatus("test-vol")
        print(status)
        self.assertEquals(status, expectedOut)

    def __getQuotaOut(self):
        return \
            [" Path Hard-limit Soft-limit"
             " Used Available Soft-limit exceeded? Hard-limit exceeded?",
             "-------------------------------------------------------------"
             "--------------------------------------------------------------",
             "/test 200.0KB 80% "
             " 200.0KB 0Bytes Yes No",
             "/test/rewe 200.0KB 80% "
             "200.0KB 0Bytes Yes Yes"]

    def __getGlusterSelfHealInfoResult(self):
        return ["Gathering list of entries to be healed "
                "on volume rep has been successful",
                "",
                "Brick node2:/bricks/b3",
                "Status: Brick is Not connected",
                "Number of entries: 0"
                "",
                "Brick node1:/bricks/b3",
                "Number of entries: 10",
                "/dir.7/file.5",
                "/dir.8/file.3",
                "/dir.9/file.5",
                "/dir.2/file.4",
                "/dir.9/file.4",
                "/dir.4/file.1",
                "/file.4",
                "/dir.7/file.2",
                "/dir.10/file.2",
                "/dir.7/file.4"]

    def __getGlusterGeoRepStatusResult(self):
        return [" ",
                "MASTER NODE MASTER VOL MASTER BRICK "
                "SLAVE STATUS CHECKPOINT STATUS "
                "CRAWL STATUS",
                "--------------------------------------------------------"
                "--------------------------------------------------------"
                "----------------",
                "rhs3.novalocal rep /bricks/b3 "
                "10.70.43.68::slave-vol faulty "
                "N/A N/A",
                "rhs3-2.novalocal rep /bricks/b3 "
                "10.70.43.68::slave-vol faulty "
                "N/A N/A "]

    def __getGlusterGeoRepStatusResult2(self):
        return [" ",
                "MASTER NODE MASTER VOL MASTER BRICK "
                "SLAVE STATUS CHECKPOINT STATUS "
                "CRAWL STATUS",
                "--------------------------------------------------------"
                "--------------------------------------------------------"
                "----------------",
                "rhs3.novalocal rep /bricks/b3 "
                "10.70.43.68::slave-vol Passive "
                "N/A N/A",
                "rhs3-2.novalocal rep /bricks/b3 "
                "10.70.43.68::slave-vol faulty "
                "N/A N/A ",
                "rhs3.novalocal rep /bricks/b3 "
                "10.70.43.68::slave-vol2 Not Started "
                "N/A N/A",
                "rhs3-2.novalocal rep /bricks/b3 "
                "10.70.43.68::slave-vol2 Not Started "
                "N/A N/A "]

    def __getGlusterGeoRepStatusResult3(self):
        return [" ",
                "MASTER NODE MASTER VOL MASTER BRICK "
                "SLAVE STATUS CHECKPOINT STATUS "
                "CRAWL STATUS",
                "--------------------------------------------------------"
                "--------------------------------------------------------"
                "----------------",
                "rhs3.novalocal rep /bricks/b3 "
                "10.70.43.68::slave-vol Passive "
                "N/A N/A",
                "rhs3-2.novalocal rep /bricks/b3 "
                "10.70.43.68::slave-vol Not Started "
                "N/A N/A "]