author     Sanju Rakonde <srakonde@redhat.com>    2018-06-06 01:39:32 +0530
committer  Nigel Babu <nigelb@redhat.com>         2018-06-15 07:13:55 +0000
commit     6583686527c62d88d678c9d4040eaa9fe728efeb (patch)
tree       24d28cb8cf615343ead47aff355477e2e6e8460b
parent     77dc34706ce4ee6aa410b8b6614dd125d955673b (diff)
glusto-tests/glusterd: gluster volume status with/without xml tag
In this test case, we check gluster volume status and gluster volume status --xml
from a node which is part of the cluster but does not host any brick of the volume.
Steps followed are:
1. Create a two-node cluster.
2. Create a distributed volume with one brick (the brick resides on N1).
3. From the node which has no bricks, i.e. N2, check gluster v status; it should
   fail, saying the volume is not started.
4. From N2, check gluster v status --xml; it should also fail because the volume
   is not started yet.
5. Start the volume.
6. From N2, check gluster v status; this should succeed.
7. From N2, check gluster v status --xml; this should succeed (a minimal sketch
   of this check is shown below).

Change-Id: I1a230b82c0628c66c16f25f89dd4e6d1d0b3f443
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
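For context, the sketch below shows what the --xml check boils down to when done by
hand rather than through glustolibs' get_volume_status. It is only an illustration:
the use of subprocess, the volname argument, and the opRet element name in the CLI's
XML output are assumptions made here for the sketch, not part of the patch itself.

    # Illustrative sketch: run "gluster volume status <vol> --xml" and parse it.
    # Assumes the gluster CLI is on PATH and that its XML output exposes an
    # opRet element under the root (an assumption for this sketch).
    import subprocess
    import xml.etree.ElementTree as ET

    def volume_status_xml(volname):
        """Return the parsed XML root on success, or None if the CLI failed."""
        proc = subprocess.Popen(
            ["gluster", "volume", "status", volname, "--xml"],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, _ = proc.communicate()
        if proc.returncode != 0 or not out.strip():
            return None
        root = ET.fromstring(out)
        if root.findtext("opRet") != "0":
            # e.g. the "Volume <vol> is not started" case exercised by the test
            return None
        return root

Before the volume is started such a call would return None, mirroring the
assertIsNone check in the test; after volume_start it should return the parsed tree.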
-rw-r--r--   tests/functional/glusterd/test_volume_status_xml.py   112
1 files changed, 112 insertions, 0 deletions
diff --git a/tests/functional/glusterd/test_volume_status_xml.py b/tests/functional/glusterd/test_volume_status_xml.py
new file mode 100644
index 000000000..1f5475374
--- /dev/null
+++ b/tests/functional/glusterd/test_volume_status_xml.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.volume_ops import (volume_create, volume_status,
+                                           get_volume_status, volume_start)
+from glustolibs.gluster.lib_utils import form_bricks_list
+from glustolibs.gluster.peer_ops import (peer_probe_servers, peer_detach,
+                                         peer_detach_servers,
+                                         nodes_from_pool_list)
+
+
+@runs_on([['distributed'], ['glusterfs']])
+class TestVolumeStatusxml(GlusterBaseClass):
+
+    def setUp(self):
+
+        GlusterBaseClass.setUp.im_func(self)
+
+        # check whether peers are in connected state
+        ret = self.validate_peers_are_connected()
+        if not ret:
+            raise ExecutionError("Peers are not in connected state")
+
+        # detach all the servers so the test can form a fresh
+        # two-node cluster on its own
+        ret = peer_detach_servers(self.mnode, self.servers)
+        if not ret:
+            raise ExecutionError("Failed to detach all servers from node %s."
+                                 % self.mnode)
+        g.log.info("Peer detach SUCCESSFUL.")
+
+    def tearDown(self):
+
+        # stopping and cleaning up the volume
+        ret = self.cleanup_volume()
+        if not ret:
+            raise ExecutionError("Failed to cleanup the volume %s"
+                                 % self.volname)
+
+        # detach whatever is left in the pool and re-probe all servers,
+        # restoring the cluster for subsequent tests
+        pool = nodes_from_pool_list(self.mnode)
+        for node in pool:
+            peer_detach(self.mnode, node)
+
+        ret = peer_probe_servers(self.mnode, self.servers)
+        if not ret:
+            raise ExecutionError("Failed to probe detached "
+                                 "servers %s" % self.servers)
+        GlusterBaseClass.tearDown.im_func(self)
+
+    def test_volume_status_xml(self):
+
+        # form a two-node cluster by probing servers[1] from servers[0]
+        ret = peer_probe_servers(self.servers[0], self.servers[1])
+        self.assertTrue(ret, "Peer probe failed to %s from %s"
+                        % (self.servers[1], self.servers[0]))
+
+        # create a distributed volume with a single brick on servers[0],
+        # so that servers[1] remains brick-less
+        number_of_bricks = 1
+        servers_info_from_single_node = {
+            self.servers[0]: self.all_servers_info[self.servers[0]]}
+
+        bricks_list = form_bricks_list(self.mnode, self.volname,
+                                       number_of_bricks, self.servers[0],
+                                       servers_info_from_single_node)
+        ret, _, _ = volume_create(self.servers[0], self.volname, bricks_list)
+        self.assertEqual(ret, 0, "Volume creation failed")
+        g.log.info("Volume %s created successfully", self.volname)
+
+        # volume status from the brick-less node should fail, as the
+        # volume is not started yet
+        ret, _, err = volume_status(self.servers[1], self.volname)
+        self.assertNotEqual(ret, 0, ("Unexpected: volume status succeeded"
+                                     " for %s even though the volume is not"
+                                     " started yet" % self.volname))
+        self.assertIn("is not started", err, ("volume status failed with an"
+                                              " unexpected error message"))
+
+        # volume status --xml should also fail before the volume is started
+        vol_status = get_volume_status(self.servers[1], self.volname)
+        self.assertIsNone(vol_status, ("Unexpected: volume status --xml for"
+                                       " %s succeeded even though the volume"
+                                       " is not started yet" % self.volname))
+
+        # start the volume
+        ret, _, _ = volume_start(self.servers[1], self.volname)
+        self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)
+
+        # volume status from the brick-less node should now succeed
+        ret, _, _ = volume_status(self.servers[1], self.volname)
+        self.assertEqual(ret, 0, ("Failed to get volume status for %s"
+                                  % self.volname))
+
+        # volume status --xml should now succeed as well
+        vol_status = get_volume_status(self.servers[1], self.volname)
+        self.assertIsNotNone(vol_status, ("Failed to get volume "
+                                          "status --xml for %s"
+                                          % self.volname))
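Assuming the usual glusto-tests workflow (the config file name below is only a
placeholder), the new case could be run on its own with something like:
glusto -c 'config.yml' --pytest='-v -x tests/functional/glusterd/test_volume_status_xml.py'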