summaryrefslogtreecommitdiffstats
path: root/examples
diff options
context:
space:
mode:
Diffstat (limited to 'examples')
-rw-r--r--examples/test_baseclass_variables.py69
-rw-r--r--examples/test_glusterd_ops.py112
-rw-r--r--examples/test_peer_ops.py329
-rw-r--r--examples/tests_using_baseclass.py72
4 files changed, 582 insertions, 0 deletions
diff --git a/examples/test_baseclass_variables.py b/examples/test_baseclass_variables.py
new file mode 100644
index 0000000..86488d6
--- /dev/null
+++ b/examples/test_baseclass_variables.py
@@ -0,0 +1,69 @@
+""" This Module demonstrates the class variables made available by GlusterBaseClass
+"""
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass)
+
+
+class DemoGlusyerBaseClassVariables(GlusterBaseClass):
+    """Demonstrating all the class variables populated by GlusterBaseClass.
+
+    NOTE(review): the class name looks like a typo of
+    'DemoGlusterBaseClassVariables'; renaming would change how runners
+    reference the test, so it is only flagged here.
+    """
+    @classmethod
+    def setUpClass(cls):
+        """Log every cluster-level variable the base class reads from
+        g.config, so readers can see what is available to tests.
+        """
+        # Read all the cluster config from the g.config and assign it to
+        # class variables
+        # NOTE(review): '.im_func' exists only on Python 2 unbound methods;
+        # this example will not run under Python 3 as written.
+        GlusterBaseClass.setUpClass.im_func(cls)
+
+        # Servers (list)
+        g.log.info("Servers:\n %s\n\n", cls.servers)
+
+        # Clients (list)
+        g.log.info("Clients:\n %s\n\n", cls.clients)
+
+        # Servers Info
+        g.log.info("Servers Info:\n %s\n\n", cls.all_servers_info)
+
+        # Clients Info
+        g.log.info("Clients Info:\n %s\n\n", cls.all_clients_info)
+
+        # Server IP's
+        g.log.info("Servers IP's:\n %s\n\n", cls.servers_ips)
+
+        # Volume type
+        g.log.info("Volume Type: %s\n\n", cls.volume_type)
+
+        # Mount type
+        g.log.info("Mount Type: %s\n\n", cls.mount_type)
+
+        # SMB Cluster info
+        g.log.info("SMB Users Info:\n %s\n\n", cls.smb_users_info)
+
+        # NFS-Ganesha Cluster info
+        g.log.info("NFS-Ganesha Number of Nodes:\n %s\n\n",
+                   cls.num_of_nfs_ganesha_nodes)
+
+        # Default volume_types configuration
+        g.log.info("Default volume_types configuration:\n %s\n\n",
+                   cls.default_volume_type_config)
+
+        # Volume configuration
+        g.log.info("Volume configuration:\n %s\n\n", cls.volume)
+
+        # Volume options
+        g.log.info("Default Volume Options:\n %s\n\n", cls.volume_options)
+
+        # Mnode
+        g.log.info("Mnode: %s\n\n", cls.mnode)
+
+        # Mounts
+        g.log.info("Mounts:\n %s\n\n", cls.mounts)
+
+        # Gluster log dirs
+        g.log.info("Gluster Log dirs:\n%s\n\n", cls.server_gluster_logs_dirs)
+
+        # Gluster Log files
+        g.log.info("Gluster Log files:\n%s\n\n", cls.client_gluster_logs_files)
+
+    def test1(self):
+        # Dummy test so the class is collected and setUpClass executes.
+        pass
diff --git a/examples/test_glusterd_ops.py b/examples/test_glusterd_ops.py
new file mode 100644
index 0000000..39cad71
--- /dev/null
+++ b/examples/test_glusterd_ops.py
@@ -0,0 +1,112 @@
+""" This Module demonstrates how to use functions available in gluster_init
+ module
+"""
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.gluster_init import (
+ is_glusterd_running, restart_glusterd, start_glusterd, stop_glusterd)
+
+
+class DemoGlusterInitClass(GlusterBaseClass):
+    """Demonstrating all the functions available in gluster_init module
+    (is_glusterd_running, restart_glusterd, stop_glusterd, start_glusterd).
+    """
+    @classmethod
+    def setUpClass(cls):
+        """Read the cluster configuration from g.config into class
+        variables via the base class.
+        """
+        # Read all the cluster config from the g.config and assign it to
+        # class variables
+        # NOTE(review): '.im_func' exists only on Python 2 unbound methods;
+        # this example will not run under Python 3 as written.
+        GlusterBaseClass.setUpClass.im_func(cls)
+
+    def setUp(self):
+        """setUp required for tests
+        """
+        # Calling GlusterBaseClass setUp
+        GlusterBaseClass.setUp.im_func(self)
+
+        # Check if glusterd is running on all servers(expected: active)
+        # NOTE: the state is only logged here, not asserted; the tests
+        # themselves restart glusterd and assert on its state.
+        g.log.info("Check if glusterd is running on all servers %s"
+                   "(expected: active)", self.servers)
+        ret = is_glusterd_running(self.servers)
+        # Return codes handled below: 0 = running, 1 = not running,
+        # -1 = not running but PID still alive.
+        if ret == 0:
+            g.log.info("Glusterd is running on all servers %s", self.servers)
+        elif ret == 1:
+            g.log.info("Glusterd is not running on all the servers %s",
+                       self.servers)
+        elif ret == -1:
+            g.log.info("Glusterd is not running on all the servers %s. "
+                       "PID is alive", self.servers)
+
+    def test_glusterd_services(self):
+        """Test restart, stop, start of glusterd
+        """
+        # restart glusterd on all servers
+        g.log.info("Restart glusterd on all servers %s", self.servers)
+        ret = restart_glusterd(self.servers)
+        self.assertTrue(ret, ("Failed to restart glusterd on all servers %s",
+                              self.servers))
+        g.log.info("Successfully restarted glusterd on all servers %s",
+                   self.servers)
+
+        # Check if glusterd is running on all servers(expected: active)
+        g.log.info("Check if glusterd is running on all servers %s"
+                   "(expected: active)", self.servers)
+        ret = is_glusterd_running(self.servers)
+        self.assertEqual(ret, 0, ("Glusterd is not running on all servers %s",
+                                  self.servers))
+        g.log.info("Glusterd is running on all the servers %s", self.servers)
+
+        # Stop glusterd on all servers
+        g.log.info("Stop glusterd on all servers %s", self.servers)
+        ret = stop_glusterd(self.servers)
+        self.assertTrue(ret, ("Failed to stop glusterd on all servers %s",
+                              self.servers))
+        g.log.info("Successfully stopped glusterd on all servers %s",
+                   self.servers)
+
+        # Check if glusterd is running on all servers(expected: not running)
+        g.log.info("Check if glusterd is running on all servers %s"
+                   "(expected: not running)", self.servers)
+        ret = is_glusterd_running(self.servers)
+        self.assertNotEqual(ret, 0, ("Glusterd is still running on some "
+                                     "servers %s", self.servers))
+        g.log.info("Glusterd not running on any servers %s as expected.",
+                   self.servers)
+
+        # Start glusterd on all servers
+        g.log.info("Start glusterd on all servers %s", self.servers)
+        ret = start_glusterd(self.servers)
+        self.assertTrue(ret, ("Failed to start glusterd on all servers %s",
+                              self.servers))
+        g.log.info("Successfully started glusterd on all servers %s",
+                   self.servers)
+
+        # Check if glusterd is running on all servers(expected: active)
+        g.log.info("Check if glusterd is running on all servers %s"
+                   "(expected: active)", self.servers)
+        ret = is_glusterd_running(self.servers)
+        self.assertEqual(ret, 0, ("Glusterd is not running on all servers %s",
+                                  self.servers))
+        g.log.info("Glusterd is running on all the servers %s", self.servers)
+
+    def tearDown(self):
+        """restart glusterd on all servers during teardown
+        """
+        # restart glusterd on all servers so later tests start from a
+        # known state; failure here aborts with ExecutionError.
+        g.log.info("Restart glusterd on all servers %s", self.servers)
+        ret = restart_glusterd(self.servers)
+        if not ret:
+            raise ExecutionError("Failed to restart glusterd on all "
+                                 "servers %s", self.servers)
+        g.log.info("Successfully restarted glusterd on all servers %s",
+                   self.servers)
+
+        # Calling GlusterBaseClass tearDown
+        GlusterBaseClass.tearDown.im_func(self)
+
+    @classmethod
+    def tearDownClass(cls):
+        """Call the base class tearDownClass for cleanup.
+        """
+        GlusterBaseClass.tearDownClass.im_func(cls)
diff --git a/examples/test_peer_ops.py b/examples/test_peer_ops.py
new file mode 100644
index 0000000..12f3677
--- /dev/null
+++ b/examples/test_peer_ops.py
@@ -0,0 +1,329 @@
+""" This Module demonstrates how to use functions available in peer_ops module
+"""
+
+import socket
+import random
+import re
+from glusto.core import Glusto as g
+from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
+from glustolibs.gluster.peer_ops import (
+ pool_list, peer_probe, peer_status, peer_probe_servers,
+ nodes_from_pool_list, is_peer_connected, peer_detach, peer_detach_servers,
+ get_peer_status, get_pool_list)
+
+
+class DemoPeerOpsClass(GlusterBaseClass):
+ """Demonstrating all the functions available in peer_ops module
+ """
+ @classmethod
+ def setUpClass(cls):
+ """
+ """
+ # Read all the cluster config from the g.config and assign it to
+ # class variables
+ GlusterBaseClass.setUpClass.im_func(cls)
+
+ # Detach all the servers if it's already attached to the cluster
+ nodes_in_pool_list = nodes_from_pool_list(cls.mnode)
+ if nodes_in_pool_list is None:
+ g.log.error("Unable to get nodes from gluster pool list "
+ "from node %s", cls.mnode)
+ else:
+ g.log.info("Nodes in pool: %s", nodes_in_pool_list)
+
+ if nodes_in_pool_list:
+ if cls.mnode in nodes_in_pool_list:
+ nodes_in_pool_list.remove(cls.mnode)
+ g.log.info("Detaching servers '%s' from the cluster from node %s",
+ nodes_in_pool_list, cls.mnode)
+ ret = peer_detach_servers(cls.mnode, nodes_in_pool_list)
+ if not ret:
+ raise ExecutionError("Failed to detach some or all "
+ "servers %s from the cluster "
+ "from node %s", nodes_in_pool_list,
+ cls.mnode)
+ g.log.info("Successfully detached all servers '%s' "
+ "from the cluster from node %s",
+ nodes_in_pool_list, cls.mnode)
+
+ # Get pool list from mnode
+ g.log.info("Pool list on node %s", cls.mnode)
+ ret, out, err = pool_list(cls.mnode)
+ if ret != 0:
+ raise ExecutionError("Failed to get pool list on node %s: %s",
+ cls.mnode, err)
+ g.log.info("Successfully got pool list on node %s:\n%s", cls.mnode,
+ out)
+
+ # Get peer status output from all servers
+ for server in cls.servers:
+ g.log.info("Peer status on node %s", server)
+ ret, out, err = peer_status(server)
+ if ret != 0:
+ raise ExecutionError("Failed to get peer status on node %s: "
+ "%s", server, err)
+ g.log.info("Successfully got peer status on node %s:\n%s",
+ server, out)
+
+ def setUp(self):
+ """
+ """
+ GlusterBaseClass.setUp.im_func(self)
+ # Peer probe servers
+ g.log.info("Peer Probe servers '%s'", self.servers)
+ ret = peer_probe_servers(self.mnode, self.servers)
+ if not ret:
+ raise ExecutionError("Failed to peer probe some or all servers %s "
+ "into the cluster", self.servers)
+ g.log.info("Successfully peer probed all servers '%s' to the cluster",
+ self.servers)
+
+ # Validate if peers are connected from each server
+ g.log.info("Validating if servers %s are connected from other servers "
+ "in the cluster", self.servers)
+ for server in self.servers:
+ ret = is_peer_connected(server, self.servers)
+ if not ret:
+ raise ExecutionError("Some or all servers %s are not "
+ "in connected state from node %s",
+ self.servers, self.mnode)
+ g.log.info("Successfully validated servers %s are all "
+ "in connected state from node %s",
+ self.servers, self.mnode)
+ g.log.info("Successfully validated all servers %s are in connected "
+ "state from other servers in the cluster", self.servers)
+
+ def test_pool_list(self):
+ """Testing pool list command
+ """
+ # peer status from mnode
+ g.log.info("Get Pool List from node %s", self.mnode)
+ ret, out, err = pool_list(self.mnode)
+ self.assertEqual(ret, 0, ("Failed to get pool list from node "
+ "%s: %s", self.mnode, err))
+ g.log.info("Successfully got pool list from node %s:\n%s",
+ self.mnode, out)
+
+ # Get pool list randomly from some node
+ random_server = random.choice(self.servers)
+ g.log.info("Get Pool List from node %s", random_server)
+ ret, out, err = pool_list(random_server)
+ self.assertEqual(ret, 0, ("Failed to get pool list from node "
+ "%s: %s", random_server, err))
+ g.log.info("Successfully got pool list from node %s:\n%s",
+ random_server, out)
+
+ # Get pool list from all the servers
+ for server in self.servers:
+ g.log.info("Get Pool List from node %s", server)
+ ret, out, err = pool_list(server)
+ self.assertEqual(ret, 0, ("Failed to get pool list from node "
+ "%s: %s", server, err))
+ g.log.info("Successfully got pool list from node %s:\n%s",
+ server, out)
+
+ def test_peer_status(self):
+ """Testing peer status command
+ """
+ # peer status from mnode
+ g.log.info("Get peer status from node %s", self.mnode)
+ ret, out, err = peer_status(self.mnode)
+ self.assertEqual(ret, 0, ("Failed to get peer status from node "
+ "%s: %s", self.mnode, err))
+ g.log.info("Successfully got peer status from node %s:\n%s",
+ self.mnode, out)
+
+ # Get peer status randomly from some node
+ random_server = random.choice(self.servers)
+ g.log.info("Get peer status from node %s", random_server)
+ ret, out, err = pool_list(random_server)
+ self.assertEqual(ret, 0, ("Failed to get peer status from node "
+ "%s: %s", random_server, err))
+ g.log.info("Successfully got peer status from node %s:\n%s",
+ random_server, out)
+
+ # Get peer status output from all servers
+ for server in self.servers:
+ g.log.info("Peer status on node %s", server)
+ ret, out, err = peer_status(server)
+ self.assertEqual(ret, 0, ("Failed to get peer status from node "
+ "%s: %s", server, err))
+ g.log.info("Successfully got peer status from node %s:\n%s",
+ server, out)
+
+ def test_is_peer_connected(self):
+ """Check if peer is connected with is_peer_connected function
+ """
+ # Executing if all the peers are in connected state from mnode
+ # This will validate all nodes in self.servers are in 'Connected'
+ # State from self.mnode
+ g.log.info("Validating servers %s are in connected state from node %s",
+ self.servers, self.mnode)
+ ret = is_peer_connected(self.mnode, self.servers)
+ self.assertTrue(ret, ("Some or all servers %s are not in connected "
+ "state from node %s", self.servers, self.mnode))
+ g.log.info("Successfully validated servers %s are all in connected "
+ "state from node %s", self.servers, self.mnode)
+
+ # Validate if peers are connected from each server
+ g.log.info("Validating if servers %s are connected from other servers "
+ "in the cluster", self.servers)
+ for server in self.servers:
+ ret = is_peer_connected(server, self.servers)
+ self.assertTrue(ret, ("Some or all servers %s are not "
+ "in connected state from node %s",
+ self.servers, self.mnode))
+ g.log.info("Successfully validated servers %s are all "
+ "in connected state from node %s",
+ self.servers, self.mnode)
+ g.log.info("Successfully validated all servers %s are in connected "
+ "state from other servers in the cluster", self.servers)
+
+ def test_nodes_from_pool_list(self):
+ """Testing nodes from pool list and peer probe by hostname or IP
+ """
+ # Get list of nodes from 'gluster pool list'
+ nodes_in_pool_list = nodes_from_pool_list(self.mnode)
+ if nodes_in_pool_list is None:
+ g.log.error("Unable to get nodes from gluster pool list "
+ "from node %s", self.mnode)
+ else:
+ g.log.info("Nodes in pool: %s", nodes_in_pool_list)
+
+ # Peer probe by hostname if node in nodes_in_pool_list is IP or
+ # Peer probe by IP if node in nodes_in_pool_list is hostname
+ for node in nodes_in_pool_list:
+ if socket.gethostbyname(node) == node:
+ node = socket.gethostbyaddr(node)[0]
+ else:
+ node = socket.gethostbyname(node)
+ if node:
+ g.log.info("Peer probe node %s from %s", node, self.mnode)
+ ret, out, err = peer_probe(self.mnode, node)
+ self.assertFalse((ret != 0 or
+ re.search(r'^peer\sprobe\:\ssuccess(.*)',
+ out) is None),
+ ("Failed to peer probe %s from node %s",
+ node, self.mnode))
+ g.log.info("Successfully peer probed %s from node %s",
+ node, self.mnode)
+
+ def test_get_pool_list(self):
+ # Get pool list
+ """ Example output of pool list
+
+ [{'uuid': 'a2b88b10-eba2-4f97-add2-8dc37df08b27',
+ 'hostname': 'abc.lab.eng.xyz.com',
+ 'state': '3',
+ 'connected': '1',
+ 'stateStr': 'Peer in Cluster'},
+
+ {'uuid': 'b15b8337-9f8e-4ec3-8bdb-200d6a67ae12',
+ 'hostname': 'def.lab.eng.xyz.com',
+ 'state': '3',
+ 'hostnames': ['def.lab.eng.xyz.com'],
+ 'connected': '1',
+ 'stateStr': 'Peer in Cluster'}
+ ]
+ """
+ g.log.info("Get pool list --xml output as python dict from node %s",
+ self.mnode)
+ pool_list_data = get_pool_list(self.mnode)
+ self.assertIsNotNone(pool_list_data, ("Failed to get pool list --xml "
+ "output as python dict on "
+ "node %s", self.mnode))
+ g.log.info("Successful in getting Pool list --xml output from node "
+ "%s as python dict:\n %s", self.mnode, pool_list_data)
+
+ # Log connected state of the peer
+ for item in pool_list_data:
+ node = item['hostname']
+ if node == self.mnode:
+ continue
+ connected_status = item['connected']
+ state_str = item['stateStr']
+ g.log.info("Node %s status: \n%s", node,
+ ("Connected: %s\nStateStr:%s\n" %
+ (connected_status, state_str)
+ ))
+
+ def test_get_peer_status(self):
+ # Get peer status
+ """ Example output of peer status
+
+ [{'uuid': '77dc299a-32f7-43d8-9977-7345a344c398',
+ 'hostname': 'ijk.lab.eng.xyz.com',
+ 'state': '3',
+ 'hostnames' : ['ijk.lab.eng.xyz.com'],
+ 'connected': '1',
+ 'stateStr': 'Peer in Cluster'},
+
+ {'uuid': 'b15b8337-9f8e-4ec3-8bdb-200d6a67ae12',
+ 'hostname': 'def.lab.eng.xyz.com',
+ 'state': '3',
+ 'hostnames': ['def.lab.eng.xyz.com'],
+ 'connected': '1',
+ 'stateStr': 'Peer in Cluster'}
+ ]
+ """
+ g.log.info("Get peer status --xml output as python dict from node %s",
+ self.mnode)
+ peer_status_list = get_peer_status(self.mnode)
+ self.assertIsNotNone(peer_status_list,
+ ("Failed to get peer status --xml "
+ "output as python dict from "
+ "node %s", self.mnode))
+ g.log.info("Successful in getting Peer status --xml output from "
+ "node %s as python dict:\n %s", self.mnode,
+ peer_status_list)
+
+ # Validating UUID of the peer with get_peer_status
+ server_ips = []
+ for server in self.servers:
+ server_ips.append(socket.gethostbyname(server))
+
+ for peer_stat in peer_status_list:
+ if socket.gethostbyname(peer_stat['hostname']) in server_ips:
+ self.assertIsNotNone(
+ re.match(r'([0-9a-f]{8})(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}',
+ peer_stat['uuid'], re.I),
+ ("Invalid UUID for the node '%s'", peer_stat['hostname']))
+ g.log.info("Valid UUID '%s' for the node %s",
+ peer_stat['uuid'], peer_stat['hostname'])
+
+ def tearDown(self):
+ """peer teardown
+ """
+ # Detach all the servers if it's already attached to the cluster
+ nodes_in_pool_list = nodes_from_pool_list(self.mnode)
+ if nodes_in_pool_list is None:
+ g.log.error("Unable to get nodes from gluster pool list "
+ "from node %s", self.mnode)
+ else:
+ g.log.info("Nodes in pool: %s", nodes_in_pool_list)
+
+ if nodes_in_pool_list:
+ if self.mnode in nodes_in_pool_list:
+ nodes_in_pool_list.remove(self.mnode)
+ g.log.info("Detaching servers %s from node %s",
+ nodes_in_pool_list, self.mnode)
+ for server in nodes_in_pool_list:
+ ret, out, err = peer_detach(self.mnode, server)
+ self.assertFalse(
+ (ret != 0 or
+ re.search(r'^peer\sdetach\:\ssuccess(.*)', out) is None),
+ ("Failed to detach server %s from node %s: %s", server,
+ self.mnode, err))
+ g.log.info("Successfully detached server %s from node %s: %s",
+ server, self.mnode, out)
+ g.log.info("Successfully detached servers %s from node %s",
+ nodes_in_pool_list, self.mnode)
+
+ GlusterBaseClass.tearDown.im_func(self)
+
+ @classmethod
+ def tearDownClass(cls):
+ """
+ """
+ GlusterBaseClass.tearDownClass.im_func(cls)
diff --git a/examples/tests_using_baseclass.py b/examples/tests_using_baseclass.py
new file mode 100644
index 0000000..569feac
--- /dev/null
+++ b/examples/tests_using_baseclass.py
@@ -0,0 +1,72 @@
+# Copyright (C) 2015-2016 Red Hat, Inc. <http://www.redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
+
+""" Example1: Using GlusterBaseClass
+"""
+
+
+@runs_on([['replicated', 'distributed', 'distributed-replicated',
+           'dispersed', 'distributed-dispersed'],
+          ['glusterfs', 'nfs', 'cifs']])
+class TestUsingGlusterBaseClass(GlusterBaseClass):
+    """Use GlusterBaseClass: minimal skeleton showing where class-level
+    and per-test setup/teardown code goes.
+    """
+    @classmethod
+    def setUpClass(cls):
+        """setUpClass. This will be executed once per class.
+        """
+        # Calling GlusterBaseClass setUpClass. This will read all the
+        # Variables from the g.config and will assign values to variables to
+        # Use in the tests
+        # NOTE(review): '.im_func' exists only on Python 2 unbound methods;
+        # this example will not run under Python 3 as written.
+        GlusterBaseClass.setUpClass.im_func(cls)
+
+        # Add test class setup code here.
+
+    def setUp(self):
+        """setUp before the test
+        """
+        # Calling GlusterBaseClass setUp
+        GlusterBaseClass.setUp.im_func(self)
+
+        # Add test setup code here
+
+    def test1(self):
+        # Placeholder tests: each is collected and runs the fixtures above.
+        pass
+
+    def test2(self):
+        pass
+
+    def test3(self):
+        pass
+
+    def tearDown(self):
+        """teardown after the test
+        """
+        # Add test teardown code here
+
+        # Calling GlusterBaseClass teardown
+        GlusterBaseClass.tearDown.im_func(self)
+
+    @classmethod
+    def tearDownClass(cls):
+        """tearDownClass. This will be executed once per class.
+        """
+        # Add test class teardown code here
+
+        # Calling GlusterBaseClass tearDownClass.
+        GlusterBaseClass.tearDownClass.im_func(cls)