summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--glustolibs-gluster/glustolibs/gluster/gluster_base_class.py468
-rw-r--r--tests/functional/bvt/test_basic.py92
-rw-r--r--tests/functional/bvt/test_cvt.py66
-rw-r--r--tests/functional/bvt/test_vvt.py95
4 files changed, 489 insertions, 232 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index e191f005d..d7297e177 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -22,15 +22,18 @@
import unittest
import os
import random
-import time
import copy
import datetime
+import socket
from glusto.core import Glusto as g
-from glustolibs.gluster.exceptions import ExecutionError, ConfigError
+from glustolibs.gluster.exceptions import ConfigError
from glustolibs.gluster.peer_ops import is_peer_connected, peer_status
-from glustolibs.gluster.volume_ops import volume_info, set_volume_options
-from glustolibs.gluster.volume_libs import (setup_volume, cleanup_volume,
+from glustolibs.gluster.volume_ops import set_volume_options
+from glustolibs.gluster.volume_libs import (setup_volume,
+ cleanup_volume,
log_volume_info_and_status)
+# from glustolibs.gluster.volume_libs import (
+# wait_for_volume_process_to_be_online)
from glustolibs.gluster.samba_libs import share_volume_over_smb
from glustolibs.gluster.nfs_libs import export_volume_through_nfs
from glustolibs.gluster.mount_ops import create_mount_objs
@@ -109,6 +112,310 @@ class GlusterBaseClass(unittest.TestCase):
return _rc
@classmethod
+ def get_ip_from_hostname(cls, nodes):
+        """Returns list of IPs for the list of nodes in order.
+
+ Args:
+ nodes(list): List of nodes hostnames
+
+ Returns:
+            list: List of IPs corresponding to the hostnames of nodes.
+ """
+ nodes_ips = []
+ if isinstance(nodes, str):
+ nodes = [nodes]
+ for node in nodes:
+ try:
+ ip = socket.gethostbyname(node)
+ except socket.gaierror as e:
+ g.log.error("Failed to get the IP of Host: %s : %s", node,
+ e.strerror)
+ ip = None
+ nodes_ips.append(ip)
+ return nodes_ips
+
+ @classmethod
+ def validate_peers_are_connected(cls):
+ """Validate whether each server in the cluster is connected to
+ all other servers in cluster.
+
+        Returns (bool): True if all peers are connected with other peers.
+            False otherwise.
+ """
+ # Validate if peer is connected from all the servers
+ g.log.info("Validating if servers %s are connected from other servers "
+ "in the cluster", cls.servers)
+ for server in cls.servers:
+ g.log.info("Validate servers %s are in connected from node %s",
+ cls.servers, server)
+ ret = is_peer_connected(server, cls.servers)
+ if not ret:
+ g.log.error("Some or all servers %s are not in connected "
+ "state from node %s", cls.servers, server)
+ return False
+ g.log.info("Successfully validated servers %s are all in "
+ "connected state from node %s",
+ cls.servers, server)
+ g.log.info("Successfully validated all servers %s are in connected "
+ "state from other servers in the cluster", cls.servers)
+
+ # Peer Status from mnode
+ peer_status(cls.mnode)
+
+ return True
+
+ @classmethod
+ def setup_volume(cls, volume_create_force=False):
+ """Setup the volume:
+ - Create the volume, Start volume, Set volume
+ options, enable snapshot/quota/tier if specified in the config
+ file.
+ - Wait for volume processes to be online
+ - Export volume as NFS/SMB share if mount_type is NFS or SMB
+ - Log volume info and status
+
+ Args:
+ volume_create_force(bool): True if create_volume should be
+ executed with 'force' option.
+
+        Returns (bool): True if all the steps mentioned in the description
+            pass. False otherwise.
+ """
+ # Setup Volume
+ g.log.info("Setting up volume %s", cls.volname)
+ ret = setup_volume(mnode=cls.mnode,
+ all_servers_info=cls.all_servers_info,
+ volume_config=cls.volume, force=volume_create_force)
+ if not ret:
+ g.log.error("Failed to Setup volume %s", cls.volname)
+ return False
+ g.log.info("Successful in setting up volume %s", cls.volname)
+
+# # Wait for volume processes to be online
+# g.log.info("Wait for volume %s processes to be online", cls.volname)
+# ret = wait_for_volume_process_to_be_online(cls.mnode, cls.volname)
+# if not ret:
+# g.log.error("Failed to wait for volume %s processes to "
+# "be online", cls.volname)
+# return False
+# g.log.info("Successful in waiting for volume %s processes to be "
+# "online", cls.volname)
+
+ # Export/Share the volume based on mount_type
+ if cls.mount_type != "glusterfs":
+ g.log.info("Export/Sharing the volume %s", cls.volname)
+ if "nfs" in cls.mount_type:
+ ret = export_volume_through_nfs(
+ mnode=cls.mnode, volname=cls.volname,
+ enable_ganesha=cls.enable_nfs_ganesha)
+ if not ret:
+ g.log.error("Failed to export volume %s "
+ "as NFS export", cls.volname)
+ return False
+ g.log.info("Successful in exporting the volume %s "
+ "as NFS export", cls.volname)
+
+ # Set NFS-Ganesha specific volume options
+ if cls.enable_nfs_ganesha and cls.nfs_ganesha_export_options:
+ g.log.info("Setting NFS-Ganesha export specific "
+ "volume options on volume %s", cls.volname)
+ ret = set_volume_options(
+ mnode=cls.mnode, volname=cls.volname,
+ options=cls.nfs_ganesha_export_options)
+ if not ret:
+ g.log.error("Failed to set NFS-Ganesha "
+ "export specific options on "
+ "volume %s", cls.volname)
+ return False
+ g.log.info("Successful in setting NFS-Ganesha export "
+ "specific volume options on volume %s",
+ cls.volname)
+
+ if "smb" in cls.mount_type or "cifs" in cls.mount_type:
+ ret = share_volume_over_smb(mnode=cls.mnode,
+ volname=cls.volname,
+ smb_users_info=cls.smb_users_info)
+ if not ret:
+ g.log.error("Failed to export volume %s "
+ "as SMB Share", cls.volname)
+ return False
+ g.log.info("Successful in exporting volume %s as SMB Share",
+ cls.volname)
+
+ # Set SMB share specific volume options
+ if cls.smb_share_options:
+ g.log.info("Setting SMB share specific volume options "
+ "on volume %s", cls.volname)
+ ret = set_volume_options(mnode=cls.mnode,
+ volname=cls.volname,
+ options=cls.smb_share_options)
+ if not ret:
+ g.log.error("Failed to set SMB share "
+ "specific options "
+ "on volume %s", cls.volname)
+ return False
+ g.log.info("Successful in setting SMB share specific "
+ "volume options on volume %s", cls.volname)
+
+ # Log Volume Info and Status
+ g.log.info("Log Volume %s Info and Status", cls.volname)
+ ret = log_volume_info_and_status(cls.mnode, cls.volname)
+ if not ret:
+ g.log.error("Logging volume %s info and status failed",
+ cls.volname)
+ return False
+ g.log.info("Successful in logging volume %s info and status",
+ cls.volname)
+
+ return True
+
+ @classmethod
+ def mount_volume(cls, mounts):
+ """Mount volume
+
+ Args:
+ mounts(list): List of mount_objs
+
+ Returns (bool): True if mounting the volume for a mount obj is
+ successful. False otherwise
+ """
+ g.log.info("Starting to mount volume %s", cls.volname)
+ for mount_obj in mounts:
+ g.log.info("Mounting volume '%s:%s' on '%s:%s'",
+ mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint)
+ ret = mount_obj.mount()
+ if not ret:
+ g.log.error("Failed to mount volume '%s:%s' on '%s:%s'",
+ mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint)
+ return False
+ else:
+ g.log.info("Successful in mounting volume '%s:%s' on "
+ "'%s:%s'", mount_obj.server_system,
+ mount_obj.volname, mount_obj.client_system,
+ mount_obj.mountpoint)
+ g.log.info("Successful in mounting all mount objs for the volume %s",
+ cls.volname)
+
+ # Get mounts info
+ g.log.info("Get mounts Info:")
+ log_mounts_info(mounts)
+
+ return True
+
+ @classmethod
+ def setup_volume_and_mount_volume(cls, mounts, volume_create_force=False):
+ """Setup the volume and mount the volume
+
+ Args:
+ mounts(list): List of mount_objs
+ volume_create_force(bool): True if create_volume should be
+ executed with 'force' option.
+
+ Returns (bool): True if setting up volume and mounting the volume
+ for a mount obj is successful. False otherwise
+ """
+ # Validate peers before setting up volume
+ _rc = cls.validate_peers_are_connected()
+ if not _rc:
+ return _rc
+
+ # Setup Volume
+ _rc = cls.setup_volume(volume_create_force)
+ if not _rc:
+ return _rc
+
+ # Mount Volume
+ _rc = cls.mount_volume(mounts)
+ if not _rc:
+ return _rc
+
+ return True
+
+ @classmethod
+ def unmount_volume(cls, mounts):
+ """Unmount all mounts for the volume
+
+ Args:
+ mounts(list): List of mount_objs
+
+ Returns (bool): True if unmounting the volume for a mount obj is
+ successful. False otherwise
+ """
+ # Unmount volume
+ g.log.info("Starting to UnMount Volume %s", cls.volname)
+ for mount_obj in mounts:
+ g.log.info("UnMounting volume '%s:%s' on '%s:%s'",
+ mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint)
+ ret = mount_obj.unmount()
+ if not ret:
+ g.log.error("Failed to unmount volume '%s:%s' on '%s:%s'",
+ mount_obj.server_system, mount_obj.volname,
+ mount_obj.client_system, mount_obj.mountpoint)
+
+ # Get mounts info
+ g.log.info("Get mounts Info:")
+ log_mounts_info(cls.mounts)
+
+ return False
+ else:
+ g.log.info("Successful in unmounting volume '%s:%s' on "
+ "'%s:%s'", mount_obj.server_system,
+ mount_obj.volname, mount_obj.client_system,
+ mount_obj.mountpoint)
+ g.log.info("Successful in unmounting all mount objs for the volume %s",
+ cls.volname)
+
+ # Get mounts info
+ g.log.info("Get mounts Info:")
+ log_mounts_info(mounts)
+
+ return True
+
+ @classmethod
+ def cleanup_volume(cls):
+ """Cleanup the volume
+
+ Returns (bool): True if cleanup volume is successful. False otherwise.
+ """
+ g.log.info("Cleanup Volume %s", cls.volname)
+ ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
+ if not ret:
+ g.log.error("cleanup of volume %s failed", cls.volname)
+ else:
+ g.log.info("Successfully cleaned-up volume %s", cls.volname)
+
+ # Log Volume Info and Status
+ g.log.info("Log Volume %s Info and Status", cls.volname)
+ log_volume_info_and_status(cls.mnode, cls.volname)
+
+ return ret
+
+ @classmethod
+ def unmount_volume_and_cleanup_volume(cls, mounts):
+ """Unmount the volume and cleanup volume
+
+ Args:
+ mounts(list): List of mount_objs
+
+ Returns (bool): True if unmounting the volume for the mounts and
+ cleaning up volume is successful. False otherwise
+ """
+ # UnMount Volume
+ _rc = cls.unmount_volume(mounts)
+ if not _rc:
+ return _rc
+
+        # Cleanup Volume
+ _rc = cls.cleanup_volume()
+ if not _rc:
+ return _rc
+
+ return True
+
+ @classmethod
def setUpClass(cls):
"""Initialize all the variables necessary for testing Gluster
"""
@@ -147,6 +454,10 @@ class GlusterBaseClass(unittest.TestCase):
# Set mnode : Node on which gluster commands are executed
cls.mnode = cls.all_servers[0]
+ # Server IP's
+ cls.servers_ips = []
+ cls.servers_ips = cls.get_ip_from_hostname(cls.servers)
+
# SMB Cluster info
try:
cls.smb_users_info = (
@@ -172,7 +483,7 @@ class GlusterBaseClass(unittest.TestCase):
cls.vips = []
# Defining default volume_types configuration.
- default_volume_type_config = {
+ cls.default_volume_type_config = {
'replicated': {
'type': 'replicated',
'replica_count': 3,
@@ -253,7 +564,7 @@ class GlusterBaseClass(unittest.TestCase):
[cls.volume_type])
except KeyError:
try:
- cls.volume['voltype'] = (default_volume_type_config
+ cls.volume['voltype'] = (cls.default_volume_type_config
[cls.volume_type])
except KeyError:
raise ConfigError("Unable to get configs of volume "
@@ -411,148 +722,3 @@ class GlusterBaseClass(unittest.TestCase):
msg = "Teardownclass: %s : %s" % (cls.__name__, cls.glustotest_run_id)
g.log.info(msg)
cls.inject_msg_in_gluster_logs(msg)
-
-
-class GlusterVolumeBaseClass(GlusterBaseClass):
- """GlusterVolumeBaseClass sets up the volume for testing purposes.
- """
- @classmethod
- def setUpClass(cls, mount_vol=True):
- """Setup volume, shares/exports volume for cifs/nfs protocols,
- mounts the volume.
- """
- GlusterBaseClass.setUpClass.im_func(cls)
-
- # Validate if peer is connected from all the servers
- for server in cls.servers:
- ret = is_peer_connected(server, cls.servers)
- if not ret:
- raise ExecutionError("Validating Peers to be in Cluster "
- "Failed")
- g.log.info("All peers are in connected state")
-
- # Peer Status from mnode
- peer_status(cls.mnode)
-
- # Setup Volume
- ret = setup_volume(mnode=cls.mnode,
- all_servers_info=cls.all_servers_info,
- volume_config=cls.volume, force=True)
- if not ret:
- raise ExecutionError("Setup volume %s failed", cls.volname)
- time.sleep(10)
-
- # Export/Share the volume based on mount_type
- if cls.mount_type != "glusterfs":
- if "nfs" in cls.mount_type:
- ret = export_volume_through_nfs(
- mnode=cls.mnode, volname=cls.volname,
- enable_ganesha=cls.enable_nfs_ganesha)
- if not ret:
- raise ExecutionError("Failed to export volume %s "
- "as NFS export", cls.volname)
-
- # Set NFS-Ganesha specific volume options
- if cls.enable_nfs_ganesha and cls.nfs_ganesha_export_options:
- g.log.info("Setting NFS-Ganesha export specific "
- "volume options")
- ret = set_volume_options(
- mnode=cls.mnode, volname=cls.volname,
- options=cls.nfs_ganesha_export_options)
- if not ret:
- raise ExecutionError("Failed to set NFS-Ganesha "
- "export specific options on "
- "volume %s", cls.volname)
- g.log.info("Successful in setting NFS-Ganesha export "
- "specific volume options")
-
- if "smb" in cls.mount_type or "cifs" in cls.mount_type:
- ret = share_volume_over_smb(mnode=cls.mnode,
- volname=cls.volname,
- smb_users_info=cls.smb_users_info)
- if not ret:
- raise ExecutionError("Failed to export volume %s "
- "as SMB Share", cls.volname)
-
- # Set SMB share specific volume options
- if cls.smb_share_options:
- g.log.info("Setting SMB share specific volume options")
- ret = set_volume_options(mnode=cls.mnode,
- volname=cls.volname,
- options=cls.smb_share_options)
- if not ret:
- raise ExecutionError("Failed to set SMB share "
- "specific options "
- "on volume %s", cls.volname)
- g.log.info("Successful in setting SMB share specific "
- "volume options")
-
- # Log Volume Info and Status
- ret = log_volume_info_and_status(cls.mnode, cls.volname)
- if not ret:
- raise ExecutionError("Logging volume %s info and status failed",
- cls.volname)
-
- # Create Mounts
- if mount_vol:
- _rc = True
- g.log.info("Starting to mount volume")
- for mount_obj in cls.mounts:
- ret = mount_obj.mount()
- if not ret:
- g.log.error("Unable to mount volume '%s:%s' on '%s:%s'",
- mount_obj.server_system, mount_obj.volname,
- mount_obj.client_system, mount_obj.mountpoint)
- _rc = False
- if not _rc:
- raise ExecutionError("Mounting volume %s on few clients "
- "failed", cls.volname)
- else:
- g.log.info("Successful in mounting volume on all clients")
-
- # Get info of mount before the IO
- g.log.info("Get mounts Info:")
- log_mounts_info(cls.mounts)
- else:
- g.log.info("Not Mounting the volume as 'mount_vol' option is "
- "set to %s", mount_vol)
-
- @classmethod
- def tearDownClass(cls, umount_vol=True, cleanup_vol=True):
- """Teardown the mounts and volume.
- """
- # Unmount volume
- if umount_vol:
- _rc = True
- g.log.info("Starting to UnMount Volumes")
- for mount_obj in cls.mounts:
- ret = mount_obj.unmount()
- if not ret:
- g.log.error("Unable to unmount volume '%s:%s' on '%s:%s'",
- mount_obj.server_system, mount_obj.volname,
- mount_obj.client_system, mount_obj.mountpoint)
- _rc = False
- if not _rc:
- raise ExecutionError("Unmount of all mounts are not "
- "successful")
- else:
- g.log.info("Successful in unmounting volume on all clients")
- else:
- g.log.info("Not Unmounting the Volume as 'umount_vol' is set "
- "to %s", umount_vol)
-
- # Cleanup volume
- if cleanup_vol:
- ret = cleanup_volume(mnode=cls.mnode, volname=cls.volname)
- if not ret:
- raise ExecutionError("cleanup volume %s failed", cls.volname)
- else:
- g.log.info("Successfully cleaned-up volume")
- else:
- g.log.info("Not Cleaning-Up volume as 'cleanup_vol' is %s",
- cleanup_vol)
-
- # All Volume Info
- volume_info(cls.mnode)
-
- GlusterBaseClass.tearDownClass.im_func(cls)
diff --git a/tests/functional/bvt/test_basic.py b/tests/functional/bvt/test_basic.py
index 336443940..54078ef44 100644
--- a/tests/functional/bvt/test_basic.py
+++ b/tests/functional/bvt/test_basic.py
@@ -20,33 +20,22 @@ import pytest
import time
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_init import (
is_glusterd_running, restart_glusterd, start_glusterd, stop_glusterd)
-from glustolibs.gluster.peer_ops import is_peer_connected, peer_status
class TestGlusterdSanity(GlusterBaseClass):
"""GLusterd Sanity check
"""
- def are_peers_in_connected_state(self):
- """Validate if all the peers are in connected state from all servers.
- """
- _rc = True
- # Validate if peer is connected from all the servers
- for server in self.servers:
- ret = is_peer_connected(server, self.servers)
- if not ret:
- _rc = False
-
- # Peer Status from mnode
- peer_status(self.mnode)
-
- return _rc
-
def setUp(self):
"""setUp required for tests
"""
+ # Calling GlusterBaseClass setUp
GlusterBaseClass.setUp.im_func(self)
+
+ # Defining this variable to check if restart glusterd is required
+ # in teardown
self.test_method_complete = False
@pytest.mark.bvt_basic
@@ -55,51 +44,60 @@ class TestGlusterdSanity(GlusterBaseClass):
peers are in connected state after glusterd restarts.
"""
# restart glusterd on all servers
- g.log.info("Restart glusterd on all servers")
+ g.log.info("Restart glusterd on all servers %s", self.servers)
ret = restart_glusterd(self.servers)
- self.assertTrue(ret, "Failed to restart glusterd on all servers")
- g.log.info("Successfully restarted glusterd on all servers")
+ self.assertTrue(ret, ("Failed to restart glusterd on all servers %s",
+ self.servers))
+ g.log.info("Successfully restarted glusterd on all servers %s",
+ self.servers)
# Check if glusterd is running on all servers(expected: active)
- g.log.info("Check if glusterd is running on all servers"
- "(expected: active)")
+ g.log.info("Check if glusterd is running on all servers %s"
+ "(expected: active)", self.servers)
ret = is_glusterd_running(self.servers)
- self.assertEqual(ret, 0, "Glusterd is not running on all servers")
- g.log.info("Glusterd is running on all the servers")
+ self.assertEqual(ret, 0, ("Glusterd is not running on all servers %s",
+ self.servers))
+ g.log.info("Glusterd is running on all the servers %s", self.servers)
# Stop glusterd on all servers
- g.log.info("Stop glusterd on all servers")
+ g.log.info("Stop glusterd on all servers %s", self.servers)
ret = stop_glusterd(self.servers)
- self.assertTrue(ret, "Failed to stop glusterd on all servers")
- g.log.info("Successfully stopped glusterd on all servers")
+ self.assertTrue(ret, ("Failed to stop glusterd on all servers %s",
+ self.servers))
+ g.log.info("Successfully stopped glusterd on all servers %s",
+ self.servers)
# Check if glusterd is running on all servers(expected: not running)
- g.log.info("Check if glusterd is running on all servers"
- "(expected: not running)")
+ g.log.info("Check if glusterd is running on all servers %s"
+ "(expected: not running)", self.servers)
ret = is_glusterd_running(self.servers)
- self.assertNotEqual(ret, 0, "Glusterd is still running on some "
- "servers")
- g.log.info("Glusterd not running on any servers as expected.")
+ self.assertNotEqual(ret, 0, ("Glusterd is still running on some "
+ "servers %s", self.servers))
+ g.log.info("Glusterd not running on any servers %s as expected.",
+ self.servers)
# Start glusterd on all servers
- g.log.info("Start glusterd on all servers")
+ g.log.info("Start glusterd on all servers %s", self.servers)
ret = start_glusterd(self.servers)
- self.assertTrue(ret, "Failed to start glusterd on all servers")
- g.log.info("Successfully started glusterd on all servers")
+ self.assertTrue(ret, ("Failed to start glusterd on all servers %s",
+ self.servers))
+ g.log.info("Successfully started glusterd on all servers %s",
+ self.servers)
# Check if glusterd is running on all servers(expected: active)
- g.log.info("Check if glusterd is running on all servers"
- "(expected: active)")
+ g.log.info("Check if glusterd is running on all servers %s"
+ "(expected: active)", self.servers)
ret = is_glusterd_running(self.servers)
- self.assertEqual(ret, 0, "Glusterd is not running on all servers")
- g.log.info("Glusterd is running on all the servers")
+ self.assertEqual(ret, 0, ("Glusterd is not running on all servers %s",
+ self.servers))
+ g.log.info("Glusterd is running on all the servers %s", self.servers)
# Wait for all the glusterd's to establish communication.
time.sleep(30)
# Validate all the peers are in connected state
g.log.info("Validating all the peers are in Cluster and Connected")
- ret = self.are_peers_in_connected_state()
+ ret = self.validate_peers_are_connected()
self.assertTrue(ret, "Validating Peers to be in Cluster Failed")
g.log.info("All peers are in connected state")
@@ -110,18 +108,24 @@ class TestGlusterdSanity(GlusterBaseClass):
"""
if not self.test_method_complete:
# restart glusterd on all servers
- g.log.info("Restart glusterd on all servers")
+ g.log.info("Restart glusterd on all servers %s", self.servers)
ret = restart_glusterd(self.servers)
- self.assertTrue(ret, "Failed to restart glusterd on all servers")
- g.log.info("Successfully restarted glusterd on all servers")
+ if not ret:
+ raise ExecutionError("Failed to restart glusterd on all "
+ "servers %s", self.servers)
+ g.log.info("Successfully restarted glusterd on all servers %s",
+ self.servers)
# Wait for all the glusterd's to establish communication.
time.sleep(30)
# Validate all the peers are in connected state
g.log.info("Validating all the peers are in Cluster and Connected")
- ret = self.are_peers_in_connected_state()
- self.assertTrue(ret, "Validating Peers to be in Cluster Failed")
+ ret = self.validate_peers_are_connected()
+ if not ret:
+ raise ExecutionError("Validating Peers to be in Cluster "
+ "Failed")
g.log.info("All peers are in connected state")
+ # Calling GlusterBaseClass tearDown
GlusterBaseClass.tearDown.im_func(self)
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index ff6d3f2ec..2306c4c7e 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -34,8 +34,7 @@
import time
import pytest
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
- runs_on)
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.volume_libs import enable_and_validate_volume_options
from glustolibs.gluster.volume_libs import (
verify_all_process_of_volume_are_online)
@@ -57,31 +56,35 @@ from glustolibs.gluster.quota_ops import (enable_quota, disable_quota,
from glustolibs.gluster.snap_ops import (snap_create, get_snap_list,
snap_activate, snap_deactivate)
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import (validate_io_procs, log_mounts_info,
+from glustolibs.io.utils import (validate_io_procs,
list_all_files_and_dirs_mounts,
view_snaps_from_mount,
wait_for_io_to_complete)
from glustolibs.gluster.exceptions import ExecutionError
-class GlusterBasicFeaturesSanityBaseClass(GlusterVolumeBaseClass):
+class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
""" BaseClass for all the gluster basic features sanity tests. """
@classmethod
def setUpClass(cls):
- """Setup Volume, Create Mounts and upload the necessary scripts to run
- tests.
+ """Upload the necessary scripts to run tests.
"""
- # Sets up volume, mounts
- GlusterVolumeBaseClass.setUpClass.im_func(cls)
+ # Calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
# Upload io scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
script_local_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts")
+ raise ExecutionError("Failed to upload IO scripts to clients %s",
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
cls.counter = 1
"""int: Value of counter is used for dirname-start-num argument for
@@ -99,16 +102,27 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterVolumeBaseClass):
"""
def setUp(self):
- """setUp starts the io from all the mounts.
- IO creates deep dirs and files.
"""
- # Calling BaseClass setUp
- GlusterVolumeBaseClass.setUp.im_func(self)
+ - Setup Volume and Mount Volume
+ - setUp starts the io from all the mounts.
+ - IO creates deep dirs and files.
+ """
+ # Calling GlusterBaseClass setUp
+ GlusterBaseClass.setUp.im_func(self)
+
+ # Setup Volume and Mount Volume
+ g.log.info("Starting to Setup Volume and Mount Volume")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
# Start IO on mounts
g.log.info("Starting IO on all mounts...")
self.all_mounts_procs = []
for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
cmd = ("python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
@@ -130,6 +144,8 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterVolumeBaseClass):
def tearDown(self):
"""If test method failed before validating IO, tearDown waits for the
IO's to complete and checks for the IO exit status
+
+ Unmount Volume and Cleanup Volume
"""
# Wait for IO to complete if io validation is not executed in the
# test method
@@ -140,17 +156,23 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterVolumeBaseClass):
if not ret:
raise ExecutionError("IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- GlusterVolumeBaseClass.tearDown.im_func(self)
- @classmethod
- def tearDownClass(cls):
- """Cleanup data from mount and cleanup volume.
- """
- # Log Mounts info
- g.log.info("Log mounts info")
- log_mounts_info(cls.mounts)
+ # List all files and dirs created
+ g.log.info("List all files and directories:")
+ ret = list_all_files_and_dirs_mounts(self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to list all files and dirs")
+ g.log.info("Listing all files and directories is successful")
+
+ # Unmount Volume and Cleanup Volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
- GlusterVolumeBaseClass.tearDownClass.im_func(cls)
+ # Calling GlusterBaseClass tearDown
+ GlusterBaseClass.tearDown.im_func(self)
@runs_on([['replicated', 'distributed', 'distributed-replicated',
diff --git a/tests/functional/bvt/test_vvt.py b/tests/functional/bvt/test_vvt.py
index 5730996bd..ecf5866a4 100644
--- a/tests/functional/bvt/test_vvt.py
+++ b/tests/functional/bvt/test_vvt.py
@@ -23,8 +23,7 @@
import pytest
import time
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
- runs_on)
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_init import is_glusterd_running
from glustolibs.gluster.volume_ops import volume_stop, volume_start
@@ -38,26 +37,56 @@ from glustolibs.io.utils import validate_io_procs, get_mounts_stat
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['glusterfs', 'nfs', 'cifs']])
-class VolumeAccessibilityTests(GlusterVolumeBaseClass):
+class VolumeAccessibilityTests(GlusterBaseClass):
""" VolumeAccessibilityTests contains tests which verifies
accessablity of the volume.
"""
@classmethod
def setUpClass(cls):
- """Setup Volume, Create Mounts and upload the necessary scripts to run
- tests.
+ """Upload the necessary scripts to run tests.
"""
- # Sets up volume, mounts
- GlusterVolumeBaseClass.setUpClass.im_func(cls)
+ # Calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
# Upload io scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
script_local_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts")
+ raise ExecutionError("Failed to upload IO scripts to clients %s",
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ """Setup Volume
+ """
+ # Calling GlusterBaseClass setUp
+ GlusterBaseClass.setUp.im_func(self)
+
+ # Setup_Volume
+ g.log.info("Starting to Setup Volume %s", self.volname)
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume %s", self.volname)
+ g.log.info("Successful in Setup Volume %s", self.volname)
+
+ def tearDown(self):
+ """Cleanup the volume
+ """
+ # Cleanup Volume
+        g.log.info("Starting to Cleanup Volume %s", self.volname)
+        ret = self.cleanup_volume()
+        if not ret:
+            raise ExecutionError("Failed to Cleanup Volume %s", self.volname)
+        g.log.info("Successful in Cleanup Volume %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ GlusterBaseClass.tearDown.im_func(self)
@pytest.mark.bvt_vvt
def test_volume_create_start_stop_start(self):
@@ -65,49 +94,75 @@ class VolumeAccessibilityTests(GlusterVolumeBaseClass):
Also Validates whether all the brick process are running after the
start of the volume.
"""
- # Verify volume's all process are online
+ # Verify volume processes are online
+ g.log.info("Verify volume %s processes are online", self.volname)
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online" %
self.volname))
+ g.log.info("Successfully Verified volume %s processes are online",
+ self.volname)
# Stop Volume
+ g.log.info("Stopping Volume %s", self.volname)
ret, _, _ = volume_stop(self.mnode, self.volname, force=True)
self.assertEqual(ret, 0, "Failed to stop volume %s" % self.volname)
+ g.log.info("Successfully stopped volume %s", self.volname)
# Start Volume
+ g.log.info("Starting Volume %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Unable to start volume %s" % self.volname)
+ self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)
+ g.log.info("Successfully started volume %s", self.volname)
time.sleep(15)
# Log Volume Info and Status
+ g.log.info("Logging Volume %s Info and Status", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume %s info and status failed" %
+ self.assertTrue(ret, ("Failed to Log volume %s info and status",
self.volname))
+ g.log.info("Successfully logged Volume %s Info and Status",
+ self.volname)
# Verify volume's all process are online
+ g.log.info("Verify volume %s processes are online", self.volname)
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online" %
self.volname))
+ g.log.info("Successfully verified volume %s processes are online",
+ self.volname)
# Log Volume Info and Status
+ g.log.info("Logging Volume %s Info and Status", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume %s info and status failed" %
+ self.assertTrue(ret, ("Failed to Log volume %s info and status",
self.volname))
+ g.log.info("Successfully logged Volume %s Info and Status",
+ self.volname)
- # Verify all glusterd's are running
+ # Check if glusterd is running on all servers(expected: active)
+ g.log.info("Check if glusterd is running on all servers"
+ "(expected: active)")
ret = is_glusterd_running(self.servers)
- self.assertEqual(ret, 0, ("glusterd not running on all servers: %s" %
- self.servers))
+ self.assertEqual(ret, 0, "Glusterd is not running on all servers")
+ g.log.info("Glusterd is running on all the servers")
@pytest.mark.bvt_vvt
def test_file_dir_create_ops_on_volume(self):
"""Test File Directory Creation on the volume.
"""
+ # Mount Volume
+ g.log.info("Starting to Mount Volume %s", self.volname)
+ ret = self.mount_volume(self.mounts)
+ self.assertTrue(ret, ("Failed to Mount Volume %s", self.volname))
+ g.log.info("Successful in Mounting Volume %s", self.volname)
+
# Start IO on all mounts.
all_mounts_procs = []
count = 1
for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
cmd = ("python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
@@ -121,9 +176,19 @@ class VolumeAccessibilityTests(GlusterVolumeBaseClass):
count = count + 10
# Validate IO
+ g.log.info("Validating IO's")
ret = validate_io_procs(all_mounts_procs, self.mounts)
self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("Successfully validated all io's")
# Get stat of all the files/dirs created.
+ g.log.info("Get stat of all the files/dirs created.")
ret = get_mounts_stat(self.mounts)
self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfully got stat of all files/dirs created")
+
+ # UnMount Volume
+ g.log.info("Starting to Unmount Volume %s", self.volname)
+ ret = self.unmount_volume(self.mounts)
+ self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
+ g.log.info("Successfully Unmounted Volume %s", self.volname)