summaryrefslogtreecommitdiffstats
path: root/tests/functional/bvt
diff options
context:
space:
mode:
Diffstat (limited to 'tests/functional/bvt')
-rw-r--r--tests/functional/bvt/test_basic.py92
-rw-r--r--tests/functional/bvt/test_cvt.py66
-rw-r--r--tests/functional/bvt/test_vvt.py95
3 files changed, 172 insertions, 81 deletions
diff --git a/tests/functional/bvt/test_basic.py b/tests/functional/bvt/test_basic.py
index 336443940..54078ef44 100644
--- a/tests/functional/bvt/test_basic.py
+++ b/tests/functional/bvt/test_basic.py
@@ -20,33 +20,22 @@ import pytest
import time
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import GlusterBaseClass
+from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_init import (
is_glusterd_running, restart_glusterd, start_glusterd, stop_glusterd)
-from glustolibs.gluster.peer_ops import is_peer_connected, peer_status
class TestGlusterdSanity(GlusterBaseClass):
    """Glusterd Sanity check
"""
- def are_peers_in_connected_state(self):
- """Validate if all the peers are in connected state from all servers.
- """
- _rc = True
- # Validate if peer is connected from all the servers
- for server in self.servers:
- ret = is_peer_connected(server, self.servers)
- if not ret:
- _rc = False
-
- # Peer Status from mnode
- peer_status(self.mnode)
-
- return _rc
-
def setUp(self):
"""setUp required for tests
"""
+ # Calling GlusterBaseClass setUp
GlusterBaseClass.setUp.im_func(self)
+
+ # Defining this variable to check if restart glusterd is required
+ # in teardown
self.test_method_complete = False
@pytest.mark.bvt_basic
@@ -55,51 +44,60 @@ class TestGlusterdSanity(GlusterBaseClass):
peers are in connected state after glusterd restarts.
"""
# restart glusterd on all servers
- g.log.info("Restart glusterd on all servers")
+ g.log.info("Restart glusterd on all servers %s", self.servers)
ret = restart_glusterd(self.servers)
- self.assertTrue(ret, "Failed to restart glusterd on all servers")
- g.log.info("Successfully restarted glusterd on all servers")
+ self.assertTrue(ret, ("Failed to restart glusterd on all servers %s",
+ self.servers))
+ g.log.info("Successfully restarted glusterd on all servers %s",
+ self.servers)
# Check if glusterd is running on all servers(expected: active)
- g.log.info("Check if glusterd is running on all servers"
- "(expected: active)")
+ g.log.info("Check if glusterd is running on all servers %s"
+ "(expected: active)", self.servers)
ret = is_glusterd_running(self.servers)
- self.assertEqual(ret, 0, "Glusterd is not running on all servers")
- g.log.info("Glusterd is running on all the servers")
+ self.assertEqual(ret, 0, ("Glusterd is not running on all servers %s",
+ self.servers))
+ g.log.info("Glusterd is running on all the servers %s", self.servers)
# Stop glusterd on all servers
- g.log.info("Stop glusterd on all servers")
+ g.log.info("Stop glusterd on all servers %s", self.servers)
ret = stop_glusterd(self.servers)
- self.assertTrue(ret, "Failed to stop glusterd on all servers")
- g.log.info("Successfully stopped glusterd on all servers")
+ self.assertTrue(ret, ("Failed to stop glusterd on all servers %s",
+ self.servers))
+ g.log.info("Successfully stopped glusterd on all servers %s",
+ self.servers)
# Check if glusterd is running on all servers(expected: not running)
- g.log.info("Check if glusterd is running on all servers"
- "(expected: not running)")
+ g.log.info("Check if glusterd is running on all servers %s"
+ "(expected: not running)", self.servers)
ret = is_glusterd_running(self.servers)
- self.assertNotEqual(ret, 0, "Glusterd is still running on some "
- "servers")
- g.log.info("Glusterd not running on any servers as expected.")
+ self.assertNotEqual(ret, 0, ("Glusterd is still running on some "
+ "servers %s", self.servers))
+ g.log.info("Glusterd not running on any servers %s as expected.",
+ self.servers)
# Start glusterd on all servers
- g.log.info("Start glusterd on all servers")
+ g.log.info("Start glusterd on all servers %s", self.servers)
ret = start_glusterd(self.servers)
- self.assertTrue(ret, "Failed to start glusterd on all servers")
- g.log.info("Successfully started glusterd on all servers")
+ self.assertTrue(ret, ("Failed to start glusterd on all servers %s",
+ self.servers))
+ g.log.info("Successfully started glusterd on all servers %s",
+ self.servers)
# Check if glusterd is running on all servers(expected: active)
- g.log.info("Check if glusterd is running on all servers"
- "(expected: active)")
+ g.log.info("Check if glusterd is running on all servers %s"
+ "(expected: active)", self.servers)
ret = is_glusterd_running(self.servers)
- self.assertEqual(ret, 0, "Glusterd is not running on all servers")
- g.log.info("Glusterd is running on all the servers")
+ self.assertEqual(ret, 0, ("Glusterd is not running on all servers %s",
+ self.servers))
+ g.log.info("Glusterd is running on all the servers %s", self.servers)
# Wait for all the glusterd's to establish communication.
time.sleep(30)
# Validate all the peers are in connected state
g.log.info("Validating all the peers are in Cluster and Connected")
- ret = self.are_peers_in_connected_state()
+ ret = self.validate_peers_are_connected()
self.assertTrue(ret, "Validating Peers to be in Cluster Failed")
g.log.info("All peers are in connected state")
@@ -110,18 +108,24 @@ class TestGlusterdSanity(GlusterBaseClass):
"""
if not self.test_method_complete:
# restart glusterd on all servers
- g.log.info("Restart glusterd on all servers")
+ g.log.info("Restart glusterd on all servers %s", self.servers)
ret = restart_glusterd(self.servers)
- self.assertTrue(ret, "Failed to restart glusterd on all servers")
- g.log.info("Successfully restarted glusterd on all servers")
+ if not ret:
+ raise ExecutionError("Failed to restart glusterd on all "
+ "servers %s", self.servers)
+ g.log.info("Successfully restarted glusterd on all servers %s",
+ self.servers)
# Wait for all the glusterd's to establish communication.
time.sleep(30)
# Validate all the peers are in connected state
g.log.info("Validating all the peers are in Cluster and Connected")
- ret = self.are_peers_in_connected_state()
- self.assertTrue(ret, "Validating Peers to be in Cluster Failed")
+ ret = self.validate_peers_are_connected()
+ if not ret:
+ raise ExecutionError("Validating Peers to be in Cluster "
+ "Failed")
g.log.info("All peers are in connected state")
+ # Calling GlusterBaseClass tearDown
GlusterBaseClass.tearDown.im_func(self)
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index ff6d3f2ec..2306c4c7e 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -34,8 +34,7 @@
import time
import pytest
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
- runs_on)
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.volume_libs import enable_and_validate_volume_options
from glustolibs.gluster.volume_libs import (
verify_all_process_of_volume_are_online)
@@ -57,31 +56,35 @@ from glustolibs.gluster.quota_ops import (enable_quota, disable_quota,
from glustolibs.gluster.snap_ops import (snap_create, get_snap_list,
snap_activate, snap_deactivate)
from glustolibs.misc.misc_libs import upload_scripts
-from glustolibs.io.utils import (validate_io_procs, log_mounts_info,
+from glustolibs.io.utils import (validate_io_procs,
list_all_files_and_dirs_mounts,
view_snaps_from_mount,
wait_for_io_to_complete)
from glustolibs.gluster.exceptions import ExecutionError
-class GlusterBasicFeaturesSanityBaseClass(GlusterVolumeBaseClass):
+class GlusterBasicFeaturesSanityBaseClass(GlusterBaseClass):
""" BaseClass for all the gluster basic features sanity tests. """
@classmethod
def setUpClass(cls):
- """Setup Volume, Create Mounts and upload the necessary scripts to run
- tests.
+ """Upload the necessary scripts to run tests.
"""
- # Sets up volume, mounts
- GlusterVolumeBaseClass.setUpClass.im_func(cls)
+ # Calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
# Upload io scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
script_local_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts")
+ raise ExecutionError("Failed to upload IO scripts to clients %s",
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
cls.counter = 1
"""int: Value of counter is used for dirname-start-num argument for
@@ -99,16 +102,27 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterVolumeBaseClass):
"""
def setUp(self):
- """setUp starts the io from all the mounts.
- IO creates deep dirs and files.
"""
- # Calling BaseClass setUp
- GlusterVolumeBaseClass.setUp.im_func(self)
+ - Setup Volume and Mount Volume
+ - setUp starts the io from all the mounts.
+ - IO creates deep dirs and files.
+ """
+ # Calling GlusterBaseClass setUp
+ GlusterBaseClass.setUp.im_func(self)
+
+ # Setup Volume and Mount Volume
+ g.log.info("Starting to Setup Volume and Mount Volume")
+ ret = self.setup_volume_and_mount_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Setup_Volume and Mount_Volume")
+ g.log.info("Successful in Setup Volume and Mount Volume")
# Start IO on mounts
g.log.info("Starting IO on all mounts...")
self.all_mounts_procs = []
for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
cmd = ("python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
@@ -130,6 +144,8 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterVolumeBaseClass):
def tearDown(self):
"""If test method failed before validating IO, tearDown waits for the
IO's to complete and checks for the IO exit status
+
+ Unmount Volume and Cleanup Volume
"""
# Wait for IO to complete if io validation is not executed in the
# test method
@@ -140,17 +156,23 @@ class GlusterBasicFeaturesSanityBaseClass(GlusterVolumeBaseClass):
if not ret:
raise ExecutionError("IO failed on some of the clients")
g.log.info("IO is successful on all mounts")
- GlusterVolumeBaseClass.tearDown.im_func(self)
- @classmethod
- def tearDownClass(cls):
- """Cleanup data from mount and cleanup volume.
- """
- # Log Mounts info
- g.log.info("Log mounts info")
- log_mounts_info(cls.mounts)
+ # List all files and dirs created
+ g.log.info("List all files and directories:")
+ ret = list_all_files_and_dirs_mounts(self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to list all files and dirs")
+ g.log.info("Listing all files and directories is successful")
+
+ # Unmount Volume and Cleanup Volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
+ if not ret:
+ raise ExecutionError("Failed to Unmount Volume and Cleanup Volume")
+ g.log.info("Successful in Unmount Volume and Cleanup Volume")
- GlusterVolumeBaseClass.tearDownClass.im_func(cls)
+ # Calling GlusterBaseClass tearDown
+ GlusterBaseClass.tearDown.im_func(self)
@runs_on([['replicated', 'distributed', 'distributed-replicated',
diff --git a/tests/functional/bvt/test_vvt.py b/tests/functional/bvt/test_vvt.py
index 5730996bd..ecf5866a4 100644
--- a/tests/functional/bvt/test_vvt.py
+++ b/tests/functional/bvt/test_vvt.py
@@ -23,8 +23,7 @@
import pytest
import time
from glusto.core import Glusto as g
-from glustolibs.gluster.gluster_base_class import (GlusterVolumeBaseClass,
- runs_on)
+from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_init import is_glusterd_running
from glustolibs.gluster.volume_ops import volume_stop, volume_start
@@ -38,26 +37,56 @@ from glustolibs.io.utils import validate_io_procs, get_mounts_stat
@runs_on([['replicated', 'distributed', 'distributed-replicated',
'dispersed', 'distributed-dispersed'],
['glusterfs', 'nfs', 'cifs']])
-class VolumeAccessibilityTests(GlusterVolumeBaseClass):
+class VolumeAccessibilityTests(GlusterBaseClass):
    """ VolumeAccessibilityTests contains tests which verify
    accessibility of the volume.
"""
@classmethod
def setUpClass(cls):
- """Setup Volume, Create Mounts and upload the necessary scripts to run
- tests.
+ """Upload the necessary scripts to run tests.
"""
- # Sets up volume, mounts
- GlusterVolumeBaseClass.setUpClass.im_func(cls)
+ # Calling GlusterBaseClass setUpClass
+ GlusterBaseClass.setUpClass.im_func(cls)
# Upload io scripts for running IO on mounts
+ g.log.info("Upload io scripts to clients %s for running IO on "
+ "mounts", cls.clients)
script_local_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
cls.script_upload_path = ("/usr/share/glustolibs/io/scripts/"
"file_dir_ops.py")
ret = upload_scripts(cls.clients, script_local_path)
if not ret:
- raise ExecutionError("Failed to upload IO scripts")
+ raise ExecutionError("Failed to upload IO scripts to clients %s",
+ cls.clients)
+ g.log.info("Successfully uploaded IO scripts to clients %s",
+ cls.clients)
+
+ def setUp(self):
+ """Setup Volume
+ """
+ # Calling GlusterBaseClass setUp
+ GlusterBaseClass.setUp.im_func(self)
+
+ # Setup_Volume
+ g.log.info("Starting to Setup Volume %s", self.volname)
+ ret = self.setup_volume()
+ if not ret:
+ raise ExecutionError("Failed to Setup Volume %s", self.volname)
+ g.log.info("Successful in Setup Volume %s", self.volname)
+
+ def tearDown(self):
+ """Cleanup the volume
+ """
+        # Cleanup Volume
+        g.log.info("Starting to Cleanup Volume %s", self.volname)
+        ret = self.cleanup_volume()
+        if not ret:
+            raise ExecutionError("Failed to Cleanup Volume %s", self.volname)
+        g.log.info("Successful in Cleanup Volume %s", self.volname)
+
+ # Calling GlusterBaseClass tearDown
+ GlusterBaseClass.tearDown.im_func(self)
@pytest.mark.bvt_vvt
def test_volume_create_start_stop_start(self):
@@ -65,49 +94,75 @@ class VolumeAccessibilityTests(GlusterVolumeBaseClass):
Also Validates whether all the brick process are running after the
start of the volume.
"""
- # Verify volume's all process are online
+ # Verify volume processes are online
+ g.log.info("Verify volume %s processes are online", self.volname)
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online" %
self.volname))
+ g.log.info("Successfully Verified volume %s processes are online",
+ self.volname)
# Stop Volume
+ g.log.info("Stopping Volume %s", self.volname)
ret, _, _ = volume_stop(self.mnode, self.volname, force=True)
self.assertEqual(ret, 0, "Failed to stop volume %s" % self.volname)
+ g.log.info("Successfully stopped volume %s", self.volname)
# Start Volume
+ g.log.info("Starting Volume %s", self.volname)
ret, _, _ = volume_start(self.mnode, self.volname)
- self.assertEqual(ret, 0, "Unable to start volume %s" % self.volname)
+ self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)
+ g.log.info("Successfully started volume %s", self.volname)
time.sleep(15)
# Log Volume Info and Status
+ g.log.info("Logging Volume %s Info and Status", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume %s info and status failed" %
+ self.assertTrue(ret, ("Failed to Log volume %s info and status",
self.volname))
+ g.log.info("Successfully logged Volume %s Info and Status",
+ self.volname)
# Verify volume's all process are online
+ g.log.info("Verify volume %s processes are online", self.volname)
ret = verify_all_process_of_volume_are_online(self.mnode, self.volname)
self.assertTrue(ret, ("Volume %s : All process are not online" %
self.volname))
+ g.log.info("Successfully verified volume %s processes are online",
+ self.volname)
# Log Volume Info and Status
+ g.log.info("Logging Volume %s Info and Status", self.volname)
ret = log_volume_info_and_status(self.mnode, self.volname)
- self.assertTrue(ret, ("Logging volume %s info and status failed" %
+ self.assertTrue(ret, ("Failed to Log volume %s info and status",
self.volname))
+ g.log.info("Successfully logged Volume %s Info and Status",
+ self.volname)
- # Verify all glusterd's are running
+ # Check if glusterd is running on all servers(expected: active)
+ g.log.info("Check if glusterd is running on all servers"
+ "(expected: active)")
ret = is_glusterd_running(self.servers)
- self.assertEqual(ret, 0, ("glusterd not running on all servers: %s" %
- self.servers))
+ self.assertEqual(ret, 0, "Glusterd is not running on all servers")
+ g.log.info("Glusterd is running on all the servers")
@pytest.mark.bvt_vvt
def test_file_dir_create_ops_on_volume(self):
"""Test File Directory Creation on the volume.
"""
+ # Mount Volume
+ g.log.info("Starting to Mount Volume %s", self.volname)
+ ret = self.mount_volume(self.mounts)
+ self.assertTrue(ret, ("Failed to Mount Volume %s", self.volname))
+ g.log.info("Successful in Mounting Volume %s", self.volname)
+
# Start IO on all mounts.
all_mounts_procs = []
count = 1
for mount_obj in self.mounts:
+ g.log.info("Starting IO on %s:%s", mount_obj.client_system,
+ mount_obj.mountpoint)
cmd = ("python %s create_deep_dirs_with_files "
"--dirname-start-num %d "
"--dir-depth 2 "
@@ -121,9 +176,19 @@ class VolumeAccessibilityTests(GlusterVolumeBaseClass):
count = count + 10
# Validate IO
+ g.log.info("Validating IO's")
ret = validate_io_procs(all_mounts_procs, self.mounts)
self.assertTrue(ret, "IO failed on some of the clients")
+ g.log.info("Successfully validated all io's")
# Get stat of all the files/dirs created.
+ g.log.info("Get stat of all the files/dirs created.")
ret = get_mounts_stat(self.mounts)
self.assertTrue(ret, "Stat failed on some of the clients")
+ g.log.info("Successfully got stat of all files/dirs created")
+
+ # UnMount Volume
+ g.log.info("Starting to Unmount Volume %s", self.volname)
+ ret = self.unmount_volume(self.mounts)
+ self.assertTrue(ret, ("Failed to Unmount Volume %s" % self.volname))
+ g.log.info("Successfully Unmounted Volume %s", self.volname)