summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorShwethaHP <spandura@redhat.com>2018-01-18 17:46:11 +0530
committerNigel Babu <nigelb@redhat.com>2018-01-30 10:52:52 +0530
commitf696f3911567b65abe70e30e2be3ecf05d47b75d (patch)
treecca1fd61fedb3a9f50dd5a0cc57c4fdae3f095dc
parent24eee0352397cfa8b51d467eb1bd46f710124012 (diff)
Use wait_for_volume_process_to_be_online
Replace all the time.sleep() instances with the wait_for_volume_process_to_be_online function. Change-Id: Id7e34979f811bd85f7475748406803026741a3a8 Signed-off-by: ShwethaHP <spandura@redhat.com>
-rw-r--r--glustolibs-gluster/glustolibs/gluster/gluster_base_class.py37
-rw-r--r--tests/functional/bvt/test_cvt.py51
-rw-r--r--tests/functional/bvt/test_vvt.py12
3 files changed, 68 insertions, 32 deletions
diff --git a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
index ae23081..1b6e02b 100644
--- a/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
+++ b/glustolibs-gluster/glustolibs/gluster/gluster_base_class.py
@@ -32,8 +32,8 @@ from glustolibs.gluster.volume_ops import set_volume_options
from glustolibs.gluster.volume_libs import (setup_volume,
cleanup_volume,
log_volume_info_and_status)
-# from glustolibs.gluster.volume_libs import (
-# wait_for_volume_process_to_be_online)
+from glustolibs.gluster.volume_libs import (
+ wait_for_volume_process_to_be_online)
from glustolibs.gluster.samba_libs import share_volume_over_smb
from glustolibs.gluster.nfs_libs import export_volume_through_nfs
from glustolibs.gluster.mount_ops import create_mount_objs
@@ -185,6 +185,16 @@ class GlusterBaseClass(unittest.TestCase):
if volume_create_force or cls.volume_create_force:
force_volume_create = True
+ # Validate peers before setting up volume
+ g.log.info("Validate peers before setting up volume ")
+ ret = cls.validate_peers_are_connected()
+ if not ret:
+ g.log.error("Failed to validate peers are in connected state "
+ "before setting up volume")
+ return False
+ g.log.info("Successfully validated peers are in connected state "
+ "before setting up volume")
+
# Setup Volume
g.log.info("Setting up volume %s", cls.volname)
ret = setup_volume(mnode=cls.mnode,
@@ -195,15 +205,15 @@ class GlusterBaseClass(unittest.TestCase):
return False
g.log.info("Successful in setting up volume %s", cls.volname)
-# # Wait for volume processes to be online
-# g.log.info("Wait for volume %s processes to be online", cls.volname)
-# ret = wait_for_volume_process_to_be_online(cls.mnode, cls.volname)
-# if not ret:
-# g.log.error("Failed to wait for volume %s processes to "
-# "be online", cls.volname)
-# return False
-# g.log.info("Successful in waiting for volume %s processes to be "
-# "online", cls.volname)
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume %s processes to be online", cls.volname)
+ ret = wait_for_volume_process_to_be_online(cls.mnode, cls.volname)
+ if not ret:
+ g.log.error("Failed to wait for volume %s processes to "
+ "be online", cls.volname)
+ return False
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", cls.volname)
# Export/Share the volume based on mount_type
if cls.mount_type != "glusterfs":
@@ -320,11 +330,6 @@ class GlusterBaseClass(unittest.TestCase):
Returns (bool): True if setting up volume and mounting the volume
for a mount obj is successful. False otherwise
"""
- # Validate peers before setting up volume
- _rc = cls.validate_peers_are_connected()
- if not _rc:
- return _rc
-
# Setup Volume
_rc = cls.setup_volume(volume_create_force)
if not _rc:
diff --git a/tests/functional/bvt/test_cvt.py b/tests/functional/bvt/test_cvt.py
index a2e75be..14af6a0 100644
--- a/tests/functional/bvt/test_cvt.py
+++ b/tests/functional/bvt/test_cvt.py
@@ -38,9 +38,9 @@ from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.volume_libs import enable_and_validate_volume_options
from glustolibs.gluster.volume_libs import (
verify_all_process_of_volume_are_online)
-from glustolibs.gluster.volume_libs import (log_volume_info_and_status,
- expand_volume, shrink_volume,
- replace_brick_from_volume)
+from glustolibs.gluster.volume_libs import (
+ log_volume_info_and_status, expand_volume, shrink_volume,
+ replace_brick_from_volume, wait_for_volume_process_to_be_online)
from glustolibs.gluster.rebalance_ops import (rebalance_start,
wait_for_rebalance_to_complete,
rebalance_status)
@@ -208,8 +208,13 @@ class TestGlusterExpandVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("Expanding volume when IO in progress is successful on "
"volume %s", self.volname)
- # Wait for gluster processes to come online
- time.sleep(30)
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
# Log Volume Info and Status after expanding the volume
g.log.info("Logging volume info and Status after expanding volume")
@@ -297,8 +302,13 @@ class TestGlusterShrinkVolumeSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("Shrinking volume when IO in progress is successful on "
"volume %s", self.volname)
- # Wait for gluster processes to come online
- time.sleep(30)
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
# Log Volume Info and Status after shrinking the volume
g.log.info("Logging volume info and Status after shrinking volume")
@@ -618,8 +628,13 @@ class TestGlusterReplaceBrickSanity(GlusterBasicFeaturesSanityBaseClass):
self.assertTrue(ret, "Failed to replace faulty brick from the volume")
g.log.info("Successfully replaced faulty brick from the volume")
- # Wait for gluster processes to come online
- time.sleep(30)
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
# Log Volume Info and Status after replacing the brick
g.log.info("Logging volume info and Status after replacing brick "
@@ -701,8 +716,13 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("Successful in bringing bricks: %s offline",
bricks_to_bring_offline)
- # Wait for gluster processes to be offline
- time.sleep(10)
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Volume %s processes not online despite waiting "
+ "for 5 mins", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
# Log Volume Info and Status
g.log.info("Logging volume info and Status after bringing bricks "
@@ -735,8 +755,13 @@ class TestGlusterHealSanity(GlusterBasicFeaturesSanityBaseClass):
g.log.info("Successfully brought all bricks:%s online",
bricks_to_bring_offline)
- # Wait for gluster processes to be online
- time.sleep(10)
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
# Log Volume Info and Status
g.log.info("Logging volume info and Status after bringing bricks "
diff --git a/tests/functional/bvt/test_vvt.py b/tests/functional/bvt/test_vvt.py
index ecf5866..1cff675 100644
--- a/tests/functional/bvt/test_vvt.py
+++ b/tests/functional/bvt/test_vvt.py
@@ -21,14 +21,14 @@
"""
import pytest
-import time
from glusto.core import Glusto as g
from glustolibs.gluster.gluster_base_class import (GlusterBaseClass, runs_on)
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_init import is_glusterd_running
from glustolibs.gluster.volume_ops import volume_stop, volume_start
from glustolibs.gluster.volume_libs import (
- verify_all_process_of_volume_are_online)
+ verify_all_process_of_volume_are_online,
+ wait_for_volume_process_to_be_online)
from glustolibs.gluster.volume_libs import log_volume_info_and_status
from glustolibs.misc.misc_libs import upload_scripts
from glustolibs.io.utils import validate_io_procs, get_mounts_stat
@@ -114,7 +114,13 @@ class VolumeAccessibilityTests(GlusterBaseClass):
self.assertEqual(ret, 0, "Failed to start volume %s" % self.volname)
g.log.info("Successfully started volume %s", self.volname)
- time.sleep(15)
+ # Wait for volume processes to be online
+ g.log.info("Wait for volume processes to be online")
+ ret = wait_for_volume_process_to_be_online(self.mnode, self.volname)
+ self.assertTrue(ret, ("Failed to wait for volume %s processes to "
+ "be online", self.volname))
+ g.log.info("Successful in waiting for volume %s processes to be "
+ "online", self.volname)
# Log Volume Info and Status
g.log.info("Logging Volume %s Info and Status", self.volname)