summaryrefslogtreecommitdiffstats
path: root/tests
diff options
context:
space:
mode:
authorSri Vignesh <sselvan@redhat.com>2020-01-30 19:59:18 +0530
committerBala Konda Reddy M <bala12352@gmail.com>2020-02-18 05:47:13 +0000
commit458ae92da119b3b8d378bb6021190891b265368f (patch)
tree2a048d2cfbb61b61fcf0bf3d7e9b371d1ccfc9c4 /tests
parent25064013d6c0889db3d2bf931b0b2d01c72f6a96 (diff)
[testfix] Add steps to stabilize contents in snapshot - part2
Used library functions to wait for glusterd to start and for peers to connect, and made modifications in the teardown part to rectify statements to their correct values. Change-Id: I40b4362ae1491acf75681c7623c16c53213bb1b9 Signed-off-by: Sri Vignesh <sselvan@redhat.com>
Diffstat (limited to 'tests')
-rw-r--r--tests/functional/snapshot/test_snap_delete_multiple.py14
-rw-r--r--tests/functional/snapshot/test_snap_delete_snap_of_volume.py14
-rw-r--r--tests/functional/snapshot/test_snap_glusterd_down.py46
-rw-r--r--tests/functional/snapshot/test_snap_scheduler_status.py55
-rw-r--r--tests/functional/snapshot/test_snap_uss_while_io.py20
-rw-r--r--tests/functional/snapshot/test_snapshot_create.py20
6 files changed, 81 insertions, 88 deletions
diff --git a/tests/functional/snapshot/test_snap_delete_multiple.py b/tests/functional/snapshot/test_snap_delete_multiple.py
index 8badee4..d1d7286 100644
--- a/tests/functional/snapshot/test_snap_delete_multiple.py
+++ b/tests/functional/snapshot/test_snap_delete_multiple.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -231,16 +231,16 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
self.clone2, ret3)
self.assertEqual(ret1, 0, "Failed to create snapshots")
+ def tearDown(self):
+ # Calling GlusterBaseClass teardown
+ self.get_super_method(self, 'tearDown')()
+
# delete created snapshots
g.log.info("starting to delete all created snapshots")
ret, _, _ = snap_delete_all(self.mnode)
self.assertEqual(ret, 0, "Failed to delete all snapshots")
g.log.info("Successfully deleted all snapshots")
- def tearDown(self):
- # Calling GlusterBaseClass teardown
- self.get_super_method(self, 'tearDown')()
-
# Disable Activate on create
option = {'activate-on-create': 'disable'}
ret, _, _ = set_snap_config(self.mnode, option)
@@ -251,13 +251,13 @@ class SnapshotCloneDeleteMultiple(GlusterBaseClass):
# umount clone volume
g.log.info("Unmounting clone volume")
- ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint1)
+ ret, _, _ = umount_volume(self.clients[0], self.mpoint1)
if ret != 0:
raise ExecutionError("Failed to unmount clone "
"volume %s" % self.clone1)
g.log.info("Successfully unmounted clone volume %s", self.clone1)
- ret, _, _ = umount_volume(self.mounts[0].client_system, self.mpoint2)
+ ret, _, _ = umount_volume(self.clients[0], self.mpoint2)
if ret != 0:
raise ExecutionError("Failed to unmount clone "
"volume %s" % self.clone2)
diff --git a/tests/functional/snapshot/test_snap_delete_snap_of_volume.py b/tests/functional/snapshot/test_snap_delete_snap_of_volume.py
index 6273647..afea293 100644
--- a/tests/functional/snapshot/test_snap_delete_snap_of_volume.py
+++ b/tests/functional/snapshot/test_snap_delete_snap_of_volume.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -47,6 +47,12 @@ class SnapshotDeleteSnapVolume(GlusterBaseClass):
# Calling GlusterBaseClass tearDown
self.get_super_method(self, 'tearDown')()
+ # delete all snapshot created
+ g.log.info("Deleting all snapshots created")
+ ret, _, _ = snap_delete_all(self.mnode)
+ self.assertEqual(ret, 0, "Failed to delete snapshots")
+ g.log.info("All Snapshots deleted successfully")
+
# Unmount and cleanup-volume
g.log.info("Unmount and cleanup-volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
@@ -102,9 +108,3 @@ class SnapshotDeleteSnapVolume(GlusterBaseClass):
"%s" % (self.snap1, self.volname)))
g.log.info("Snapshot %s created successfully"
" for volume %s", self.snap1, self.volname)
-
- # delete all snapshot created
- g.log.info("Deleting all snapshots created")
- ret, _, _ = snap_delete_all(self.mnode)
- self.assertEqual(ret, 0, "Failed to delete snapshots")
- g.log.info("All Snapshots deleted successfully")
diff --git a/tests/functional/snapshot/test_snap_glusterd_down.py b/tests/functional/snapshot/test_snap_glusterd_down.py
index 70cf765..d18dbe4 100644
--- a/tests/functional/snapshot/test_snap_glusterd_down.py
+++ b/tests/functional/snapshot/test_snap_glusterd_down.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,14 +21,13 @@ Test Cases in this module tests the
snapshot activation and deactivation status
when glusterd is down.
"""
-import time
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
-from glustolibs.gluster.peer_ops import is_peer_connected
+from glustolibs.gluster.peer_ops import wait_for_peers_to_connect
from glustolibs.gluster.gluster_init import (stop_glusterd,
start_glusterd,
- is_glusterd_running)
+ wait_for_glusterd_to_start)
from glustolibs.gluster.snap_ops import (snap_create,
get_snap_info_by_snapname,
get_snap_list, snap_deactivate,
@@ -121,15 +120,9 @@ class SnapshotGlusterddown(GlusterBaseClass):
# Check Glusterd status
g.log.info("Check glusterd running or not")
- count = 0
- while count < 80:
- ret = is_glusterd_running(self.servers[1])
- if ret == 1:
- break
- time.sleep(2)
- count += 2
- self.assertEqual(ret, 1, "Unexpected: glusterd running on node %s" %
- self.servers[1])
+ self.assertFalse(
+ wait_for_glusterd_to_start(self.servers[1]),
+ "glusterd is still running on %s" % self.servers[1])
g.log.info("Expected: Glusterd not running on node %s",
self.servers[1])
@@ -158,15 +151,9 @@ class SnapshotGlusterddown(GlusterBaseClass):
# Check Glusterd status
g.log.info("Check glusterd running or not")
- count = 0
- while count < 80:
- ret = is_glusterd_running(self.servers[1])
- if ret:
- break
- time.sleep(2)
- count += 2
- self.assertEqual(ret, 0, "glusterd not running on node %s "
- % self.servers[1])
+ self.assertTrue(
+ wait_for_glusterd_to_start(self.servers[1]),
+ "glusterd is still running on %s" % self.servers[1])
g.log.info("glusterd is running on %s node",
self.servers[1])
@@ -183,15 +170,9 @@ class SnapshotGlusterddown(GlusterBaseClass):
# Check all the peers are in connected state
g.log.info("Validating all the peers are in connected state")
- for servers in self.servers:
- count = 0
- while count < 80:
- ret = is_peer_connected(self.mnode, servers)
- if ret:
- break
- time.sleep(2)
- count += 2
- self.assertTrue(ret, "All the nodes are not in cluster")
+ self.assertTrue(
+ wait_for_peers_to_connect(self.mnode, self.servers),
+ "glusterd is still running on %s" % self.servers)
g.log.info("Successfully validated all the peers")
def tearDown(self):
@@ -202,3 +183,6 @@ class SnapshotGlusterddown(GlusterBaseClass):
if not ret:
raise ExecutionError("Failed to umount the vol & cleanup Volume")
g.log.info("Successful in umounting the volume and Cleanup")
+
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
diff --git a/tests/functional/snapshot/test_snap_scheduler_status.py b/tests/functional/snapshot/test_snap_scheduler_status.py
index fe81c4b..a403c7b 100644
--- a/tests/functional/snapshot/test_snap_scheduler_status.py
+++ b/tests/functional/snapshot/test_snap_scheduler_status.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -55,6 +55,29 @@ class SnapshotSchedulerStatus(GlusterBaseClass):
tearDown for every test
"""
+ # disable snap scheduler
+ g.log.info("disabling snap scheduler")
+ ret, _, _ = scheduler_disable(self.mnode)
+ self.assertEqual(ret, 0, "Unexpected: Failed to disable "
+ "snapshot scheduler")
+ g.log.info("Successfully disabled snapshot scheduler")
+
+ # Check snapshot scheduler status
+ g.log.info("checking status of snapshot scheduler")
+ for server in self.servers:
+ count = 0
+ while count < 40:
+ ret, status, _ = scheduler_status(server)
+ status = status.strip().split(":")[2]
+ if not ret and status == ' Disabled':
+ break
+ sleep(2)
+ count += 1
+ self.assertEqual(ret, 0, "Failed to check status of scheduler"
+ " on node %s" % server)
+ g.log.info("Successfully checked scheduler status on %s nodes",
+ server)
+
# Check if shared storage is enabled
# Disable if true
g.log.info("Checking if shared storage is mounted")
@@ -134,6 +157,7 @@ class SnapshotSchedulerStatus(GlusterBaseClass):
# Initialise snap_scheduler on all nodes
g.log.info("Initialising snapshot scheduler on all nodes")
count = 0
+ sleep(2)
while count < 40:
ret = scheduler_init(self.servers)
if ret:
@@ -156,33 +180,8 @@ class SnapshotSchedulerStatus(GlusterBaseClass):
count = 0
while count < 40:
ret, status, _ = scheduler_status(server)
- if ret == 0:
- self.assertEqual(status.strip().split(":")[2], ' Enabled',
- "Failed to check status of scheduler")
- break
- sleep(2)
- count += 1
- self.assertEqual(ret, 0, "Failed to check status of scheduler"
- " on node %s" % server)
- g.log.info("Successfully checked scheduler status on %s nodes",
- server)
-
- # disable snap scheduler
- g.log.info("disabling snap scheduler")
- ret, _, _ = scheduler_disable(self.mnode)
- self.assertEqual(ret, 0, "Unexpected: Failed to disable "
- "snapshot scheduler")
- g.log.info("Successfully disabled snapshot scheduler")
-
- # Check snapshot scheduler status
- g.log.info("checking status of snapshot scheduler")
- for server in self.servers:
- count = 0
- while count < 40:
- ret, status, _ = scheduler_status(server)
- if not ret:
- self.assertEqual(status.strip().split(":")[2], ' Disabled',
- "Failed to check status of scheduler")
+ status = status.strip().split(":")[2]
+ if ret == 0 and status == ' Enabled':
break
sleep(2)
count += 1
diff --git a/tests/functional/snapshot/test_snap_uss_while_io.py b/tests/functional/snapshot/test_snap_uss_while_io.py
index b51e929..9f5c91e 100644
--- a/tests/functional/snapshot/test_snap_uss_while_io.py
+++ b/tests/functional/snapshot/test_snap_uss_while_io.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -33,7 +33,8 @@ from glustolibs.io.utils import (validate_io_procs,
from glustolibs.gluster.snap_ops import (snap_create,
snap_activate,
snap_list)
-from glustolibs.gluster.uss_ops import (enable_uss, is_uss_enabled,
+from glustolibs.gluster.uss_ops import (disable_uss,
+ enable_uss, is_uss_enabled,
is_snapd_running)
from glustolibs.misc.misc_libs import upload_scripts
@@ -70,6 +71,18 @@ class SnapshotUssWhileIo(GlusterBaseClass):
def tearDown(self):
+ # Validate USS running
+ g.log.info("Validating USS enabled or disabled")
+ ret = is_uss_enabled(self.mnode, self.volname)
+ if not ret:
+ # Disable USS
+ ret, _, _ = disable_uss(self.mnode, self.volname)
+ if not ret:
+ raise ExecutionError("Failed to disable USS on volume"
+ "%s" % self.volname)
+ g.log.info("Successfully disabled USS on volume %s",
+ self.volname)
+
# Unmount and cleanup original volume
g.log.info("Starting to Unmount Volume and Cleanup Volume")
ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
@@ -77,6 +90,9 @@ class SnapshotUssWhileIo(GlusterBaseClass):
raise ExecutionError("Failed to umount the vol & cleanup Volume")
g.log.info("Successful in umounting the volume and Cleanup")
+ # Calling GlusterBaseClass tearDown
+ self.get_super_method(self, 'tearDown')()
+
def test_snap_uss_while_io(self):
# pylint: disable=too-many-statements
"""
diff --git a/tests/functional/snapshot/test_snapshot_create.py b/tests/functional/snapshot/test_snapshot_create.py
index a8e6c1c..daa7fdf 100644
--- a/tests/functional/snapshot/test_snapshot_create.py
+++ b/tests/functional/snapshot/test_snapshot_create.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2017-2018 Red Hat, Inc. <http://www.redhat.com>
+# Copyright (C) 2017-2020 Red Hat, Inc. <http://www.redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -91,20 +91,14 @@ class SnapCreate(GlusterBaseClass):
ret, _, _ = snap_delete_all(self.mnode)
if ret != 0:
raise ExecutionError("Failed to delete all snaps")
- self.get_super_method(self, 'tearDown')()
- @classmethod
- def tearDownClass(cls):
- """
- Clean up the volume & mount
- """
- g.log.info("Starting volume and mount cleanup")
- ret = cls.unmount_volume_and_cleanup_volume(cls.mounts)
+ # Unmount and cleanup original volume
+ g.log.info("Starting to Unmount Volume and Cleanup Volume")
+ ret = self.unmount_volume_and_cleanup_volume(mounts=self.mounts)
if not ret:
- raise ExecutionError("Failed to cleanup volume and mount")
- g.log.info("Cleanup successful for the volume and mount")
-
- cls.get_super_method(cls, 'tearDownClass')()
+ raise ExecutionError("Failed to umount the vol & cleanup Volume")
+ g.log.info("Successful in umounting the volume and Cleanup")
+ self.get_super_method(self, 'tearDown')()
def test_validate_snaps_create(self):
"""